text
stringlengths 11
4.05M
|
|---|
package main
import "fmt"
func main() {
	// iota is commonly used for enumerations.
	// iota auto-increments per ConstSpec line; a line with no expression
	// repeats the previous line's expression with the new iota value.
	const (
		one, two = iota + 1, iota + 2 // iota = 0 -> 1, 2
		three, four                   // iota = 1 -> 2, 3
		fie, six                      // iota = 2 -> 3, 4
	)
	fmt.Println(one, two, three, four, fie, six) // prints: 1 2 2 3 3 4
}
|
package main
import "testing"
// TestSet verifies that Env.Set stores a value under its plain key.
func TestSet(t *testing.T) {
	const key, value = "TEST", "VALUE"
	env := make(Env)
	env.Set(key, value)
	got, ok := env[key]
	if !ok {
		t.Fatalf("expected to have %s in env", key)
	}
	if got != value {
		t.Fatalf("expected the value to be %s, but got %s", value, got)
	}
}
// TestSetBuiltin verifies that Env.SetBuiltin stores the value under
// the key prefixed with EnvBuiltinPrefix.
func TestSetBuiltin(t *testing.T) {
	const key, value = "TEST", "VALUE"
	env := make(Env)
	env.SetBuiltin(key, value)
	prefixed := EnvBuiltinPrefix + key
	got, ok := env[prefixed]
	if !ok {
		t.Fatalf("expected to have %s in env", prefixed)
	}
	if got != value {
		t.Fatalf("expected the value to be %s, but got %s", value, got)
	}
}
// TestEncode verifies that Encode emits one KEY=VALUE string per entry,
// for both plain and builtin-prefixed keys.
func TestEncode(t *testing.T) {
	env := make(Env)
	const key, value = "TEST", "VALUE"
	env.Set(key, value)
	env.SetBuiltin(key, value)
	encoded := env.Encode()
	if len(encoded) != 2 {
		t.Fatalf("expected to get 2 key-value pairs, but got %d", len(encoded))
	}
	// Order of Encode's output is unspecified, so search for each pair.
	for _, want := range []string{"TEST=VALUE", "GOTOPUS_TEST=VALUE"} {
		found := false
		for _, e := range encoded {
			if e == want {
				found = true
			}
		}
		if !found {
			t.Fatalf("expected to have %s", want)
		}
	}
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// main assigns 100 to the named type Hex and prints its hexadecimal
// string representation via the String method.
//
// NOTE: the original kept a large commented-out scratchpad here
// (hello-world prints, if/else, switch, a counted for loop, and a
// labeled infinite loop broken with `break LOOP`); it was dead text
// and is summarized by this note.
func main() {
	var hex Hex = 100
	fmt.Println(hex.String()) // prints "64"
}
// main2 prints the parity of every integer in [0, 100).
func main2() {
	for i := 0; i < 100; i++ {
		switch {
		case i%2 == 0:
			fmt.Println(i, ":even")
		default:
			fmt.Println(i, ":odd")
		}
	}
}
// main3 draws a fortune by rolling a six-sided die.
//
// Bug fix: rand.Intn(6) returns a value in [0, 6), i.e. 0..5, so the
// original `case 6` ("大吉") was unreachable and 0 always fell through
// to the default. Adding 1 maps the roll onto the intended 1..6 range.
func main3() {
	t := time.Now().UnixNano()
	rand.Seed(t)
	n := rand.Intn(6) + 1 // die face: 1..6
	switch n {
	case 6:
		fmt.Println("大吉")
	case 5, 4:
		fmt.Println("中吉")
	case 3, 2:
		fmt.Println("小吉")
	default:
		fmt.Println("凶")
	}
}
// main4 averages three scores and prints "good" when the average
// exceeds 4.5.
//
// Bug fix: the original wrote float32(sum / 3), performing integer
// division first (14/3 == 4), so avg was exactly 4.0 and "good" was
// never printed even though 14/3 ≈ 4.67 > 4.5. Convert before dividing.
func main4() {
	var sum int
	sum = 5 + 6 + 3
	var avg float32 = float32(sum) / 3
	if avg > 4.5 {
		println("good")
	}
}
// main5 evaluates a boolean expression over zero-valued flags.
// a, b and c are all false, so `a && b || !c` is true and "True" prints.
func main5() {
	var a, b, c bool
	cond := a && b || !c
	if !cond {
		println("false")
		return
	}
	println("True")
}
// main6 demonstrates anonymous structs, arrays, slices, maps (comma-ok
// lookups) and a locally declared named struct type.
func main6() {
	// Anonymous struct literal.
	p := struct {
		name string
		age  int
	}{
		name: "Gopher",
		age:  10,
	}
	p.age++
	fmt.Println(p.name, p.age)
	// Fixed-size array of 5 ints.
	var ns = [5]int{1, 2, 3, 4, 5}
	println(ns[3])
	println(len(ns))
	// NOTE(review): builtin println applied to a slice prints its
	// header (len/cap/data pointer), not the elements.
	println(ns[1:3])
	var slice = []int{10, 20, 30, 40, 50}
	fmt.Println(slice)
	m := map[string]int{"x": 10, "y": 20}
	//println(m["x"])
	m["z"] = 30
	//println(m["z"])
	// Comma-ok lookup: "s" is absent, so the else branch prints the
	// zero value (0) and ok == false.
	if n, ok := m["s"]; ok {
		fmt.Println("n:", n)
		fmt.Println("ok:", ok)
	} else {
		fmt.Println("n:", n)
		fmt.Println("ok:", ok)
	}
	// Named struct type scoped to this function.
	type Person struct {
		Name string
	}
	var test_name Person
	test_name.Name = "aaa"
	test_name2 := Person{Name: "bbb"}
	fmt.Println(test_name2.Name)
}
// add returns the sum of x and y.
func add(x int, y int) int {
	sum := x + y
	return sum
}
// swap returns its two arguments in reversed order.
func swap(x, y int) (int, int) {
	a, b := y, x
	return a, b
}
// swap2 returns its two arguments reversed, using named results.
func swap2(x, y int) (x2 int, y2 int) {
	x2, y2 = y, x
	return
}
// main7 shows struct value-copy semantics and basic pointer use.
func main7() {
	person := struct {
		age  int
		name string
	}{age: 10, name: "Gopher"}
	clone := person // copied by value; mutating the copy leaves person intact
	clone.age = 20
	println(person.age, person.name) // prints 10 Gopher
	n := 100
	np := &n
	print(*np) // dereference prints 100
}
// Hex is an integer that renders itself in lowercase hexadecimal.
type Hex int

// String implements fmt.Stringer, formatting the value with the %x verb.
func (h Hex) String() string {
	value := int(h)
	return fmt.Sprintf("%x", value)
}
|
package leetcode
import (
"reflect"
"testing"
)
// TestFloodFill fills the region connected to (1,1) with color 2 and
// compares against the expected grid.
func TestFloodFill(t *testing.T) {
	input := [][]int{
		{1, 1, 1},
		{1, 1, 0},
		{1, 0, 1},
	}
	want := [][]int{
		{2, 2, 2},
		{2, 2, 0},
		{2, 0, 1},
	}
	if !reflect.DeepEqual(floodFill(input, 1, 1, 2), want) {
		t.Fatal()
	}
}
|
package bslib
import (
"errors"
"testing"
)
// Fixture names and icon identifiers used by the item CRUD tests below.
const cTestItemName01 = "hjb cwec78hduycbwj dbwne w"
const cTestItemIcon01 = "fas fa-ambulance" // Font Awesome solid-style icon
const cTestItemName02 = "98jmwhj2ndycwbcjdwlmdk"
const cTestItemIcon02 = "fab fa-linkedin" // Font Awesome brand-style icon
// testHelperCreateItem unlocks the store, adds an item with the test
// name/icon fixtures, locks the store again, and returns the new ID.
func testHelperCreateItem() (itemId int64, err error) {
	bs := GetInstance()
	if err = bs.Unlock(dbPass); err != nil {
		return 0, err
	}
	form := UpdateItemForm{
		BSItem: BSItem{
			Name: cTestItemName01,
			Icon: cTestItemIcon01,
		},
	}
	response, err := bs.AddNewItem(form)
	if err != nil {
		return 0, err
	}
	if response.Status != ConstSuccessResponse {
		return 0, errors.New("response is not successful")
	}
	if errLock := bs.Lock(); errLock != nil {
		return 0, errLock
	}
	return response.ItemID, nil
}
// TestUpdateItemName creates an item, renames it, then re-reads it
// after a lock/unlock cycle and checks the persisted name.
func TestUpdateItemName(t *testing.T) {
	itemId, err := testHelperCreateItem()
	if err != nil {
		t.Error(err)
		t.FailNow()
		return
	}
	bsInstance := GetInstance()
	errPass := bsInstance.Unlock(dbPass)
	if errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	updateResponse, errUpdated := bsInstance.UpdateItem(
		UpdateItemForm{
			BSItem: BSItem{
				ID:   itemId,
				Name: cTestItemName02,
			},
		},
	)
	if errUpdated != nil {
		// Bug fix: the original reported errPass (always nil here)
		// instead of the actual update error.
		t.Error(errUpdated)
		t.FailNow()
		return
	}
	if updateResponse.Status != ConstSuccessResponse {
		t.Error(errors.New("update response is not successful"))
		t.FailNow()
		return
	}
	// Lock and unlock again to verify the change survives a reload.
	errLock := bsInstance.Lock()
	if errLock != nil {
		t.Error(errLock)
		t.FailNow()
		return
	}
	errPass = bsInstance.Unlock(dbPass)
	if errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	item, respErr := bsInstance.ReadItemById(itemId, false)
	if respErr != nil {
		t.Error(respErr)
		t.FailNow()
		return
	}
	if item.ID != itemId {
		t.Error(errors.New("response item is wrong"))
		t.FailNow()
		return
	}
	if item.Name != cTestItemName02 {
		t.Errorf("Expected '%s' after update, retrieved '%s'", cTestItemName02, item.Name)
		t.FailNow()
		return
	}
}
// TestDeleteItem creates an item, deletes it, then verifies it no
// longer appears as active and does appear flagged as deleted.
func TestDeleteItem(t *testing.T) {
	itemId, err := testHelperCreateItem()
	if err != nil {
		t.Error(err)
		t.FailNow()
		return
	}
	bsInstance := GetInstance()
	errPass := bsInstance.Unlock(dbPass)
	if errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	delResponse, errDelete := bsInstance.DeleteItem(
		UpdateItemForm{
			BSItem: BSItem{
				ID: itemId,
			},
		},
	)
	if errDelete != nil {
		// Bug fix: the original reported errPass (always nil here)
		// instead of the actual deletion error.
		t.Error(errDelete)
		t.FailNow()
		return
	}
	if delResponse.Status != ConstSuccessResponse {
		t.Error(errors.New("deletion response is not successful"))
		t.FailNow()
		return
	}
	// Lock and unlock again so reads below hit freshly loaded state.
	errLock := bsInstance.Lock()
	if errLock != nil {
		t.Error(errLock)
		t.FailNow()
		return
	}
	errPass = bsInstance.Unlock(dbPass)
	if errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	// The item must not be listed among non-deleted items.
	items, respErr := bsInstance.ReadAllItems(false, false)
	if respErr != nil {
		t.Error(respErr)
		t.FailNow()
		return
	}
	for _, item := range items {
		if item.ID == itemId && item.Deleted == false {
			t.Error(errors.New("deleted item is found as not deleted"))
			t.FailNow()
			return
		}
	}
	// It must still be present when deleted items are included.
	itemsWithDeleted, respErr2 := bsInstance.ReadAllItems(false, true)
	if respErr2 != nil {
		t.Error(respErr2)
		t.FailNow()
		return
	}
	foundInDeleted := false
	for _, item := range itemsWithDeleted {
		if item.ID == itemId && item.Deleted {
			foundInDeleted = true
			break
		}
	}
	if !foundInDeleted {
		t.Error(errors.New("deleted item is not found in deleted items"))
		t.FailNow()
	}
	// TODO: Check if fields of deleted items are deleted too
}
// cTestNonExistingIcon is deliberately not a valid icon identifier.
const cTestNonExistingIcon = "djcndkcnkd"
// TestAddItemWithNonExistingIcon ensures that adding an item with an
// unknown icon identifier returns an error and a non-success response.
func TestAddItemWithNonExistingIcon(t *testing.T) {
	bs := GetInstance()
	if errPass := bs.Unlock(dbPass); errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	form := UpdateItemForm{
		BSItem: BSItem{
			Name: cTestItemName01,
			Icon: cTestNonExistingIcon,
		},
	}
	response, err := bs.AddNewItem(form)
	if err == nil {
		t.Error(errors.New("no error returned in spite of the fact that icon is not existing"))
		t.FailNow()
		return
	}
	if response.Status == ConstSuccessResponse {
		t.Error(errors.New("item was added in spite of the fact that icon is not existing"))
		t.FailNow()
		return
	}
}
// TestAddItem creates an item via the helper and verifies it can be
// read back with the same ID and name.
func TestAddItem(t *testing.T) {
	itemId, err := testHelperCreateItem()
	if err != nil {
		t.Error(err)
		t.FailNow()
		return
	}
	bs := GetInstance()
	if errPass := bs.Unlock(dbPass); errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	item, respErr := bs.ReadItemById(itemId, false)
	if respErr != nil {
		t.Error(respErr)
		t.FailNow()
		return
	}
	// De Morgan of the original's success short-circuit.
	if item.ID != itemId || item.Name != cTestItemName01 {
		t.Error(errors.New("created item is not found"))
		t.FailNow()
	}
}
// TestUpdateItemIcon creates an item, changes its icon, then re-reads
// it after a lock/unlock cycle and checks the persisted icon.
func TestUpdateItemIcon(t *testing.T) {
	itemId, err := testHelperCreateItem()
	if err != nil {
		t.Error(err)
		t.FailNow()
		return
	}
	bsInstance := GetInstance()
	errPass := bsInstance.Unlock(dbPass)
	if errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	updateIconResponse, errIconUpdated := bsInstance.UpdateItem(
		UpdateItemForm{
			BSItem: BSItem{
				ID:   itemId,
				Icon: cTestItemIcon02,
			},
		},
	)
	if errIconUpdated != nil {
		// Bug fix: the original reported errPass (always nil here)
		// instead of the actual icon-update error.
		t.Error(errIconUpdated)
		t.FailNow()
		return
	}
	if updateIconResponse.Status != ConstSuccessResponse {
		t.Error(errors.New("icon update response is not successful"))
		t.FailNow()
		return
	}
	// Lock and unlock again to verify the change survives a reload.
	errLock := bsInstance.Lock()
	if errLock != nil {
		t.Error(errLock)
		t.FailNow()
		return
	}
	errPass = bsInstance.Unlock(dbPass)
	if errPass != nil {
		t.Error(errPass)
		t.FailNow()
		return
	}
	iconUpdatedItem, respErr := bsInstance.ReadItemById(itemId, false)
	if respErr != nil {
		t.Error(respErr)
		t.FailNow()
		return
	}
	if iconUpdatedItem.ID != itemId {
		t.Error(errors.New("response item is wrong"))
		t.FailNow()
		return
	}
	if iconUpdatedItem.Icon != cTestItemIcon02 {
		t.Errorf("Expected icon value '%s' after update, retrieved '%s'", cTestItemIcon02, iconUpdatedItem.Icon)
		t.FailNow()
		return
	}
}
|
package sqlbatch
import (
"database/sql"
"github.com/lib/pq"
"time"
"unsafe"
)
//--------------------------------------------------------------------------
// makeNullBoolPtrGetter returns a getter that points *ifacePtr at the
// sql.NullBool field located `offset` bytes into the struct at structPtr.
// The pointer arithmetic is kept in a single expression, the form the
// unsafe.Pointer conversion rules (and go vet) sanction.
func makeNullBoolPtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
	return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
		*ifacePtr = (*sql.NullBool)(unsafe.Pointer(uintptr(structPtr) + offset))
	}
}

// makeNullFloat64PtrGetter is the sql.NullFloat64 variant of the above.
func makeNullFloat64PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
	return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
		*ifacePtr = (*sql.NullFloat64)(unsafe.Pointer(uintptr(structPtr) + offset))
	}
}

// makeNullInt64PtrGetter is the sql.NullInt64 variant.
func makeNullInt64PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
	return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
		*ifacePtr = (*sql.NullInt64)(unsafe.Pointer(uintptr(structPtr) + offset))
	}
}

// makeNullStringPtrGetter is the sql.NullString variant.
func makeNullStringPtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
	return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
		*ifacePtr = (*sql.NullString)(unsafe.Pointer(uintptr(structPtr) + offset))
	}
}

// makeNullTimePtrGetter is the pq.NullTime variant.
func makeNullTimePtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
	return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
		*ifacePtr = (*pq.NullTime)(unsafe.Pointer(uintptr(structPtr) + offset))
	}
}
//--------------------------------------------------------------------------
func makeTimePtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*time.Time)(p)
}
}
func makeByteSlicePtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*[]byte)(p)
}
}
//--------------------------------------------------------------------------
func makeBoolPtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*bool)(p)
}
}
func makeStringPtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*string)(p)
}
}
//--------------------------------------------------------------------------
func makeFloat32PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*float32)(p)
}
}
func makeFloat64PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*float64)(p)
}
}
//--------------------------------------------------------------------------
func makeIntPtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*int)(p)
}
}
func makeInt8PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*int8)(p)
}
}
func makeInt16PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*int16)(p)
}
}
func makeInt32PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*int32)(p)
}
}
func makeInt64PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*int64)(p)
}
}
//--------------------------------------------------------------------------
func makeUintPtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*uint)(p)
}
}
func makeUint8PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*uint8)(p)
}
}
func makeUint16PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*uint16)(p)
}
}
func makeUint32PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*uint32)(p)
}
}
func makeUint64PtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*uint64)(p)
}
}
//--------------------------------------------------------------------------
func makeInt64SlicePtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*[]int64)(p)
}
}
func makeStringSlicePtrGetter(offset uintptr) func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
return func(structPtr unsafe.Pointer, ifacePtr *interface{}) {
p := unsafe.Pointer(uintptr(structPtr) + offset)
*ifacePtr = (*[]string)(p)
}
}
|
package symmetric
import (
"encoding/base64"
"fmt"
"testing"
)
// TestAesCrypt_Encrypt encrypts a sample value with a 32-byte key and
// prints the base64 encoding of the ciphertext.
//
// Fix: t.Fail() marks the test failed but keeps executing, so the
// original went on to encode a meaningless result after an encryption
// error. t.Fatal stops immediately and records the error.
func TestAesCrypt_Encrypt(t *testing.T) {
	key := "kLieko0EWllskjeWkLieko0EWllskjeW"
	value := "hello world"
	aesCipher := AesCrypt{
		Encrypter: Encrypter{
			Format:     "base64",
			DecodeFunc: base64.StdEncoding.DecodeString,
			EncodeFunc: base64.StdEncoding.EncodeToString,
		},
	}
	encrypt, err := aesCipher.Encrypt(key, value)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(aesCipher.EncodeFunc(encrypt))
}
// TestAesCrypt_Decrypt decodes a known base64 ciphertext and decrypts
// it with the matching 32-byte key, printing the recovered plaintext.
//
// Fix: t.Fail() does not stop the test, so the original continued into
// Decrypt with a nil/partial byte slice after a decode error, and then
// printed a meaningless result after a decrypt error. Use t.Fatal.
func TestAesCrypt_Decrypt(t *testing.T) {
	key := "kLieko0EWllskjeWkLieko0EWllskjeW"
	raw := "eSFvkh0qejaCwdIlpV8DwQ=="
	aesCipher := AesCrypt{
		Encrypter: Encrypter{
			Format:     "base64",
			DecodeFunc: base64.StdEncoding.DecodeString,
			EncodeFunc: base64.StdEncoding.EncodeToString,
		},
	}
	rawBytes, err := aesCipher.DecodeFunc(raw)
	if err != nil {
		t.Fatal(err)
	}
	result, err := aesCipher.Decrypt(key, rawBytes)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(string(result))
}
|
package main
import (
"alro/config"
"alro/server"
"fmt"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"google.golang.org/grpc"
"net"
"os"
)
// log is the shared logrus logger, configured by main before use.
var log *logrus.Logger

// main switches logrus to JSON output, builds the gRPC server and
// hands it to startServer. Exits with status 1 if server construction fails.
func main() {
	logrus.SetFormatter(&logrus.JSONFormatter{})
	log = logrus.StandardLogger()
	// alro holds the service configuration (a viper instance loaded by
	// the config package).
	alro := config.Alro
	g, err := server.NewGRPCServer()
	if err != nil {
		// NOTE(review): Println joins arguments with spaces; "err" here is a
		// literal word, not a structured field — consider log.WithError(err).
		log.Println("End gRPC server", "err", err)
		os.Exit(1)
	}
	startServer(alro, g)
}
// startServer binds a TCP listener on 0.0.0.0 at the configured "port"
// and serves gRPC on it; it blocks until Serve returns. Any bind or
// serve error is logged and the process exits with status 1.
func startServer(alro *viper.Viper, g *grpc.Server) {
	address := fmt.Sprintf("0.0.0.0:%d", alro.GetInt("port"))
	log.Println(address)
	log.Println("Start ALRO!!!")
	l, err := net.Listen("tcp", address)
	if err != nil {
		log.Println("End to bind for gRPC server", "err", err)
		os.Exit(1)
	}
	// Serve blocks for the lifetime of the server.
	if err := g.Serve(l); err != nil {
		log.Println("End gRPC server", "err", err)
		os.Exit(1)
	}
}
|
package db
import (
mydb"FILE_STORE/db/mysql"
"database/sql"
"fmt"
)
// 存储文件到数据库中
// OnFileUploadedFinished records an uploaded file's metadata in tbl_file.
// It returns true when the insert statement executed successfully
// (including the "insert ignore" duplicate case) and false on any
// prepare/exec error or when RowsAffected itself fails.
func OnFileUploadedFinished(filehash string, filename string, filesize int64, fileaddr string) bool {
	stmt, err := mydb.DBConn().Prepare("insert ignore into tbl_file (`file_sha1`, `file_name`, `file_size`, `file_addr`, `status`) values (?,?,?,?,1)")
	if err != nil {
		fmt.Println("Failed to prepare statement,err:", err.Error())
		return false
	}
	defer stmt.Close()
	ret, err := stmt.Exec(filehash, filename, filesize, fileaddr)
	if err != nil {
		fmt.Println(err.Error())
		return false
	}
	// "insert ignore" affects zero rows when the hash already exists;
	// report the duplicate but still treat the upload as finished.
	if rf, err := ret.RowsAffected(); err == nil {
		if rf <= 0 {
			// Bug fix: fmt.Println does not interpret format verbs; the
			// original printed the literal "%s". Use Printf with a newline.
			fmt.Printf("File with hash:%s has been uploaded before!\n", filehash)
		}
		return true
	}
	return false
}
// FileTable mirrors one row of tbl_file as selected by GetFile.
type FileTable struct {
	FileHash string         // file_sha1: SHA-1 digest used as the lookup key
	FileName sql.NullString // file_name: nullable original file name
	FileSize sql.NullInt64  // file_size: nullable size value
	FileAddr sql.NullString // file_addr: nullable storage location
}
// GetFile looks up the single active (status=1) tbl_file row whose
// file_sha1 matches filehash. Errors are printed and returned.
func GetFile(filehash string) (*FileTable, error) {
	const query = "select file_sha1, file_name, file_size, file_addr from tbl_file " +
		"where file_sha1=? and status=1 limit 1"
	stmt, err := mydb.DBConn().Prepare(query)
	if err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	defer stmt.Close()
	var ft FileTable
	if err = stmt.QueryRow(filehash).Scan(&ft.FileHash, &ft.FileName, &ft.FileSize, &ft.FileAddr); err != nil {
		fmt.Println(err.Error())
		return nil, err
	}
	return &ft, nil
}
|
// This file is subject to a 1-clause BSD license.
// Its contents can be found in the enclosed LICENSE file.
package evdev
// Synchronization event values are undefined.
// Their usage is defined only by when they are
// sent in the evdev event stream.
//
// SynReport is used to synchronize and separate
// events into packets of input data changes occurring
// at the same moment in time. For example, motion
// of a mouse may set the RelX and RelY values for
// one motion, then emit a SynReport. The next motion
// will emit more RelX and RelY values and send
// another SynReport.
//
// SynConfig: to be determined.
//
// SynMTReport is used to synchronize and separate
// touch events. See the multi-touch-protocol.txt
// document for more information.
//
// SynDropped is used to indicate buffer overrun
// in the evdev client's event queue.
// Client should ignore all events up to and
// including next SynReport event and query the
// device (using EVIOCG* ioctls) to obtain its current state.
const (
	// SynReport terminates a packet of input changes that occurred at
	// the same moment in time (see the package comment above).
	SynReport = iota
	// SynConfig: semantics to be determined.
	SynConfig
	// SynMTReport separates multi-touch contact reports.
	SynMTReport
	// SynDropped signals a buffer overrun in the client's event queue.
	SynDropped
)
|
package shipping_details_test
import (
shippingDetails "Pinjem/businesses/shipping_details"
"Pinjem/businesses/shipping_details/mocks"
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// Shared test fixtures: the mocked repository, the service under test,
// and a sample shipping-detail value.
var shippingDetailRepository mocks.DomainRepository
var shippingDetailService shippingDetails.DomainService
var shippingDetailDomain shippingDetails.Domain

// setup wires the service to the mock repository (15-minute timeout)
// and (re)populates the sample fixture. Called at the top of each test.
func setup() {
	shippingDetailService = shippingDetails.NewUsecase(&shippingDetailRepository, time.Minute*15)
	shippingDetailDomain = shippingDetails.Domain{
		Id:             1,
		OrderId:        1,
		DestProvinsi:   "Jawa Barat",
		DestKota:       "Bandung",
		DestKecamatan:  "Cibadak",
		DestDesa:       "Cibadak",
		DestAddress:    "Jl. Cibadak",
		DestPostalCode: "40132",
		ShippingCost:   9000,
	}
}
// TestGetAllShippingDetails checks the happy path of GetAll.
// (Local result renamed from `shippingDetails`, which shadowed the
// package import of the same name.)
func TestGetAllShippingDetails(t *testing.T) {
	setup()
	shippingDetailRepository.On("GetAll", mock.Anything).Return([]shippingDetails.Domain{shippingDetailDomain}, nil)
	t.Run("Test Case 1 | Valid Get All Shipping Details", func(t *testing.T) {
		details, err := shippingDetailService.GetAll(context.Background())
		if err != nil {
			t.Errorf("Error: %v", err)
		}
		if len(details) < 1 {
			t.Errorf("Error: %v", "No Shipping Details")
		}
		assert.NoError(t, err)
		assert.Equal(t, 1, len(details))
	})
}
// TestGetShippingDetailById checks the happy path of GetById.
func TestGetShippingDetailById(t *testing.T) {
	setup()
	shippingDetailRepository.On("GetById", mock.Anything, mock.AnythingOfType("uint")).Return(shippingDetailDomain, nil)
	t.Run("Test Case 1 | Valid Get Shipping Detail By Id", func(t *testing.T) {
		got, err := shippingDetailService.GetById(context.Background(), 1)
		if err != nil {
			t.Errorf("Error: %v", err)
		}
		if got.Id == 0 {
			t.Errorf("Error: %v", "No Shipping Details")
		}
		assert.NoError(t, err)
		assert.Equal(t, uint(1), got.Id)
	})
}
// TestGetShippingDetailByOrderId checks the happy path of GetByOrderId.
func TestGetShippingDetailByOrderId(t *testing.T) {
	setup()
	shippingDetailRepository.On("GetByOrderId", mock.Anything, mock.Anything).Return(shippingDetailDomain, nil)
	t.Run("Test Case 1 | Valid Get Shipping Detail By Order Id", func(t *testing.T) {
		got, err := shippingDetailService.GetByOrderId(context.Background(), 1)
		if err != nil {
			t.Errorf("Error: %v", err)
		}
		if got.Id < 1 {
			t.Errorf("Error: %v", "No Shipping Details")
		}
		assert.NoError(t, err)
		assert.Equal(t, uint(1), got.Id)
	})
}
// TestCreateShippingDetail checks the happy path of Create.
func TestCreateShippingDetail(t *testing.T) {
	setup()
	shippingDetailRepository.On("Create", mock.Anything, mock.AnythingOfType("Domain")).Return(shippingDetailDomain, nil)
	t.Run("Test Case 1 | Valid Create Shipping Detail", func(t *testing.T) {
		created, err := shippingDetailService.Create(context.Background(), shippingDetailDomain)
		if err != nil {
			t.Errorf("Error: %v", err)
		}
		if created.Id < 1 {
			t.Errorf("Error: %v", "No Shipping Details")
		}
		assert.NoError(t, err)
		assert.Equal(t, shippingDetailDomain, created)
	})
}
// TestDeleteShippingDetail checks that Delete succeeds for a valid id.
func TestDeleteShippingDetail(t *testing.T) {
	setup()
	shippingDetailRepository.On("Delete", mock.Anything, mock.AnythingOfType("uint")).Return(nil)
	t.Run("Test Case 1 | Valid Delete Shipping Detail", func(t *testing.T) {
		delErr := shippingDetailService.Delete(context.Background(), 1)
		if delErr != nil {
			t.Errorf("Error: %v", delErr)
		}
		assert.NoError(t, delErr)
	})
}
// TestDeleteShippingDetailByOrderId checks that DeleteByOrderId
// succeeds for a valid order id.
func TestDeleteShippingDetailByOrderId(t *testing.T) {
	setup()
	shippingDetailRepository.On("DeleteByOrderId", mock.Anything, mock.AnythingOfType("uint")).Return(nil)
	t.Run("Test Case 1 | Valid Delete Shipping Detail By Order Id", func(t *testing.T) {
		delErr := shippingDetailService.DeleteByOrderId(context.Background(), 1)
		if delErr != nil {
			t.Errorf("Error: %v", delErr)
		}
		assert.NoError(t, delErr)
	})
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"encoding/json"
"fmt"
"github.com/coreos/etcd/client"
"log"
"os"
"strings"
"time"
_ "github.com/lib/pq"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/golang/glog"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"io/ioutil"
"k8s.io/client-go/tools/clientcmd"
"io"
"net/http"
"bytes"
"compress/gzip"
"github.com/mholt/archiver"
operatorv1 "github.com/cloud-ark/kubeplus/operator-manager/pkg/apis/operatorcontroller/v1"
clientset "github.com/cloud-ark/kubeplus/operator-manager/pkg/client/clientset/versioned"
operatorscheme "github.com/cloud-ark/kubeplus/operator-manager/pkg/client/clientset/versioned/scheme"
informers "github.com/cloud-ark/kubeplus/operator-manager/pkg/client/informers/externalversions"
listers "github.com/cloud-ark/kubeplus/operator-manager/pkg/client/listers/operatorcontroller/v1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
)
// controllerAgentName identifies this controller in recorded Events.
const controllerAgentName = "operator-controller"

const (
	// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
	SuccessSynced = "Synced"
	// ErrResourceExists is used as part of the Event 'reason' when a Foo fails
	// to sync due to a Deployment of the same name already existing.
	ErrResourceExists = "ErrResourceExists"
	// MessageResourceExists is the message used for Events when a resource
	// fails to sync due to a Deployment already existing
	MessageResourceExists = "Resource %q already exists and is not managed by Foo"
	// MessageResourceSynced is the message used for an Event fired when a Foo
	// is synced successfully
	MessageResourceSynced = "Foo synced successfully"
)

var (
	// etcdServiceURL is the etcd endpoint; assigned in init below.
	etcdServiceURL string
	// etcd keys under which the operator-manager keeps its state.
	openAPISpecKey        = "/openAPISpecRegistered"
	operatorsToDeleteKey  = "/operatorsToDelete"
	operatorsToInstallKey = "/operatorsToInstall"
	chartValuesKey        = "/chartvalues"
)

// init points the controller at the local etcd endpoint.
func init() {
	etcdServiceURL = "http://localhost:2379"
}
// Controller is the controller implementation for Foo resources
type Controller struct {
	// kubeclientset is a standard kubernetes clientset
	kubeclientset kubernetes.Interface
	// sampleclientset is a clientset for our own API group
	sampleclientset clientset.Interface
	// deploymentsLister/deploymentsSynced expose the shared Deployment
	// informer's cache and its initial-sync signal.
	deploymentsLister appslisters.DeploymentLister
	deploymentsSynced cache.InformerSynced
	// foosLister/foosSynced do the same for Operator resources.
	foosLister listers.OperatorLister
	foosSynced cache.InformerSynced
	// workqueue is a rate limited work queue. This is used to queue work to be
	// processed instead of performing it as soon as a change happens. This
	// means we can ensure we only process a fixed amount of resources at a
	// time, and makes it easy to ensure we are never processing the same item
	// simultaneously in two different workers.
	workqueue workqueue.RateLimitingInterface
	// recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	recorder record.EventRecorder
}
// NewController returns a new sample controller
// NewController wires up listers, the workqueue, the event recorder and
// the Operator informer's event handlers, and returns the controller.
func NewController(
	kubeclientset kubernetes.Interface,
	sampleclientset clientset.Interface,
	kubeInformerFactory kubeinformers.SharedInformerFactory,
	sampleInformerFactory informers.SharedInformerFactory) *Controller {

	// Obtain references to shared index informers for the Deployment
	// and Operator types.
	deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
	fooInformer := sampleInformerFactory.Operatorcontroller().V1().Operators()

	// Add operator-controller types to the default Kubernetes Scheme so
	// Events can be logged for operator-controller types.
	// NOTE(review): AddToScheme returns an error that is ignored here;
	// upstream sample-controller wraps this call in utilruntime.Must.
	operatorscheme.AddToScheme(scheme.Scheme)
	glog.V(4).Info("Creating event broadcaster")
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})

	controller := &Controller{
		kubeclientset:     kubeclientset,
		sampleclientset:   sampleclientset,
		deploymentsLister: deploymentInformer.Lister(),
		deploymentsSynced: deploymentInformer.Informer().HasSynced,
		foosLister:        fooInformer.Lister(),
		foosSynced:        fooInformer.Informer().HasSynced,
		workqueue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Operators"),
		recorder:          recorder,
	}

	glog.Info("Setting up event handlers")
	// Set up an event handler for when Operator resources change.
	fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: controller.enqueueFoo,
		UpdateFunc: func(old, new interface{}) {
			newDepl := new.(*operatorv1.Operator)
			oldDepl := old.(*operatorv1.Operator)
			if newDepl.ResourceVersion == oldDepl.ResourceVersion {
				// Periodic resync sends update events for all known
				// objects; identical ResourceVersions mean no real change.
				return
			}
			// Idiom fix: dropped the redundant `else` after `return`.
			controller.enqueueFoo(new)
		},
		DeleteFunc: func(obj interface{}) {
			controller.deleteOperator(obj)
		},
	})
	return controller
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
	defer runtime.HandleCrash()
	// Ensure workers observing the queue shut down when Run returns.
	defer c.workqueue.ShutDown()
	// Start the informer factories to begin populating the informer caches
	glog.Info("Starting Foo controller")
	// Wait for the caches to be synced before starting workers
	glog.Info("Waiting for informer caches to sync")
	if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	glog.Info("Starting workers")
	// Launch `threadiness` workers to process Foo resources.
	for i := 0; i < threadiness; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	glog.Info("Started workers")
	// Block until the stop channel is closed.
	<-stopCh
	glog.Info("Shutting down workers")
	return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
// runWorker loops, draining the workqueue one item at a time until
// processNextWorkItem reports shutdown.
func (c *Controller) runWorker() {
	for {
		if !c.processNextWorkItem() {
			return
		}
	}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
// processNextWorkItem reads one item off the workqueue and processes it
// via syncHandler. It returns false only when the queue is shutting down.
func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	// Wrap the work in a func so c.workqueue.Done is deferred per item.
	err := func(obj interface{}) error {
		// Done tells the workqueue we finished this item; Forget (below)
		// additionally clears its rate-limit history so it is not retried.
		defer c.workqueue.Done(obj)
		var key string
		var ok bool
		// Items are namespace/name strings; anything else is invalid and
		// must be Forgotten to avoid an endless reprocessing loop.
		if key, ok = obj.(string); !ok {
			c.workqueue.Forget(obj)
			runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
			return nil
		}
		// Sync the Foo resource identified by this namespace/name key.
		if err := c.syncHandler(key); err != nil {
			return fmt.Errorf("error syncing '%s': %s", key, err.Error())
		}
		// Success: drop the item from the rate limiter.
		c.workqueue.Forget(obj)
		glog.Infof("Successfully synced '%s'", key)
		return nil
	}(obj)
	// Simplification: the original duplicated `return true` in both the
	// error and success branches; report the error (if any) once and
	// fall through to a single return.
	if err != nil {
		runtime.HandleError(err)
	}
	return true
}
// enqueueFoo converts a Foo resource into a namespace/name key and adds
// it to the rate-limited workqueue. Callers must only pass Foo objects.
func (c *Controller) enqueueFoo(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		runtime.HandleError(err)
		return
	}
	c.workqueue.AddRateLimited(key)
}
// handleObject accepts any resource implementing metav1.Object and, when
// it is owned by a Foo (via a controller OwnerReference), enqueues that
// Foo for processing. Objects without a Foo owner are skipped. Deleted
// objects arriving as tombstones are unwrapped first.
func (c *Controller) handleObject(obj interface{}) {
	object, ok := obj.(metav1.Object)
	if !ok {
		// The object may be a tombstone left by a missed delete event;
		// recover the underlying resource from it.
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			runtime.HandleError(fmt.Errorf("error decoding object, invalid type"))
			return
		}
		object, ok = tombstone.Obj.(metav1.Object)
		if !ok {
			runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
			return
		}
		glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
	}
	glog.V(4).Infof("Processing object: %s", object.GetName())
	ownerRef := metav1.GetControllerOf(object)
	if ownerRef == nil {
		return
	}
	// Only objects controlled by a Foo are interesting here.
	if ownerRef.Kind != "Foo" {
		return
	}
	foo, err := c.foosLister.Operators(object.GetNamespace()).Get(ownerRef.Name)
	if err != nil {
		glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
		return
	}
	c.enqueueFoo(foo)
}
// deleteOperator handles deletion of an Operator custom resource: it
// records the chart URL for removal, deletes the associated ConfigMap and
// un-registers the OpenAPI-spec entry in etcd.
func (c *Controller) deleteOperator(obj interface{}) {
	// Fix: report key errors instead of panicking inside an event handler.
	if _, err := cache.MetaNamespaceKeyFunc(obj); err != nil {
		runtime.HandleError(err)
		return
	}
	// Fix: use a checked type assertion; tombstones or foreign types would
	// previously crash the controller.
	foo, ok := obj.(*operatorv1.Operator)
	if !ok {
		runtime.HandleError(fmt.Errorf("expected *operatorv1.Operator but got %#v", obj))
		return
	}
	fmt.Println("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
	operatorName := foo.Spec.Name
	fmt.Printf("Operator to delete:%s\n", operatorName)
	operatorChartURL := foo.Spec.ChartURL
	storeChartURL(operatorsToDeleteKey, operatorChartURL)
	deleteConfigMap(operatorChartURL, c.kubeclientset)
	deleteChartURL(openAPISpecKey, operatorChartURL)
	c.workqueue.Forget(obj)
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Foo resource
// with the current status of the resource.
//
// key is a namespace/name string from the workqueue. The handler stores
// the operator's chart URL and values in etcd, waits for the operator's
// CRD list to appear there, and registers the operator's OpenAPI spec on
// first sight.
func (c *Controller) syncHandler(key string) error {
	// Convert the namespace/name string into a distinct namespace and name
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// A malformed key can never be processed; report it and drop it.
		runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
		return nil
	}
	// Get the Foo resource with this namespace/name
	foo, err := c.foosLister.Operators(namespace).Get(name)
	if err != nil {
		// The Foo resource may no longer exist, in which case we stop
		// processing.
		if errors.IsNotFound(err) {
			runtime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
			return nil
		}
		return err
	}
	fmt.Println("**************************************")
	operatorName := foo.Spec.Name
	operatorChartURL := foo.Spec.ChartURL
	operatorChartValues := foo.Spec.Values
	fmt.Printf("Operator Name:%s\n", operatorName)
	fmt.Printf("Chart URL:%s\n", operatorChartURL)
	fmt.Printf("Values:%v\n", operatorChartValues)
	// Record the chart for installation and its values in etcd.
	storeChartURL(operatorsToInstallKey, operatorChartURL)
	storeEtcd(chartValuesKey+"/"+operatorChartURL, operatorChartValues)
	// Poll etcd until another component publishes the operator's CRD list.
	// NOTE(review): this loop has no upper bound — if the CRD list never
	// appears, this worker goroutine polls every 5s forever.
	var operatorCRDString string
	for {
		operatorCRDString = getOperatorCRDs(operatorChartURL)
		if operatorCRDString != "" {
			break
		}
		time.Sleep(time.Second * 5)
	}
	crds := make([]string, 0)
	if err := json.Unmarshal([]byte(operatorCRDString), &crds); err != nil {
		panic(err)
	}
	fmt.Printf("OperatorCRD:%v\n", crds)
	fmt.Println("Checking if OpenAPI Spec for the Operator is registered or not")
	openAPISpecRegistered := isOpenAPISpecRegistered(operatorChartURL)
	if !openAPISpecRegistered {
		fmt.Println("OpenAPI Spec for the Operator not registered.")
		// NOTE(review): this shadows the outer operatorName with the name
		// parsed from the chart URL — confirm that is intended.
		operatorName, _ := parseChartNameVersion(operatorChartURL)
		operatorOpenAPIConfigMapName := uploadOperatorOpenAPISpec(operatorChartURL, c.kubeclientset)
		saveOperatorData(operatorName, crds, operatorOpenAPIConfigMapName)
		recordOpenAPISpecRegistration(operatorChartURL)
		status := "READY"
		// NOTE(review): the error from updateFooStatus is ignored here.
		c.updateFooStatus(foo, &crds, status)
	} else {
		fmt.Println("OpenAPI Spec for the Operator already registered.")
	}
	c.recorder.Event(foo, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced)
	return nil
}
// isOpenAPISpecRegistered reports whether the given operator chart URL has
// already been recorded under the OpenAPI-spec key in etcd.
func isOpenAPISpecRegistered(operatorChartURL string) bool {
	return checkIfOperatorURLPresentInETCD(openAPISpecKey, operatorChartURL)
}
// recordOpenAPISpecRegistration marks the operator chart URL as having its
// OpenAPI spec registered by appending it to the etcd-backed list stored
// under openAPISpecKey.
func recordOpenAPISpecRegistration(operatorChartURL string) {
	storeChartURL(openAPISpecKey, operatorChartURL)
}
// saveOperatorData persists metadata about an installed Operator in etcd:
// the operator entry itself (name, CRD list, OpenAPI-spec ConfigMap name)
// plus, for each CRD, its kind/endpoint/plural/composition details.
func saveOperatorData(operatorName string, crds []string, operatorOpenAPIConfigMapName string) {
	resourceKey := "/operators"
	operatorDataMap := map[string]interface{}{
		"Name":            operatorName,
		"CustomResources": crds,
		"ConfigMapName":   operatorOpenAPIConfigMapName,
	}
	operatorMap := map[string]map[string]interface{}{
		"Operator": operatorDataMap,
	}
	operatorMapList := []map[string]map[string]interface{}{operatorMap}
	storeEtcd(resourceKey, operatorMapList)
	cfg, err := clientcmd.BuildConfigFromFlags("", "")
	if err != nil {
		panic(err)
	}
	// Fix: the error from NewForConfig was previously ignored; a nil
	// client would have crashed on first use.
	crdclient, err := apiextensionsclientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	for _, crdName := range crds {
		crdObj, err := crdclient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crdName, metav1.GetOptions{})
		if err != nil {
			// Fix: fmt.Errorf only builds an error value, it prints
			// nothing; log and skip this CRD instead of dereferencing a
			// nil crdObj below.
			fmt.Printf("Error:%s\n", err)
			continue
		}
		group := crdObj.Spec.Group
		version := crdObj.Spec.Version
		endpoint := "apis/" + group + "/" + version
		kind := crdObj.Spec.Names.Kind
		plural := crdObj.Spec.Names.Plural
		fmt.Printf("Group:%s, Version:%s, Kind:%s, Plural:%s, Endpoint:%s\n", group, version, kind, plural, endpoint)
		objectMeta := crdObj.ObjectMeta
		fmt.Printf("Object Meta:%v\n", objectMeta)
		// The "composition" annotation lists the sub-resources this CRD
		// is composed of (stored alongside the CRD details).
		annotations := objectMeta.GetAnnotations()
		composition := annotations["composition"]
		crdDetailsMap := map[string]interface{}{
			"kind":        kind,
			"endpoint":    endpoint,
			"plural":      plural,
			"composition": composition,
		}
		storeEtcd("/"+crdName, crdDetailsMap)
		storeEtcd("/"+kind+"-OpenAPISpecConfigMap", operatorOpenAPIConfigMapName)
	}
}
// storeEtcd marshals resourceData to JSON and writes it to etcd under
// resourceKey. Marshal failures panic; etcd connectivity or write
// failures are fatal, matching the rest of this file.
func storeEtcd(resourceKey string, resourceData interface{}) {
	jsonData, err := json.Marshal(&resourceData)
	if err != nil {
		panic(err)
	}
	etcdClient, err := client.New(client.Config{
		Endpoints: []string{etcdServiceURL},
		Transport: client.DefaultTransport,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(etcdClient)
	if _, err := kapi.Set(context.Background(), resourceKey, string(jsonData), nil); err != nil {
		log.Fatal(err)
	}
}
// uploadOperatorOpenAPISpec downloads and extracts the operator chart,
// then publishes its openapispec.json as a ConfigMap, returning the
// ConfigMap name (which equals the chart name).
func uploadOperatorOpenAPISpec(chartURL string, kubeclientset kubernetes.Interface) string {
	extractOperatorChart(chartURL)
	return createConfigMap(chartURL, kubeclientset)
}
// extractOperatorChart downloads the chart tgz from chartURL, gunzips it
// into <chartName>.tar in the current directory, and untars it into
// ./<chartName>, listing the extracted files. All failures are fatal.
func extractOperatorChart(chartURL string) {
	chartName, _ := parseChartNameVersion(chartURL)
	chartTarFile := chartName + ".tar"
	fmt.Printf("Chart tgz file name:%s\n", chartTarFile)
	out, err := os.Create(chartTarFile)
	// Fix: check the error before deferring Close; previously Close was
	// deferred on a possibly-nil file handle.
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	resp, err := http.Get(chartURL)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Buffer the whole response, then gunzip it into the .tar file.
	var buf bytes.Buffer
	body, err1 := ioutil.ReadAll(resp.Body)
	if err1 != nil {
		log.Fatal(err1)
	}
	buf.Write(body)
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Read tgz file in buffer")
	fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC())
	if _, err := io.Copy(out, zr); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Saving tgz buffer to file")
	if err := zr.Close(); err != nil {
		log.Fatal(err)
	}
	// Fix: the error from os.Getwd was previously ignored.
	currentDir, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	dirName := currentDir + "/" + chartName
	fmt.Printf("Chart tar file downloaded to:%s\n", dirName)
	if _, err := os.Stat(dirName); os.IsNotExist(err) {
		fmt.Printf("%s does not exist\n", dirName)
		if err := os.Mkdir(dirName, 0755); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("Untaring the Chart")
	if err := archiver.Tar.Open(chartTarFile, dirName); err != nil {
		log.Fatal(err)
	}
	os.Chdir(dirName)
	os.Chdir(chartName)
	cwd, _ := os.Getwd()
	files, err := ioutil.ReadDir(cwd)
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range files {
		fmt.Println(f.Name())
	}
	// Restore the original working directory for subsequent callers.
	os.Chdir(currentDir)
}
// deleteConfigMap removes the ConfigMap (named after the chart) holding
// the operator's OpenAPI spec from the default namespace. Failures are
// printed but not fatal.
func deleteConfigMap(chartURL string, kubeclientset kubernetes.Interface) {
	name, _ := parseChartNameVersion(chartURL)
	if err := kubeclientset.CoreV1().ConfigMaps("default").Delete(name, &metav1.DeleteOptions{}); err != nil {
		fmt.Printf("Error:%s\n", err.Error())
	}
}
// createConfigMap reads openapispec.json from the extracted chart
// directory and stores its contents in a ConfigMap named after the chart
// in the default namespace. It returns the ConfigMap/chart name.
func createConfigMap(chartURL string, kubeclientset kubernetes.Interface) string {
	chartName, _ := parseChartNameVersion(chartURL)
	// Fix: the error from os.Getwd was previously ignored.
	currentDir, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	dirName := currentDir + "/" + chartName
	os.Chdir(dirName)
	os.Chdir(chartName)
	cwd, _ := os.Getwd()
	fmt.Printf("dirName:%s\n", cwd)
	openapispecFile := "openapispec.json"
	jsonContents, err := ioutil.ReadFile(openapispecFile)
	// Fix: check the read error before using the contents; the bytes were
	// previously converted to a string ahead of the error check.
	if err != nil {
		log.Fatal(err)
	}
	jsonContents1 := string(jsonContents)
	// Restore the original working directory before talking to the API.
	os.Chdir(currentDir)
	configMapToCreate := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: chartName,
		},
		Data: map[string]string{
			"openapispec": jsonContents1,
		},
	}
	if _, err := kubeclientset.CoreV1().ConfigMaps("default").Create(configMapToCreate); err != nil {
		fmt.Printf("Error:%s\n", err.Error())
	}
	return chartName
}
// parseChartNameVersion splits a chart URL such as
// https://host/path/postgres-crd-v2-chart-0.0.2.tgz into the chart name
// ("postgres-crd-v2-chart") and version ("0.0.2"): the base name has its
// .tgz suffix removed, and everything after the last '-' is the version.
// If the base name contains no '-', the whole base name is returned as
// the name with an empty version.
func parseChartNameVersion(chartURL string) (string, string) {
	splitOnSlash := strings.Split(chartURL, "/")
	lastItem := splitOnSlash[len(splitOnSlash)-1]
	fmt.Printf("Last item:%s\n", lastItem)
	candidate := strings.Split(lastItem, ".tgz")[0]
	fmt.Printf("Candidate:%s\n", candidate)
	nameVersionSplitIndex := strings.LastIndex(candidate, "-")
	// Fix: a base name without '-' previously produced candidate[0:-1],
	// which panics; treat it as a name with no version instead.
	if nameVersionSplitIndex < 0 {
		return candidate, ""
	}
	version := candidate[nameVersionSplitIndex+1:]
	fmt.Printf("Version:%s\n", version)
	name := candidate[:nameVersionSplitIndex]
	fmt.Printf("Name:%s\n", name)
	return name, version
}
// deleteChartURL removes chartURL from the JSON-encoded string list stored
// in etcd under resourceKey by rewriting the list without that entry.
func deleteChartURL(resourceKey, chartURL string) {
	cfg := client.Config{
		Endpoints: []string{etcdServiceURL},
		Transport: client.DefaultTransport,
	}
	c, err := client.New(cfg)
	if err != nil {
		// Fix: fmt.Errorf only constructs an error value; print it so the
		// failure is actually visible.
		fmt.Printf("Error: %v\n", err)
	}
	kapi := client.NewKeysAPI(c)
	operatorList := getList(resourceKey)
	// Build a new list that excludes the URL being deleted.
	var newList []string
	for _, operatorURL := range operatorList {
		if operatorURL != chartURL {
			newList = append(newList, operatorURL)
		}
	}
	jsonOperatorList, err2 := json.Marshal(&newList)
	if err2 != nil {
		panic(err2)
	}
	resourceValue := string(jsonOperatorList)
	_, err1 := kapi.Set(context.Background(), resourceKey, resourceValue, nil)
	if err1 != nil {
		// Fix: log the actual Set error; previously the stale err from
		// client.New (usually nil) was logged instead of err1.
		log.Fatal(err1)
	}
}
// storeChartURL appends chartURL to the etcd-backed string list stored
// under resourceKey (deduplicated by addToList).
func storeChartURL(resourceKey, chartURL string) {
	addToList(resourceKey, chartURL)
}
// checkIfOperatorURLPresentInETCD reports whether chartURL already appears
// in the list stored in etcd under resourceKey.
func checkIfOperatorURLPresentInETCD(resourceKey, chartURL string) bool {
	for _, candidate := range getList(resourceKey) {
		if candidate == chartURL {
			return true
		}
	}
	return false
}
// getOperatorCRDs returns the raw JSON-encoded list of CRD names stored in
// etcd under the chart URL, or "" when it has not been published yet.
func getOperatorCRDs(chartURL string) string {
	return getSingleValue(chartURL)
}
// addToList appends chartURL to the JSON-encoded string list stored in
// etcd under resourceKey, leaving the list unchanged when the URL is
// already present, then writes the list back.
func addToList(resourceKey, chartURL string) {
	cfg := client.Config{
		Endpoints: []string{etcdServiceURL},
		Transport: client.DefaultTransport,
	}
	c, err := client.New(cfg)
	if err != nil {
		// Fix: fmt.Errorf only constructs an error value; print it so the
		// failure is actually visible.
		fmt.Printf("Error: %v\n", err)
	}
	kapi := client.NewKeysAPI(c)
	operatorList := getList(resourceKey)
	if !checkIfOperatorURLPresentInETCD(resourceKey, chartURL) {
		operatorList = append(operatorList, chartURL)
	}
	jsonOperatorList, err2 := json.Marshal(&operatorList)
	if err2 != nil {
		panic(err2)
	}
	resourceValue := string(jsonOperatorList)
	_, err1 := kapi.Set(context.Background(), resourceKey, resourceValue, nil)
	if err1 != nil {
		// Fix: log the actual Set error; previously the stale err from
		// client.New (usually nil) was logged instead of err1.
		log.Fatal(err1)
	}
}
// getList fetches and decodes the JSON string list stored in etcd under
// resourceKey. A missing key or fetch error yields an empty (nil) list;
// a corrupt stored value panics.
func getList(resourceKey string) []string {
	cfg := client.Config{
		Endpoints: []string{etcdServiceURL},
		Transport: client.DefaultTransport,
	}
	c, err := client.New(cfg)
	if err != nil {
		// Fix: fmt.Errorf only constructs an error value; print it so the
		// failure is actually visible.
		fmt.Printf("Error: %v\n", err)
	}
	kapi := client.NewKeysAPI(c)
	var operatorList []string
	resp, err1 := kapi.Get(context.Background(), resourceKey, nil)
	if err1 != nil {
		// Fix: same discarded fmt.Errorf as above; a missing key is a
		// normal condition and just yields an empty list.
		fmt.Printf("Error: %v\n", err1)
		return operatorList
	}
	if err = json.Unmarshal([]byte(resp.Node.Value), &operatorList); err != nil {
		panic(err)
	}
	return operatorList
}
// getSingleValue returns the raw string value stored in etcd at
// resourceKey, or "" when the key is missing or the lookup fails.
func getSingleValue(resourceKey string) string {
	cfg := client.Config{
		Endpoints: []string{etcdServiceURL},
		Transport: client.DefaultTransport,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	resp, err1 := kapi.Get(context.Background(), resourceKey, nil)
	if err1 != nil {
		// Fix: fmt.Errorf only constructs an error value; print it so the
		// failure is actually visible.
		fmt.Printf("Error: %v\n", err1)
		return ""
	}
	return resp.Node.Value
}
// updateFooStatus writes the given CRD list and status string into the
// Status block of a deep copy of foo and pushes it to the API server.
// The update error, if any, is printed and returned.
func (c *Controller) updateFooStatus(foo *operatorv1.Operator,
	crds *[]string, status string) error {
	// NEVER modify objects from the store. It's a read-only, local cache.
	// DeepCopy the object before mutating it.
	fooCopy := foo.DeepCopy()
	fooCopy.Status.CustomResourceDefinitions = *crds
	fooCopy.Status.Status = status
	// Until #38113 is merged, we must use Update instead of UpdateStatus to
	// update the Status block of the Foo resource. UpdateStatus will not
	// allow changes to the Spec of the resource, which is ideal for ensuring
	// nothing other than resource status has been updated.
	_, err := c.sampleclientset.OperatorcontrollerV1().Operators(foo.Namespace).Update(fooCopy)
	if err != nil {
		// Fix: fmt.Println does not interpret format verbs; use Printf so
		// the error value is rendered instead of a literal "%v".
		fmt.Printf("ERROR in UpdateFooStatus %v\n", err)
	}
	return err
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//89. Gray Code
//The gray code is a binary numeral system where two successive values differ in only one bit.
//Given a non-negative integer n representing the total number of bits in the code, print the sequence of gray code. A gray code sequence must begin with 0.
//For example, given n = 2, return [0,1,3,2]. Its gray code sequence is:
//00 - 0
//01 - 1
//11 - 3
//10 - 2
//Note:
//For a given n, a gray code sequence is not uniquely defined.
//For example, [0,2,3,1] is also a valid gray code sequence according to the above definition.
//For now, the judge is able to judge based on one instance of gray code sequence. Sorry about that.
//func grayCode(n int) []int {
//}
// Time Is Money
|
package bmredis
import (
"github.com/alfredyang1986/blackmirror/bmerror"
"github.com/go-redis/redis"
"os"
"strconv"
"sync"
)
var onceConfig sync.Once
var redisClient *redis.Client
// GetRedisClient lazily builds a process-wide redis client from the
// BM_REDIS_* environment variables (host, port, password and db index) on
// the first call, then returns that same client on every later call.
func GetRedisClient() *redis.Client {
	onceConfig.Do(func() {
		// A non-numeric BM_REDIS_DB is a configuration error.
		db, err := strconv.Atoi(os.Getenv("BM_REDIS_DB"))
		bmerror.PanicError(err)
		redisClient = redis.NewClient(&redis.Options{
			Addr:     os.Getenv("BM_REDIS_HOST") + ":" + os.Getenv("BM_REDIS_PORT"),
			Password: os.Getenv("BM_REDIS_PASS"),
			DB:       db,
		})
	})
	return redisClient
}
|
package main
import "fmt"
import "strings"
import "sort"
import "reflect"
// _Sort strips all spaces from str, splits the remainder into per-rune
// strings and returns them sorted lexicographically.
func _Sort(str string) []string {
	chars := make([]string, 0, len(str))
	for _, r := range str {
		if r != ' ' {
			chars = append(chars, string(r))
		}
	}
	sort.Strings(chars)
	return chars
}
// IsAnagram reports whether str1 and str2 contain exactly the same
// characters when spaces are ignored, i.e. whether one is an anagram of
// the other.
func IsAnagram(str1 string, str2 string) bool {
	return reflect.DeepEqual(_Sort(str1), _Sort(str2))
}
// main demonstrates IsAnagram with a sample pair of phrases.
func main() {
	var str1 string = "anagrams"
	var str2 string = "ars magna"
	// Print whether the two phrases are anagrams of each other.
	fmt.Println(IsAnagram(str1, str2))
}
|
package storage
import (
"io"
"time"
)
// StorageEntry describes one item (file or directory) produced by a
// Storage listing.
type StorageEntry struct {
	// Title is the human-readable name of the entry.
	Title string
	// Path is the storage path used in further List/Download calls.
	Path string
	// IsDir reports whether the entry is a directory.
	IsDir bool
	// Updated is the entry's last modification time.
	Updated time.Time
	// MimeType is the MIME type of the entry's content.
	MimeType string
}
// Storage abstracts a browsable file store: listing a directory, checking
// whether a path can be downloaded, and streaming an object's contents to
// a writer.
type Storage interface {
	// List returns the entries directly under path.
	List(path string) ([]StorageEntry, error)
	// IsDownloadable reports whether the object at path can be downloaded.
	IsDownloadable(path string) (bool, error)
	// Download writes the contents of the object at path to w.
	Download(w io.Writer, path string) error
}
|
package ticker
import (
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
"strconv"
)
// TestTicker exercises the ticker Manager: adding a ticker, cancelling
// one, and resetting the whole manager.
func TestTicker(t *testing.T) {
	ticker := NewManager()
	Convey("TestTickerManager", t, func() {
		var result int
		key := "test1"
		Convey("AddTicker", func() {
			// With no ticker registered, HasTicker must report false.
			So(ticker.HasTicker(key), ShouldEqual, false)
			// Cancelling a non-existent ticker must be a no-op.
			ticker.CancelTicker(key)
			// Register a ticker that increments result every second.
			ticker.AddTicker(key, 1 * time.Second, func() {
				t.Logf("ticker: %s", key)
				result++
			})
			So(ticker.HasTicker(key), ShouldEqual, true)
			time.Sleep(2 * time.Second)
		})
		Convey("CancelTimer", func() {
			// Cancel the ticker, then verify it no longer fires.
			ticker.CancelTicker(key)
			result = 0
			So(ticker.HasTicker(key), ShouldEqual, false)
			time.Sleep(2 * time.Second)
			So(result == 0, ShouldEqual, true)
		})
		Convey("ResetTimer", func() {
			// Register several tickers, then Reset must remove them all
			// before any of them gets a chance to fire.
			for i := 1; i < 5; i++ {
				ticker.AddTicker(key + strconv.Itoa(i), 1 * time.Second, func(){
					t.Errorf("should not be execute")
				})
			}
			ticker.Reset()
			for i := 1; i < 5; i++ {
				So(ticker.HasTicker(key + strconv.Itoa(i)), ShouldEqual, false)
			}
		})
	})
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"tptsreporter/grafana"
"tptsreporter/report"
"github.com/form3tech-oss/jwt-go"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
"github.com/pborman/uuid"
"gorm.io/gorm"
)
// handlerReporterHome serves the reporter home page, recording the
// request's host in the shared application data first.
func handlerReporterHome(w http.ResponseWriter, req *http.Request) {
	aD.Host = req.Host
	loadPage(`home`, w)
}
// handlerReporterTest renders the test.html template with the configured
// server host and route.
func handlerReporterTest(w http.ResponseWriter, req *http.Request) {
	log.Println("Test handler")
	var v = struct {
		Host  string
		Route string
	}{
		cfg.Server.Host,
		cfg.Server.Route,
	}
	// Fix: the template-execution error was silently discarded; log it so
	// rendering failures are visible.
	if err := tmplts.ExecuteTemplate(w, "test.html", &v); err != nil {
		log.Println(err)
	}
}
// handlerReporterLog renders the log-viewer page (logws.html), seeding it
// with the current contents and modification time of the reporter log
// file. Only GET is allowed.
func handlerReporterLog(w http.ResponseWriter, r *http.Request) {
	log.Println("Log handler")
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	p, lastMod, err := readFileIfModified(time.Time{})
	if err != nil {
		// Fall back to showing the error text with a zero mod time.
		p = []byte(err.Error())
		lastMod = time.Unix(0, 0)
	}
	var v = struct {
		Host    string
		Route   string
		Data    string
		LastMod string
	}{
		cfg.Server.Host,
		cfg.Server.Route,
		string(p),
		strconv.FormatInt(lastMod.UnixNano(), 16),
	}
	// Fix: the template-execution error was silently discarded; log it so
	// rendering failures are visible.
	if err := tmplts.ExecuteTemplate(w, "logws.html", &v); err != nil {
		log.Println(err)
	}
}
// handlerReporterLogWs upgrades the request to a websocket and starts the
// log-streaming writer/reader pair for it.
func handlerReporterLogWs(w http.ResponseWriter, r *http.Request) {
	log.Println("Log handler ws")
	// NOTE(review): the Upgrade/Connection headers are forced here before
	// calling Upgrade — presumably to satisfy the upgrader behind a proxy
	// that strips them; confirm this is still required.
	r.Header["Upgrade"] = []string{"websocket"}
	r.Header["Connection"] = []string{"Upgrade"}
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		if _, ok := err.(websocket.HandshakeError); !ok {
			log.Println(err)
		}
		return
	}
	// lastMod seeds the file watcher with the client's last-seen
	// modification time (hex-encoded nanoseconds in the form value).
	var lastMod time.Time
	if n, err := strconv.ParseInt(r.FormValue("lastMod"), 16, 64); err == nil {
		lastMod = time.Unix(0, n)
	}
	// writer pushes file updates/pings; reader blocks until the client
	// closes the connection.
	go writer(ws, lastMod)
	reader(ws)
}
// writer streams the reporter log file to the websocket client: on each
// fileTicker tick it sends the full file contents if modified since
// lastMod, and on each pingTicker tick it sends a ping to keep the
// connection alive. It runs until a write fails, then closes the socket.
func writer(ws *websocket.Conn, lastMod time.Time) {
	lastError := ""
	pingTicker := time.NewTicker(pingPeriod)
	fileTicker := time.NewTicker(filePeriod)
	defer func() {
		pingTicker.Stop()
		fileTicker.Stop()
		ws.Close()
	}()
	for {
		select {
		case <-fileTicker.C:
			var p []byte
			var err error
			p, lastMod, err = readFileIfModified(lastMod)
			if err != nil {
				// Send each distinct error text once rather than
				// repeating it on every tick.
				if s := err.Error(); s != lastError {
					lastError = s
					p = []byte(lastError)
				}
			} else {
				lastError = ""
			}
			if p != nil {
				ws.SetWriteDeadline(time.Now().Add(writeWait))
				if err := ws.WriteMessage(websocket.TextMessage, p); err != nil {
					return
				}
			}
		case <-pingTicker.C:
			ws.SetWriteDeadline(time.Now().Add(writeWait))
			if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}
func readFileIfModified(lastMod time.Time) ([]byte, time.Time, error) {
fi, err := os.Stat("logTPTSReporter.log")
if err != nil {
return nil, lastMod, err
}
if !fi.ModTime().After(lastMod) {
return nil, lastMod, nil
}
//log.Println("file modified")
p, err := ioutil.ReadFile("logTPTSReporter.log")
if err != nil {
return nil, fi.ModTime(), err
}
return p, fi.ModTime(), nil
}
// reader consumes (and discards) client messages, refreshing the read
// deadline on every pong, until the connection errors out; it then closes
// the socket.
func reader(ws *websocket.Conn) {
	defer ws.Close()
	ws.SetReadLimit(512)
	ws.SetReadDeadline(time.Now().Add(pongWait))
	ws.SetPongHandler(func(string) error {
		ws.SetReadDeadline(time.Now().Add(pongWait))
		return nil
	})
	for {
		if _, _, err := ws.ReadMessage(); err != nil {
			return
		}
	}
}
// hv2w serves the v2 websocket report entry point: it reads the initial
// client message (organization + JWT token), verifies the token and, on
// success, starts report generation over the websocket.
func hv2w(h ServeReportHandler, w http.ResponseWriter, req *http.Request) {
	ws, p, err := checkWebSocket(w, req)
	if err != nil {
		log.Println(err)
		return
	}
	log.Println("V2 Web Socket...")
	type dataJSON struct {
		Org   report.Organization
		Token string
	}
	var data dataJSON
	// Fix: the unmarshal error was silently ignored; a malformed payload
	// previously proceeded with a zero-valued organization and token.
	if err := json.Unmarshal(p, &data); err != nil {
		log.Println(err)
		return
	}
	org := data.Org
	if !verifyJWT(data.Token) {
		log.Println("Authentication failure!!!, JWT Token Invalid")
		return
	}
	dts := time.Now()
	sdb := report.StateReportGenerationDB{
		UUID:           aD.UUID,
		TimestampStart: dts.UnixNano(),
		IDOrg:          org.ID,
		NameOrg:        org.Name,
	}
	processWebSocket(&sdb, dts, h, ws, w, req)
}
// hv3h serves the v3 HTTP entry point: after cookie-based authorization it
// allocates a fresh report UUID, records a new report-generation state row
// and returns the loading page, which then opens the websocket (hv3w).
func hv3h(h ServeReportHandler, w http.ResponseWriter, req *http.Request) {
	if isAuthorized(req) {
		// Derive the websocket API URL from the dashboard-id segment of
		// the request URI.
		splitURL := strings.Split(req.URL.RequestURI(), "/")
		aD.UUID = uuid.New()
		aD.URL = "/v3/w/api/" + splitURL[4]
		aD.Host = cfg.Server.Host
		aD.Route = cfg.Server.Route
		dts := time.Now()
		// Seed the state row; hv3w later validates the UUID against it
		// within a 30-second freshness window.
		sdb := report.StateReportGenerationDB{
			UUID:                 aD.UUID,
			TimestampStart:       dts.UnixNano(),
			TimestampStateChange: dts.UnixNano(),
		}
		report.DB.Create(&sdb)
		loadPage(`loadingws`, w)
	} else {
		// NOTE(review): the message is concatenated directly after the
		// URI with no separator, and the failure writes no HTTP response
		// to the client — confirm both are intended.
		log.Println(req.Host, req.URL.RequestURI()+"Authorization failed!, browser session cookie not found")
	}
}
// hv3w serves the v3 websocket endpoint: it reads the client's UUID
// message, validates it against the state row created by hv3h within a
// 30-second window, and then runs report generation over the websocket.
func hv3w(h ServeReportHandler, w http.ResponseWriter, req *http.Request) {
	log.Println("Reporter version:3, on Web Socket.")
	ws, p, err := checkWebSocket(w, req)
	if err != nil {
		log.Println(err)
		return
	}
	type dataJSON struct {
		UUID string
		URL  string
	}
	// The organization is currently hard-coded for v3.
	org := report.Organization{
		ID:   3,
		Name: "Volkommen",
	}
	log.Println(org)
	var data dataJSON
	// Fix: the unmarshal error was silently ignored; a malformed payload
	// was previously treated as an (empty) UUID lookup.
	if err := json.Unmarshal(p, &data); err != nil {
		log.Println(err)
		return
	}
	var sdb report.StateReportGenerationDB
	resultDB := report.DB.Where("uuid = ?", data.UUID).First(&sdb)
	if dbIsRecordFound(resultDB) && time.Now().UnixNano()-sdb.TimestampStart <= (time.Duration(30)*time.Second).Nanoseconds() {
		log.Println("Client UUID Valid")
		sdb.IDOrg = org.ID
		sdb.NameOrg = org.Name
		sdb.TimestampStateChange = time.Now().UnixNano()
		report.DB.Save(sdb)
		// Reconstruct the start instant from the stored nanoseconds.
		dts := time.Unix(sdb.TimestampStart/int64(math.Pow10(9)), sdb.TimestampStart%int64(math.Pow10(9)))
		processWebSocket(&sdb, dts, h, ws, w, req)
	} else {
		log.Println("Authentication failure! Client UUID mismatch")
	}
}
// processWebSocket drives a report-generation session over an established
// websocket: if a cached report already exists it sends the file name
// immediately; otherwise it starts generation in a goroutine and loops,
// pushing progress updates to the client until the file is ready, an
// error state (800) is reached, or the timeout expires.
func processWebSocket(sdb *report.StateReportGenerationDB, dts time.Time, h ServeReportHandler, ws *websocket.Conn, w http.ResponseWriter, req *http.Request) {
	wsState := 0
	reportReady := isReportReady(sdb, req, dts)
	diffTime := time.Duration(dts.UnixNano()-sdb.TimestampStateChange) * time.Nanosecond
	if reportReady {
		if sdb.TimestampStart > 0 && diffTime.Nanoseconds() > 0 {
			log.Println("Same report was generated", diffTime.String(), "ago.")
		}
		log.Println("Report ready with Filename: ", sdb.NameFilePDF)
		sendFileWS(ws, sdb)
	} else {
		// State < 2 means generation has not started yet for this row.
		if sdb.State < 2 && !sdb.TimedOut {
			sdb.TimestampStateChange = time.Now().UnixNano()
			report.DB.Save(sdb)
			go routineInitReportGeneration(h, dts, sdb, req)
		}
		tick := time.Now()
		// NOTE(review): this is a busy-wait — the loop spins on sdb
		// fields mutated by the generation goroutine with no sleep and no
		// synchronization; confirm whether a channel/ticker was intended.
		for !sdb.ReadyToSendFile {
			if sdb.State == 800 {
				log.Println("Error in report generation!!!", sdb.Error)
				log.Println("Websocket Closed!", sdb.State)
				ws.Close()
				return
			}
			if (time.Now().UnixNano() - sdb.TimestampStateChange) > int64(timeOut.Nanoseconds()) {
				log.Println("Time Out!!!, Closing Websocket...", sdb.State)
				ws.Close()
				log.Println("Websocket Closed!", sdb.State)
				sdb.TimedOut = true
				sdb.TimestampStateChange = time.Now().UnixNano()
				report.DB.Save(sdb)
				return
			}
			timeElapsed := strconv.FormatInt(time.Now().Unix()-dts.Unix(), 10)
			// Emit a monitoring log line at most once per second.
			if time.Now().Unix()-tick.Unix() > int64((time.Duration(1) * time.Second).Seconds()) {
				tick = time.Now()
				log.Println("Monitoring state change:", sdb.Value+",", "Time elapsed:"+timeElapsed+"s.")
			}
			// Push a progress message whenever the numeric state changes.
			// NOTE(review): this assigns to the package-level err
			// variable, shared with other handlers.
			if wsState != sdb.State {
				err = wsWriteMessage(ws, "Progress", strconv.Itoa(sdb.State))
				if err != nil {
					log.Println(err)
					ws.Close()
				}
			}
			wsState = sdb.State
		}
		sendFileWS(ws, sdb)
	}
}
// sendFileWS tells the websocket client the name of the generated PDF,
// marks the transfer complete in the state row, and closes the socket.
func sendFileWS(ws *websocket.Conn, sdb *report.StateReportGenerationDB) {
	// Fix: use a local error instead of assigning to the shared
	// package-level err, which races between concurrent handlers.
	if err := wsWriteMessage(ws, "Filename", sdb.NameFilePDF); err != nil {
		log.Println(err)
		ws.Close()
		return
	}
	log.Println("File name sent to client. " + sdb.NameFilePDF)
	sdb.CompletedFileTransfer = true
	sdb.TimestampStateChange = time.Now().UnixNano()
	report.DB.Save(sdb)
	log.Println("File is ready for loading at the client side. " + sdb.NameFilePDF)
	ws.Close()
	log.Println("Websocket Closed!")
}
// isReportReady checks whether a PDF for the same dashboard/time-range/
// organization combination already exists in the cache. If so it copies
// the cached state into sdbNew and returns true; otherwise it initializes
// sdbNew for a fresh generation run and returns false.
func isReportReady(sdbNew *report.StateReportGenerationDB, req *http.Request, dts time.Time) bool {
	org := report.Organization{
		ID:   sdbNew.IDOrg,
		Name: sdbNew.NameOrg,
	}
	uidDash := getDashUID(req)
	rangeTime := timeRange(req)
	valueUnique := getUniqueValue(uidDash, rangeTime, org)
	var sdb report.StateReportGenerationDB
	resultDB := report.DB.Where("value = ?", valueUnique).First(&sdb)
	fileReady := isFileExist(report.Dir.Cache, sdb.NameFilePDF)
	if fileReady {
		copyStateDB(&sdb, sdbNew)
		sdbNew.TimestampStateChange = time.Now().UnixNano()
		report.DB.Save(sdbNew)
		return true
	} else if dbIsRecordFound(resultDB) && !fileReady {
		log.Println("Previous report generation is incomplete!, regenerating...")
	}
	// Initialize the state row for a new generation run.
	sdbNew.UIDDash = uidDash
	sdbNew.Value = valueUnique
	sdbNew.FromTS = rangeTime.FromTS
	sdbNew.ToTS = rangeTime.ToTS
	sdbNew.State = 1
	sdbNew.Message = "Report generation began."
	sdbNew.ReadyToSendFile = false
	sdbNew.CompletedFileTransfer = false
	sdbNew.TimestampStateChange = time.Now().UnixNano()
	// Fix: sdbNew is already a pointer; passing &sdbNew handed GORM a
	// **T, unlike the Save(sdbNew) call in the cached branch above.
	report.DB.Save(sdbNew)
	return false
}
// copyStateDB copies the generation-state fields from sdb (the cached DB
// row, source) into sdbNew (the row for the current request, destination).
// Note the direction: the FIRST argument is the source.
func copyStateDB(sdb *report.StateReportGenerationDB, sdbNew *report.StateReportGenerationDB) {
	sdbNew.Value = sdb.Value
	sdbNew.TitleDash = sdb.TitleDash
	sdbNew.FromTS = sdb.FromTS
	sdbNew.ToTS = sdb.ToTS
	sdbNew.CountPanels = sdb.CountPanels
	sdbNew.NameFilePDF = sdb.NameFilePDF
	sdbNew.State = sdb.State
	sdbNew.Error = sdb.Error
	sdbNew.Message = sdb.Message
	sdbNew.ReadyToSendFile = sdb.ReadyToSendFile
	sdbNew.CompletedFileTransfer = sdb.CompletedFileTransfer
	sdbNew.IDOrg = sdb.IDOrg
	sdbNew.NameOrg = sdb.NameOrg
}
// generateReport runs rep.Generate for the state row, cleaning up the
// report's temporary resources when done. On failure any partial PDF is
// removed and the error recorded in sdb; on success ReadyToSendFile is
// set so processWebSocket can notify the client.
// NOTE(review): the org parameter is currently unused here.
func generateReport(rep report.Report, org report.Organization, sdb *report.StateReportGenerationDB) {
	defer rep.Clean()
	log.Println(sdb.Value)
	genErr := rep.Generate(sdb)
	if genErr != nil {
		log.Println(genErr)
		// Remove any partial output so a stale file is not served later.
		// Fix: keep the generation error in its own variable — the old
		// code overwrote err with os.Remove's result and then panicked on
		// err.Error() whenever the removal succeeded (nil error).
		if err := os.Remove(filepath.Join(report.Dir.Cache, sdb.NameFilePDF)); err != nil {
			log.Println(err)
		}
		sdb.Error = genErr.Error()
		sdb.TimestampStateChange = time.Now().UnixNano()
		report.DB.Save(sdb)
		return
	}
	sdb.ReadyToSendFile = true
	sdb.TimestampStateChange = time.Now().UnixNano()
	report.DB.Save(sdb)
}
// routineInitReportGeneration is the background goroutine that builds a
// report: it collects the dashboard panels via Grafana, prepares the cache
// directory and PDF file name, and kicks off generation. Progress and
// errors are recorded in sdb for processWebSocket to observe.
func routineInitReportGeneration(h ServeReportHandler, dts time.Time, sdb *report.StateReportGenerationDB, req *http.Request) {
	dbSaveState(2, nil, "Report generation Go routine initialized...", sdb)
	tR := timeRange(req)
	log.Println(tR)
	g := h.newGrafanaClient(cfg.Server.Protocol+cfg.Server.Host, cfg.Server.Authentication.ApiKey, dashVariables(req))
	rep := h.newReport(g, tR, sdb)
	nPanels := len(rep.Panels())
	if nPanels == 0 {
		// Nothing to render: record the error state (800) and stop.
		log.Println("No panels found!")
		sdb.State = 800
		sdb.Error = "No panels found!"
		sdb.TimestampStateChange = time.Now().UnixNano()
		report.DB.Save(sdb)
		return
	}
	sdb.TitleDash = rep.Title()
	sdb.CountPanels = nPanels
	log.Println("Panels count = ", sdb.CountPanels)
	// Fix: sdb is already a pointer; &sdb handed GORM a **T, unlike every
	// other Save call in this file.
	report.DB.Save(sdb)
	if err := os.MkdirAll(report.Dir.Cache, 0777); err != nil {
		log.Println(fmt.Errorf("error creating cache directory at %v: %v", report.Dir.Cache, err))
	}
	refreshCache(dts, report.PeriodClearCache)
	sdb.NameFilePDF = sdb.TitleDash + "_" + sdb.Value + ".pdf"
	org := report.Organization{
		ID:   sdb.IDOrg,
		Name: sdb.NameOrg,
	}
	// Abort if the client-side wait already timed out.
	if sdb.TimedOut {
		log.Println("Timed out!")
		return
	}
	generateReport(rep, org, sdb)
}
// checkWebSocket upgrades the request to a websocket and reads the first
// client message, returning the connection and the raw payload.
func checkWebSocket(w http.ResponseWriter, req *http.Request) (*websocket.Conn, []byte, error) {
	// Force the upgrade headers (see handlerReporterLogWs) before handing
	// the request to the upgrader.
	req.Header["Upgrade"] = []string{"websocket"}
	req.Header["Connection"] = []string{"Upgrade"}
	ws, err := upgrader.Upgrade(w, req, nil)
	if err != nil {
		// Fix: both branches of the old handshake-error type check did
		// the identical log-and-return, so the assertion was dead code; a
		// single path preserves the behavior.
		log.Println(err)
		return ws, []byte{}, err
	}
	_, p, err := ws.ReadMessage()
	return ws, p, err
}
// wsWriteMessage sends a small JSON envelope {"State":..., "Message":...}
// to the websocket client within the configured write deadline.
func wsWriteMessage(ws *websocket.Conn, state, message string) error {
	// Fix: build the payload with encoding/json instead of string
	// concatenation, so quotes or backslashes in a filename or error
	// message can no longer produce invalid JSON.
	p, err := json.Marshal(struct {
		State   string
		Message string
	}{state, message})
	if err != nil {
		log.Println(err)
		return err
	}
	ws.SetWriteDeadline(time.Now().Add(writeWait))
	if err := ws.WriteMessage(websocket.TextMessage, p); err != nil {
		log.Println(err)
		return err
	}
	log.Println("Message write complete!!!", state, message)
	return nil
}
// verifyJWT parses the token with the middleware's validation key and
// reports whether it is valid; parse errors are logged and count as
// invalid.
func verifyJWT(token string) bool {
	parsed, err := jwt.Parse(token, jwtMiddleware.Options.ValidationKeyGetter)
	if err != nil {
		log.Println(err)
		return false
	}
	return parsed.Valid
}
// dbSaveState records a state transition on the report-generation row:
// the numeric state, an optional error, a human-readable message and a
// fresh state-change timestamp, then persists the row.
func dbSaveState(state int, err error, message string, sdb *report.StateReportGenerationDB) {
	sdb.Error = ""
	if err != nil {
		log.Println(err.Error())
		sdb.Error = err.Error()
	}
	sdb.State = state
	sdb.Message = message
	sdb.TimestampStateChange = time.Now().UnixNano()
	report.DB.Save(sdb)
}
// dbIsRecordFound reports whether a GORM query located a record, i.e. its
// error is anything other than gorm.ErrRecordNotFound (including nil).
func dbIsRecordFound(resultDB *gorm.DB) bool {
	return !errors.Is(resultDB.Error, gorm.ErrRecordNotFound)
}
// refreshCache clears the on-disk reports cache when it is older than
// periodClearCache, tracking the last-clear timestamp in the database.
// dts is the reference time used both to measure the cache age and as the
// new timestamp once the cache is cleared.
func refreshCache(dts time.Time, periodClearCache time.Duration) {
	cADB := report.CacheAgeDB{}
	resultDB := report.DB.Last(&cADB)
	if !dbIsRecordFound(resultDB) {
		// First run: remember "now" as the cache birth time.
		report.DB.Create(&report.CacheAgeDB{
			TS: dts.Unix(),
		})
		return
	}
	diffTimeStamp := time.Duration(dts.Unix()-time.Unix(cADB.TS, 0).Unix()) * time.Second
	log.Println("Reports cache age is: "+
		diffTimeStamp.String(), "with max age for cache clear:",
		periodClearCache.String())
	if diffTimeStamp > periodClearCache {
		clearCache()
		cADB.TS = dts.Unix()
		// Pass a pointer so GORM receives an addressable value — the
		// previous Save(cADB) passed a copy by value, unlike every other
		// DB.Save call in this file.
		report.DB.Save(&cADB)
	}
}
// clearCache removes the entire reports cache directory from disk.
func clearCache() {
	if err := os.RemoveAll(report.Dir.Cache); err != nil {
		log.Println("Error cleaning up cache directory:", err)
		// Previously the success message below was logged even on failure.
		return
	}
	log.Println("Reports cache directory cleaned!")
}
// isFileExist reports whether nameFile exists inside nameDir and is a
// regular file; directories and stat failures yield false.
func isFileExist(nameDir string, nameFile string) bool {
	pathFile := filepath.Join(nameDir, nameFile)
	info, err := os.Stat(pathFile)
	if err != nil {
		log.Println("File:" + pathFile + " does not exists")
		return false
	}
	if info.IsDir() {
		// The path exists but is a directory, not a file.
		return false
	}
	log.Println("File:" + pathFile + " exists")
	return true
}
// getUniqueValue builds a cache key for a rendered report. The from/to
// timestamps are rounded down to 15-minute buckets so requests within the
// same window share one cached PDF.
func getUniqueValue(dashID string, tR grafana.TimeRange, org report.Organization) string {
	ageReportRefresh := int64((time.Duration(15) * time.Minute).Seconds())
	// Integer division already truncates; the previous float64 round-trip
	// was redundant and could lose precision for very large timestamps.
	tStrFromRound := strconv.FormatInt(tR.FromTS/ageReportRefresh*ageReportRefresh, 10)
	tStrToRound := strconv.FormatInt(tR.ToTS/ageReportRefresh*ageReportRefresh, 10)
	valueUnique := dashID + "_" + tStrFromRound + "_" + tStrToRound + "_" + strconv.Itoa(int(org.ID)) + "_" + org.Name
	log.Println("Report unique value:", valueUnique)
	return valueUnique
}
// getDashUID extracts the dashboard UID from the request's route variables.
func getDashUID(r *http.Request) string {
	dashUID := mux.Vars(r)["dashId"]
	log.Println("Dashboard UID:", dashUID)
	return dashUID
}
// timeRange builds a grafana.TimeRange from the request's "from"/"to" query
// parameters and fills FromTS/ToTS with absolute Unix timestamps.
func timeRange(r *http.Request) grafana.TimeRange {
	params := r.URL.Query()
	t := grafana.NewTimeRange(params.Get("from"), params.Get("to"))
	// Round-trip through the formatted representation — presumably to
	// resolve relative expressions (e.g. "now-1h") into concrete instants;
	// verify against the grafana package. Parse errors are ignored and
	// yield the zero time. NOTE(review): confirm that is intentional.
	timeFrom, _ := time.Parse(time.UnixDate, t.FromFormatted())
	timeTo, _ := time.Parse(time.UnixDate, t.ToFormatted())
	t.FromTS = grafana.ParseAbsTime(strconv.FormatInt(timeFrom.Unix(), 10)).Unix()
	t.ToTS = grafana.ParseAbsTime(strconv.FormatInt(timeTo.Unix(), 10)).Unix()
	return t
}
// renderTmplt renders the HTML template named after the current UI page
// (aD.UI) into the response writer, reporting failures via showError.
func renderTmplt(w http.ResponseWriter) {
	if err := tmplts.ExecuteTemplate(w, aD.UI+".html", aD); err != nil {
		showError(w, err)
	}
}
// loadPage switches the UI state to namePage and renders the matching
// template into w.
func loadPage(namePage string, w http.ResponseWriter) {
	aD.UI = namePage
	renderTmplt(w)
}
// showError logs err and reports it to the HTTP client with a 500 status.
//
// NOTE(review): the raw error text is written to the client, which may leak
// internal details; consider a generic message for production.
func showError(w http.ResponseWriter, err error) {
	log.Println(err.Error())
	// The previous version replied with an implicit 200 OK; an error
	// response should carry an error status code.
	w.WriteHeader(http.StatusInternalServerError)
	w.Write([]byte(err.Error()))
}
// isAuthorized checks for a 32-character grafana_session cookie.
//
// NOTE(review): the final statement returns true, so this function
// authorizes EVERY request regardless of the cookie check above — the whole
// check is dead code. If authorization is not deliberately disabled, the
// fallback should return false.
func isAuthorized(r *http.Request) bool {
	sessionToken, err := r.Cookie(`grafana_session`)
	if err == nil {
		if len(sessionToken.Value) == 32 {
			return true
		}
	}
	return true
}
// neuteredFileSystem wraps an http.FileSystem and refuses to serve bare
// directory listings: a directory is only served when it contains an
// index.html.
type neuteredFileSystem struct {
	fs http.FileSystem
}

// Open opens path from the underlying filesystem. Directories are only
// accepted when an index.html exists inside them; otherwise the handle is
// closed and the lookup error is returned.
func (nfs neuteredFileSystem) Open(path string) (http.File, error) {
	f, err := nfs.fs.Open(path)
	if err != nil {
		return nil, err
	}
	s, err := f.Stat()
	if err != nil {
		// Previously the Stat error was discarded, which would dereference
		// a nil FileInfo below; close the handle and propagate instead.
		f.Close()
		return nil, err
	}
	if s.IsDir() {
		index := filepath.Join(path, "index.html")
		idx, err := nfs.fs.Open(index)
		if err != nil {
			if closeErr := f.Close(); closeErr != nil {
				return nil, closeErr
			}
			return nil, err
		}
		// The probe handle was only needed for the existence check;
		// previously it was leaked on success.
		idx.Close()
	}
	return f, nil
}
// dashVariables collects every "var-" prefixed query parameter from the
// request, preserving multiple values per key.
func dashVariables(r *http.Request) url.Values {
	vars := url.Values{}
	for key, values := range r.URL.Query() {
		if !strings.HasPrefix(key, "var-") {
			continue
		}
		log.Println("Called with variable:", key, values)
		for _, value := range values {
			vars.Add(key, value)
		}
	}
	if len(vars) == 0 {
		log.Println("Called without variable")
	}
	return vars
}
|
package gotten
import (
"github.com/Hexilee/gotten/headers"
"github.com/stretchr/testify/assert"
"net/http"
"testing"
)
var (
	// TestResponse is a canned 200 response with a text/html Content-Type,
	// shared by the checker-factory tests below.
	TestResponse = &http.Response{
		StatusCode: http.StatusOK,
		Header: map[string][]string{
			headers.HeaderContentType: {"text/html"},
		},
	}
)
// TestCheckerFactory_Create exercises checkers built from combinations of
// status and content-type constraints against the canned TestResponse
// (200, text/html).
func TestCheckerFactory_Create(t *testing.T) {
	// An unconstrained checker accepts everything; constraints that include
	// the response's status/content type must pass.
	assert.True(t, new(CheckerFactory).Create().Check(TestResponse))
	assert.True(t, new(CheckerFactory).WhenStatuses(http.StatusOK).Create().Check(TestResponse))
	assert.True(t, new(CheckerFactory).WhenStatuses(http.StatusOK, http.StatusAccepted).Create().Check(TestResponse))
	assert.True(t, new(CheckerFactory).WhenContentType("text/html").Create().Check(TestResponse))
	assert.True(t, new(CheckerFactory).WhenContentType("text/html", "text/xml").Create().Check(TestResponse))
	assert.True(t, new(CheckerFactory).WhenStatuses(http.StatusOK).WhenContentType("text/html").Create().Check(TestResponse))
	// Any constraint set that excludes the response's status or content
	// type must fail, even when the other dimension matches.
	assert.False(t, new(CheckerFactory).WhenStatuses(http.StatusAccepted).Create().Check(TestResponse))
	assert.False(t, new(CheckerFactory).WhenContentType("text/xml").Create().Check(TestResponse))
	assert.False(t, new(CheckerFactory).WhenStatuses(http.StatusAccepted).WhenContentType("text/html").Create().Check(TestResponse))
	assert.False(t, new(CheckerFactory).WhenStatuses(http.StatusOK).WhenContentType("text/xml").Create().Check(TestResponse))
	assert.False(t, new(CheckerFactory).WhenStatuses(http.StatusAccepted).WhenContentType("text/xml").Create().Check(TestResponse))
}
|
package rpc_http_service
// init starts the background goroutine that persists logs to the database.
//
// NOTE(review): the goroutine has no visible stop/cancel mechanism —
// presumably saveToDbLoggs_go loops for the life of the process; confirm
// its lifecycle, since init-time side effects make startup order implicit.
func init() {
	go saveToDbLoggs_go()
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
)
// main demonstrates the io.Writer, io.Reader and fmt.Stringer behaviour of
// MyType through a series of small write/read examples.
func main() {
	// new instance of MyType via its constructor
	m := NewMyType()
	fmt.Fprintf(&m, "Hello from %s", "MyNewType")
	// write examples
	writeWithWrite()
	writeWithFmt()
	// read examples
	readWithRead()
	readWithBufioReader(m)
	readAllBytes(m)
	// assignment test: only *MyType (addressable) satisfies io.Reader;
	// assigning the value directly would not compile.
	//var _ io.Reader = m -> compile error
	var _ io.Reader = &m
}
// Write directly using the Write method.
// writeWithWrite writes bytes by calling MyType.Write directly and then
// dumps the accumulated content.
func writeWithWrite() {
	fmt.Println("---Using MyType.Write")
	instance := NewMyType()
	instance.Write([]byte("abc"))
	instance.ShowContent()
	fmt.Println()
}
// writeWithFmt writes through fmt.Fprintf (MyType as an io.Writer) and then
// prints the accumulated content back via the Stringer interface.
func writeWithFmt() {
	fmt.Println("---Using fmt.Fprintf")
	instance := NewMyType()
	fmt.Fprintf(&instance, " - Hello from %s", "MyNewType")
	fmt.Println(instance)
	fmt.Println()
}
// Use the Read interface to read 10 bytes at time
// readWithRead drains MyType through its Read method, 10 bytes at a time,
// accumulating the chunks in a bytes.Buffer.
func readWithRead() {
	fmt.Println("---Using MyType.Read")
	m := NewMyType()
	fmt.Fprintf(&m, "Hello from %s", "MyNewType")
	buffer := bytes.Buffer{}
	buf := make([]byte, 10)
	for {
		n, err := m.Read(buf)
		// Per the io.Reader contract a reader may return n > 0 together
		// with an error (including io.EOF); the previous version dropped
		// those final bytes.
		buffer.Write(buf[:n])
		if err != nil {
			// Previously only io.EOF broke the loop, so any other read
			// error would spin forever.
			if err != io.EOF {
				testError(err)
			}
			break
		}
	}
	fmt.Printf("Read data from buffer: %s\n", buffer.Bytes())
	fmt.Println()
}
// readWithBufioReader demonstrates reading MyType through a bufio.Reader,
// first up to a space delimiter and then up to a newline (byte 10).
func readWithBufioReader(m MyType) {
	fmt.Println("---Using bufio.Reader")
	reader := bufio.NewReader(&m)
	str, err := reader.ReadString(' ')
	testError(err)
	fmt.Printf("string read through ReadString is %s\n", str)
	raw, err := reader.ReadBytes(10)
	testError(err)
	fmt.Printf("string read through ReadBytes is %s\n", raw)
	fmt.Println()
}
// readAllBytes drains every remaining byte of m with ioutil.ReadAll and
// prints the data as hex, as a string, and as a byte count.
func readAllBytes(m MyType) {
	fmt.Println("---Using ioutil.ReadAll")
	data, err := ioutil.ReadAll(&m)
	testError(err)
	fmt.Printf("Data as hex: %x\n", data)
	fmt.Printf("Data as string: %s\n", data)
	fmt.Printf("Number of bytes read: %d\n", len(data))
	fmt.Println()
}
// useEmptyInterface shows that the empty interface can hold a value of any
// type; each assignment replaces the previous one and only the last value
// (a MyType zero value) is printed.
func useEmptyInterface() {
	var empty interface{}
	empty = 1
	empty = ""
	empty = true
	empty = map[int]int{1: 100}
	empty = MyType{}
	fmt.Println(empty)
}
// testError logs err when it is non-nil; nil errors are silently ignored.
func testError(err error) {
	if err == nil {
		return
	}
	log.Println(err.Error())
}
|
package cmd
import (
"reflect"
"testing"
)
// TestMakeParseLabels covers ParseLabels for well-formed "k=v[,k=v]" label
// strings and for a set of malformed or non-string inputs that must fail.
func TestMakeParseLabels(t *testing.T) {
	successCases := []struct {
		name     string
		labels   string
		expected map[string]string
	}{
		{
			name:   "test1",
			labels: "foo=false",
			expected: map[string]string{
				"foo": "false",
			},
		},
		{
			name:   "test2",
			labels: "foo=true,bar=123",
			expected: map[string]string{
				"foo": "true",
				"bar": "123",
			},
		},
	}
	for _, test := range successCases {
		got, err := ParseLabels(test.labels)
		if err != nil {
			t.Errorf("unexpected error :%v", err)
		}
		if !reflect.DeepEqual(test.expected, got) {
			t.Errorf("\nexpected:\n%v\ngot:\n%v", test.expected, got)
		}
	}
	// labels is interface{} here so non-string input (123) can be exercised
	// — presumably ParseLabels accepts interface{}; verify its signature.
	errorCases := []struct {
		name   string
		labels interface{}
	}{
		{
			name:   "non-string",
			labels: 123,
		},
		{
			name:   "empty string",
			labels: "",
		},
		{
			name:   "error format",
			labels: "abc=456;bcd=789",
		},
		{
			name:   "error format",
			labels: "abc=456.bcd=789",
		},
		{
			name:   "error format",
			labels: "abc,789",
		},
		{
			name:   "error format",
			labels: "abc",
		},
		{
			name:   "error format",
			labels: "=abc",
		},
	}
	for _, test := range errorCases {
		_, err := ParseLabels(test.labels)
		if err == nil {
			t.Errorf("labels %s expect error, reason: %s, got nil", test.labels, test.name)
		}
	}
}
|
// Copyright 2021 ZUP IT SERVICOS EM TECNOLOGIA E INOVACAO SA
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package engines_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
engine "github.com/ZupIT/horusec-engine"
"github.com/ZupIT/horusec-engine/text"
enginesenum "github.com/ZupIT/horusec/internal/enums/engines"
"github.com/ZupIT/horusec/internal/services/engines"
"github.com/ZupIT/horusec/internal/services/engines/csharp"
"github.com/ZupIT/horusec/internal/services/engines/dart"
"github.com/ZupIT/horusec/internal/services/engines/java"
"github.com/ZupIT/horusec/internal/services/engines/kotlin"
"github.com/ZupIT/horusec/internal/services/engines/kubernetes"
"github.com/ZupIT/horusec/internal/services/engines/leaks"
"github.com/ZupIT/horusec/internal/services/engines/nginx"
"github.com/ZupIT/horusec/internal/services/engines/nodejs"
"github.com/ZupIT/horusec/internal/services/engines/swift"
)
// testcase describes a single rule-execution scenario: the rule under test,
// the source code it is run against, and the findings it is expected to
// produce (left empty for safe samples).
type testcase struct {
	name     string
	src      string
	rule     text.TextRule
	findings []engine.Finding
}
// TestRulesVulnerableCode runs each leaks rule against its known-vulnerable
// sample and asserts that the findings match the expected code sample,
// location and rule metadata exactly.
func TestRulesVulnerableCode(t *testing.T) {
	// One entry per leaks rule; src constants are defined elsewhere in the
	// package and each embeds the secret at the recorded line/column.
	testcases := []testcase{
		{
			name: "Leaks-HS-LEAKS-1",
			rule: leaks.NewAWSManagerID(),
			src:  SampleVulnerableLeaksRegularAWSManagerID,
			findings: []engine.Finding{
				{
					CodeSample: "ACCESS_KEY: 'AKIAJSIE27KKMHXI3BJQ'",
					SourceLocation: engine.Location{
						Line:   7,
						Column: 18,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-2",
			rule: leaks.NewAWSSecretKey(),
			src:  SampleVulnerableLeaksRegularAWSSecretKey,
			findings: []engine.Finding{
				{
					CodeSample: `AWS_SECRET_KEY: 'doc5eRXFpsWllGC5yKJV/Ymm5KwF+IRZo95EudOm'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-3",
			rule: leaks.NewAWSMWSKey(),
			src:  SampleVulnerableLeaksRegularAWSMWSKey,
			findings: []engine.Finding{
				{
					CodeSample: `AWS_WMS_KEY: 'amzn.mws.986478f0-9775-eabc-2af4-e499a8496828'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 20,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-4",
			rule: leaks.NewFacebookSecretKey(),
			src:  SampleVulnerableLeaksRegularFacebookSecretKey,
			findings: []engine.Finding{
				{
					CodeSample: `FB_SECRET_KEY: 'cb6f53505911332d30867f44a1c1b9b5'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-5",
			rule: leaks.NewFacebookClientID(),
			src:  SampleVulnerableLeaksRegularFacebookClientID,
			findings: []engine.Finding{
				{
					CodeSample: `FB_CLIENT_ID: '148695999071979'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-7",
			rule: leaks.NewTwitterClientID(),
			src:  SampleVulnerableLeaksRegularTwitterClientID,
			findings: []engine.Finding{
				{
					CodeSample: `TWITTER_CLIENT_ID: '1h6433fsvygnyre5a40'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-LEAKS-6",
			rule: leaks.NewTwitterSecretKey(),
			src:  SampleVulnerableLeaksRegularTwitterSecretKey,
			findings: []engine.Finding{
				{
					CodeSample: `TWITTER_SECRET_KEY: 'ej64cqk9k8px9ae3e47ip89l7if58tqhpxi1r'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-8",
			rule: leaks.NewGithub(),
			src:  SampleVulnerableLeaksRegularGithub,
			findings: []engine.Finding{
				{
					CodeSample: `GITHUB_SECRET_KEY: 'edzvPbU3SYUc7pFc9le20lzIRErTOaxCABQ1'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-9",
			rule: leaks.NewLinkedInClientID(),
			src:  SampleVulnerableLeaksRegularLinkedInClientID,
			findings: []engine.Finding{
				{
					CodeSample: `LINKEDIN_CLIENT_ID: 'g309xttlaw25'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-LEAKS-10",
			rule: leaks.NewLinkedInSecretKey(),
			src:  SampleVulnerableLeaksRegularLinkedInSecretKey,
			findings: []engine.Finding{
				{
					CodeSample: `LINKEDIN_SECRET_KEY: '0d16kcnjyfzmcmjp'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-LEAKS-11",
			rule: leaks.NewSlack(),
			src:  SampleVulnerableLeaksRegularSlack,
			findings: []engine.Finding{
				{
					CodeSample: `SLACK_WEBHOOK: 'https://hooks.slack.com/services/TNeqvYPeO/BncTJ74Hf/NlvFFKKAKPkd6h7FlQCz1Blu'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 22,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-12",
			rule: leaks.NewAsymmetricPrivateKey(),
			src:  SampleVulnerableLeaksRegularAsymmetricPrivateKey,
			findings: []engine.Finding{
				{
					CodeSample: `SSH_PRIVATE_KEY: '-----BEGIN PRIVATE KEY-----MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDBj08sp5++4anGcmQxJjAkBgNVBAoTHVByb2dyZXNzIFNvZnR3YXJlIENvcnBvcmF0aW9uMSAwHgYDVQQDDBcqLmF3cy10ZXN0LnByb2dyZXNzLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD...bml6YXRpb252YWxzaGEyZzIuY3JsMIGgBggrBgEFBQcBAQSBkzCBkDBNBggrBgEFBQcwAoZBaHR0cDovL3NlY3VyZS5nbG9iYWxzaWduLmNvbS9jYWNlcnQvZ3Nvcmdhz3P668YfhUbKdRF6S42Cg6zn-----END PRIVATE KEY-----'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 24,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-13",
			rule: leaks.NewGoogleAPIKey(),
			src:  SampleVulnerableLeaksRegularGoogleAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `GCP_API_KEY: 'AIzaMPZHYiu1RdzE1nG2SaVyOoz244TuacQIR6m'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 20,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-14",
			rule: leaks.NewGoogleGCPServiceAccount(),
			src:  SampleVulnerableLeaksRegularGoogleGCPServiceAccount,
			findings: []engine.Finding{
				{
					CodeSample: `GCP_SERVICE_ACCOUNT: '18256698220617903267772185514630273595-oy8_uzouz8tyy46y84ckrwei9_6rq_pb.apps.googleusercontent.com'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-15",
			rule: leaks.NewHerokuAPIKey(),
			src:  SampleVulnerableLeaksRegularHerokuAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `HEROKU_API_KEY: '3623f8e9-2d05-c9bb-2209082d6b5c'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-16",
			rule: leaks.NewMailChimpAPIKey(),
			src:  SampleVulnerableLeaksRegularMailChimpAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `MAILCHIMP_API_KEY: 'f7e9c13c10d0b19c3bb003a9f635d488-us72'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-17",
			rule: leaks.NewMailgunAPIKey(),
			src:  SampleVulnerableLeaksRegularMailgunAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `MAILGUN_API_KEY: 'key-xke9nbc2i5po5cjw3ngyxiz450zxpapu'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-18",
			rule: leaks.NewPayPalBraintreeAccessToken(),
			src:  SampleVulnerableLeaksRegularPayPalBraintreeAccessToken,
			findings: []engine.Finding{
				{
					CodeSample: `PAY_PAL_ACCESS_TOKEN: 'access_token$production$mk0sech2v7qqsol3$db651af2221c22b4ca2f0f583798135e'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 29,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-19",
			rule: leaks.NewPicaticAPIKey(),
			src:  SampleVulnerableLeaksRegularPicaticAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `PICATIC_API_KEY: 'sk_live_voy1p9k7r9g9j8ezmif488nk2p8310nl'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 24,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-20",
			rule: leaks.NewSendGridAPIKey(),
			src:  SampleVulnerableLeaksRegularSendGridAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `SEND_GRID_API_KEY: 'SG.44b7kq3FurdH0bSHBGjPSWhE8vJ.1evu4Un0TXFIb1_6zW4YOdjTMeE'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 26,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-21",
			rule: leaks.NewStripeAPIKey(),
			src:  SampleVulnerableLeaksRegularStripeAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `STRIPE_API_KEY: 'rk_live_8qSZpoI9t0BOGkOLVzvesc6K'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-22",
			rule: leaks.NewSquareAccessToken(),
			src:  SampleVulnerableLeaksRegularSquareAccessToken,
			findings: []engine.Finding{
				{
					CodeSample: `SQUARE_ACCESS_TOKEN: 'sq0atp-clYRBSht6oefa7w_2R56ra'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 28,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-23",
			rule: leaks.NewSquareOAuthSecret(),
			src:  SampleVulnerableLeaksRegularSquareOAuthSecret,
			findings: []engine.Finding{
				{
					CodeSample: `SQUARE_SECRET: 'sq0csp-LsEBYQNja]OgT3hRxjJV5cWX^XjpT12n3QkRY_vep2z'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 22,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-24",
			rule: leaks.NewTwilioAPIKey(),
			src:  SampleVulnerableLeaksRegularTwilioAPIKey,
			findings: []engine.Finding{
				{
					CodeSample: `TWILIO_API_KEY: '^SK9ae6bd84ccd091eb6bfad8e2a474af95'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 6,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-25",
			rule: leaks.NewHardCodedCredentialGeneric(),
			src:  SampleVulnerableLeaksRegularHardCodedCredentialGeneric,
			findings: []engine.Finding{
				{
					CodeSample: `POSTGRES_DBPASSWD: 'Ch@ng3m3'`,
					SourceLocation: engine.Location{
						Line:   7,
						Column: 15,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-26",
			rule: leaks.NewHardCodedPassword(),
			src:  SampleVulnerableLeaksRegularHardCodedPassword,
			findings: []engine.Finding{
				{
					CodeSample: `DB_PASSWORD="gorm"`,
					SourceLocation: engine.Location{
						Line:   12,
						Column: 4,
					},
				},
			},
		},
		{
			name: "Leaks-HS-LEAKS-27",
			rule: leaks.NewPasswordExposedInHardcodedURL(),
			src:  SampleVulnerableLeaksRegularPasswordExposedInHardcodedURL,
			findings: []engine.Finding{
				{
					CodeSample: `dsn := "postgresql://gorm:gorm@127.0.0.1:5432/gorm?sslmode=disable"`,
					SourceLocation: engine.Location{
						Line:   10,
						Column: 9,
					},
				},
			},
		},
		{
			// WPConfig is the only rule expected to report two findings.
			name: "Leaks-HS-LEAKS-28",
			rule: leaks.NewWPConfig(),
			src:  SampleVulnerableLeaksRegularWPConfig,
			findings: []engine.Finding{
				{
					CodeSample: `define('AUTH_KEY', 'put your unique phrase here');`,
					SourceLocation: engine.Location{
						Line:   3,
						Column: 0,
					},
				},
				{
					CodeSample: `define('DB_PASSWORD', 'wen0221!');`,
					SourceLocation: engine.Location{
						Line:   4,
						Column: 0,
					},
				},
			},
		},
	}
	for _, tt := range testcases {
		t.Run(tt.name, func(t *testing.T) {
			findings := executeRule(t, tt)
			// Findings must match pairwise by index, and every finding must
			// carry the metadata of the rule that produced it.
			assert.Len(t, findings, len(tt.findings), "Expected equal issues on vulnerable code")
			for idx, finding := range findings {
				expected := tt.findings[idx]
				assert.Equal(t, expected.CodeSample, finding.CodeSample)
				assert.Equal(t, expected.SourceLocation, finding.SourceLocation)
				assert.Equal(t, tt.rule.ID, finding.ID)
				assert.Equal(t, tt.rule.Name, finding.Name)
				assert.Equal(t, tt.rule.Severity, finding.Severity)
				assert.Equal(t, tt.rule.Confidence, finding.Confidence)
				assert.Equal(t, tt.rule.Description, finding.Description)
			}
		})
	}
}
// TestRulesSafeCode runs each leaks rule against its known-safe sample and
// asserts that no findings are produced (false-positive regression guard).
func TestRulesSafeCode(t *testing.T) {
	testcases := []testcase{
		{
			name: "Leaks-HS-LEAKS-1",
			rule: leaks.NewAWSManagerID(),
			src:  SampleSafeLeaksRegularAWSManagerID,
		},
		{
			name: "Leaks-HS-LEAKS-2",
			rule: leaks.NewAWSSecretKey(),
			src:  SampleSafeLeaksRegularAWSSecretKey,
		},
		{
			name: "Leaks-HS-LEAKS-3",
			rule: leaks.NewAWSMWSKey(),
			src:  SampleSafeLeaksRegularAWSMWSKey,
		},
		{
			name: "Leaks-HS-LEAKS-4",
			rule: leaks.NewFacebookSecretKey(),
			src:  SampleSafeLeaksRegularFacebookSecretKey,
		},
		{
			name: "Leaks-HS-LEAKS-5",
			rule: leaks.NewFacebookClientID(),
			src:  SampleSafeLeaksRegularFacebookClientID,
		},
		{
			name: "Leaks-HS-LEAKS-7",
			rule: leaks.NewTwitterClientID(),
			src:  SampleSafeLeaksRegularTwitterClientID,
		},
		{
			name: "Leaks-LEAKS-6",
			rule: leaks.NewTwitterSecretKey(),
			src:  SampleSafeLeaksRegularTwitterSecretKey,
		},
		{
			name: "Leaks-HS-LEAKS-8",
			rule: leaks.NewGithub(),
			src:  SampleSafeLeaksRegularGithub,
		},
		{
			name: "Leaks-HS-LEAKS-9",
			rule: leaks.NewLinkedInClientID(),
			src:  SampleSafeLeaksRegularLinkedInClientID,
		},
		{
			name: "Leaks-LEAKS-10",
			rule: leaks.NewLinkedInSecretKey(),
			src:  SampleSafeLeaksRegularLinkedInSecretKey,
		},
		{
			name: "Leaks-LEAKS-11",
			rule: leaks.NewSlack(),
			src:  SampleSafeLeaksRegularSlack,
		},
		{
			name: "Leaks-HS-LEAKS-12",
			rule: leaks.NewAsymmetricPrivateKey(),
			src:  SampleSafeLeaksRegularAsymmetricPrivateKey,
		},
		{
			name: "Leaks-HS-LEAKS-13",
			rule: leaks.NewGoogleAPIKey(),
			src:  SampleSafeLeaksRegularGoogleAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-14",
			rule: leaks.NewGoogleGCPServiceAccount(),
			src:  SampleSafeLeaksRegularGoogleGCPServiceAccount,
		},
		{
			name: "Leaks-HS-LEAKS-15",
			rule: leaks.NewHerokuAPIKey(),
			src:  SampleSafeLeaksRegularHerokuAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-16",
			rule: leaks.NewMailChimpAPIKey(),
			src:  SampleSafeLeaksRegularMailChimpAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-17",
			rule: leaks.NewMailgunAPIKey(),
			src:  SampleSafeLeaksRegularMailgunAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-18",
			rule: leaks.NewPayPalBraintreeAccessToken(),
			src:  SampleSafeLeaksRegularPayPalBraintreeAccessToken,
		},
		{
			name: "Leaks-HS-LEAKS-19",
			rule: leaks.NewPicaticAPIKey(),
			src:  SampleSafeLeaksRegularPicaticAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-20",
			rule: leaks.NewSendGridAPIKey(),
			src:  SampleSafeLeaksRegularSendGridAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-21",
			rule: leaks.NewStripeAPIKey(),
			src:  SampleSafeLeaksRegularStripeAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-22",
			rule: leaks.NewSquareAccessToken(),
			src:  SampleSafeLeaksRegularSquareAccessToken,
		},
		{
			name: "Leaks-HS-LEAKS-23",
			rule: leaks.NewSquareOAuthSecret(),
			src:  SampleSafeLeaksRegularSquareOAuthSecret,
		},
		{
			name: "Leaks-HS-LEAKS-24",
			rule: leaks.NewTwilioAPIKey(),
			src:  SampleSafeLeaksRegularTwilioAPIKey,
		},
		{
			name: "Leaks-HS-LEAKS-25",
			rule: leaks.NewHardCodedCredentialGeneric(),
			src:  SampleSafeLeaksRegularHardCodedCredentialGeneric,
		},
		{
			name: "Leaks-HS-LEAKS-26",
			rule: leaks.NewHardCodedPassword(),
			src:  SampleSafeLeaksRegularHardCodedPassword,
		},
		{
			name: "Leaks-HS-LEAKS-27",
			rule: leaks.NewPasswordExposedInHardcodedURL(),
			src:  SampleSafeLeaksRegularPasswordExposedInHardcodedURL,
		},
		{
			name: "Leaks-HS-LEAKS-28",
			rule: leaks.NewWPConfig(),
			src:  SampleSafeLeaksRegularWPConfig,
		},
	}
	for _, tt := range testcases {
		t.Run(tt.name, func(t *testing.T) {
			findings := executeRule(t, tt)
			assert.Empty(t, findings, "Expected not issues on safe code to rule %s", tt.name)
		})
	}
}
// TestGetRules asserts, for every engine, that the rule manager exposes the
// expected number of rules, that every rule is a text.TextRule carrying at
// least one expression, and that no rule ID is duplicated.
func TestGetRules(t *testing.T) {
	testcases := []struct {
		engine             string
		manager            *engines.RuleManager
		expectedTotalRules int
	}{
		{
			engine:             "Nodejs",
			manager:            nodejs.NewRules(),
			expectedTotalRules: 53,
		},
		{
			engine:             "Nginx",
			manager:            nginx.NewRules(),
			expectedTotalRules: 4,
		},
		{
			engine:             "Leaks",
			manager:            leaks.NewRules(),
			expectedTotalRules: 28,
		},
		{
			engine:             "Kubernetes",
			manager:            kubernetes.NewRules(),
			expectedTotalRules: 9,
		},
		{
			engine:             "Kotlin",
			manager:            kotlin.NewRules(),
			expectedTotalRules: 40,
		},
		{
			engine:             "Java",
			manager:            java.NewRules(),
			expectedTotalRules: 189,
		},
		{
			engine:             "Dart",
			manager:            dart.NewRules(),
			expectedTotalRules: 17,
		},
		{
			engine:             "Csharp",
			manager:            csharp.NewRules(),
			expectedTotalRules: 74,
		},
		{
			engine:             "Swift",
			manager:            swift.NewRules(),
			expectedTotalRules: 23,
		},
	}
	for _, tt := range testcases {
		t.Run(tt.engine, func(t *testing.T) {
			rules := tt.manager.GetAllRules()
			expressions := 0
			rulesID := map[string]bool{}
			for _, rule := range rules {
				r, ok := rule.(text.TextRule)
				require.True(t, ok, "Expected rule type of text.TextRule, got %T", rule)
				expressions += len(r.Expressions)
				if rulesID[r.ID] {
					t.Errorf(
						"Rule in %s is duplicated ID(%s) => Name: %s, Description: %s, Type: %v", tt.engine, r.ID, r.Name, r.Description, r.Type,
					)
				} else {
					// Record this element as an encountered element.
					rulesID[r.ID] = true
				}
			}
			assert.Greater(t, len(rules), 0)
			assert.Greater(t, expressions, 0)
			// testify's assert.Equal takes (expected, actual); the previous
			// calls had the arguments swapped, which garbles failure output.
			assert.Equal(t, tt.expectedTotalRules, len(rules), "Total rules is not equal the expected")
			assert.Equal(t, tt.expectedTotalRules, len(rulesID), "Rules ID is not equal the expected")
		})
	}
}
// executeRule wraps tt.src in a single text unit and runs tt.rule against
// it, returning the findings produced by the engine.
func executeRule(tb testing.TB, tt testcase) []engine.Finding {
	textFile, err := text.NewTextFile("", []byte(tt.src))
	require.Nil(tb, err, "Expected nil error to create text file")
	unit := text.TextUnit{
		Files: []text.TextFile{
			textFile,
		},
	}
	return engine.RunMaxUnitsByAnalysis(
		[]engine.Unit{unit}, []engine.Rule{tt.rule}, enginesenum.DefaultMaxUnitsPerAnalysis,
	)
}
|
package main
import (
"encoding/json"
"os"
)
// Config holds all tunable client settings, loaded from a JSON file.
// Field groups: addressing/crypto, connection management, KCP transport
// tuning, smux stream tuning, and logging/behaviour flags.
type Config struct {
	LocalAddr    string `json:"localaddr"`
	RemoteAddr   string `json:"remoteaddr"`
	Key          string `json:"key"`
	Crypt        string `json:"crypt"`
	Mode         string `json:"mode"`
	Conn         int    `json:"conn"`
	AutoExpire   int    `json:"autoexpire"`
	ScavengeTTL  int    `json:"scavengettl"`
	MTU          int    `json:"mtu"`
	SndWnd       int    `json:"sndwnd"`
	RcvWnd       int    `json:"rcvwnd"`
	DataShard    int    `json:"datashard"`
	ParityShard  int    `json:"parityshard"`
	DSCP         int    `json:"dscp"`
	NoComp       bool   `json:"nocomp"`
	AckNodelay   bool   `json:"acknodelay"`
	NoDelay      int    `json:"nodelay"`
	Interval     int    `json:"interval"`
	Resend       int    `json:"resend"`
	NoCongestion int    `json:"nc"`
	SockBuf      int    `json:"sockbuf"`
	SmuxVer      int    `json:"smuxver"`
	SmuxBuf      int    `json:"smuxbuf"`
	StreamBuf    int    `json:"streambuf"`
	KeepAlive    int    `json:"keepalive"`
	Log          string `json:"log"`
	SnmpLog      string `json:"snmplog"`
	SnmpPeriod   int    `json:"snmpperiod"`
	Quiet        bool   `json:"quiet"`
	TCP          bool   `json:"tcp"`
	Vpn          bool   `json:"vpn"`
}

// parseJSONConfig populates config from the JSON file at path.
// The file is opened read-only and closed before returning.
func parseJSONConfig(config *Config, path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()
	decoder := json.NewDecoder(file)
	return decoder.Decode(config)
}
|
package sese
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document03500106 is the XML document wrapper for the
// sese.035.001.06 SecuritiesFinancingConfirmation message.
type Document03500106 struct {
	XMLName xml.Name                            `xml:"urn:iso:std:iso:20022:tech:xsd:sese.035.001.06 Document"`
	Message *SecuritiesFinancingConfirmationV06 `xml:"SctiesFincgConf"`
}
// AddMessage allocates the wrapped confirmation message and returns it so
// the caller can populate it.
func (d *Document03500106) AddMessage() *SecuritiesFinancingConfirmationV06 {
	msg := new(SecuritiesFinancingConfirmationV06)
	d.Message = msg
	return msg
}
// Scope
// A securities financing transaction account servicer sends a SecuritiesFinancingConfirmation to an account owner to confirm or advise of the partial or full settlement of the opening or closing leg of a securities financing transaction.
//
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure managing securities financing transactions on behalf of their participants
// - an agent (sub-custodian) managing securities financing transactions on behalf of their global custodian customer, or
// - a custodian managing securities financing transactions on behalf of an investment management institution or a broker/dealer.
//
//
// Usage
// The message may also be used to:
// - re-send a message previously sent,
// - provide a third party with a copy of a message for information,
// - re-send to a third party a copy of a message for information
// using the relevant elements in the Business Application Header.
//
// All optional components are pointers tagged omitempty, so they are
// skipped in the XML output when left nil.
type SecuritiesFinancingConfirmationV06 struct {
	// Securities financing transaction identification information, type (repurchase agreement, reverse repurchase agreement, securities lending or securities borrowing) and other parameters.
	TransactionIdentificationDetails *iso20022.TransactionTypeAndAdditionalParameters10 `xml:"TxIdDtls"`
	// Additional parameters to the transaction.
	AdditionalParameters *iso20022.AdditionalParameters24 `xml:"AddtlParams,omitempty"`
	// Details of the securities financing deal.
	TradeDetails *iso20022.SecuritiesTradeDetails55 `xml:"TradDtls"`
	// Financial instrument representing a sum of rights of the investor vis-a-vis the issuer.
	FinancialInstrumentIdentification *iso20022.SecurityIdentification19 `xml:"FinInstrmId"`
	// Elements characterising a financial instrument.
	FinancialInstrumentAttributes *iso20022.FinancialInstrumentAttributes64 `xml:"FinInstrmAttrbts,omitempty"`
	// Details related to the account and quantity involved in the transaction.
	QuantityAndAccountDetails *iso20022.QuantityAndAccount40 `xml:"QtyAndAcctDtls"`
	// Details of the closing of the securities financing transaction.
	SecuritiesFinancingDetails *iso20022.SecuritiesFinancingTransactionDetails28 `xml:"SctiesFincgDtls,omitempty"`
	// Specifies what settlement standing instruction database is to be used to derive the settlement parties involved in the transaction.
	StandingSettlementInstructionDetails *iso20022.StandingSettlementInstruction11 `xml:"StgSttlmInstrDtls,omitempty"`
	// Parameters which explicitly state the conditions that must be fulfilled before a particular transaction of a financial instrument can be settled. These parameters are defined by the instructing party in compliance with settlement rules in the market the transaction will settle in.
	SettlementParameters *iso20022.SettlementDetails96 `xml:"SttlmParams,omitempty"`
	// Identifies the chain of delivering settlement parties.
	DeliveringSettlementParties *iso20022.SettlementParties36 `xml:"DlvrgSttlmPties,omitempty"`
	// Identifies the chain of receiving settlement parties.
	ReceivingSettlementParties *iso20022.SettlementParties36 `xml:"RcvgSttlmPties,omitempty"`
	// Cash parties involved in the transaction if different for the securities settlement parties.
	CashParties *iso20022.CashParties26 `xml:"CshPties,omitempty"`
	// Amount effectively settled and which will be credited to/debited from the account owner's cash account. It may differ from the instructed settlement amount based on market tolerance level.
	SettledAmount *iso20022.AmountAndDirection46 `xml:"SttldAmt,omitempty"`
	// Other amounts than the settlement amount.
	OtherAmounts *iso20022.OtherAmounts31 `xml:"OthrAmts,omitempty"`
	// Other business parties relevant to the transaction.
	OtherBusinessParties *iso20022.OtherParties27 `xml:"OthrBizPties,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddTransactionIdentificationDetails allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddTransactionIdentificationDetails() *iso20022.TransactionTypeAndAdditionalParameters10 {
	s.TransactionIdentificationDetails = new(iso20022.TransactionTypeAndAdditionalParameters10)
	return s.TransactionIdentificationDetails
}
// AddAdditionalParameters allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddAdditionalParameters() *iso20022.AdditionalParameters24 {
	s.AdditionalParameters = new(iso20022.AdditionalParameters24)
	return s.AdditionalParameters
}
// AddTradeDetails allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddTradeDetails() *iso20022.SecuritiesTradeDetails55 {
	s.TradeDetails = new(iso20022.SecuritiesTradeDetails55)
	return s.TradeDetails
}
// AddFinancialInstrumentIdentification allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddFinancialInstrumentIdentification() *iso20022.SecurityIdentification19 {
	s.FinancialInstrumentIdentification = new(iso20022.SecurityIdentification19)
	return s.FinancialInstrumentIdentification
}
// AddFinancialInstrumentAttributes allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddFinancialInstrumentAttributes() *iso20022.FinancialInstrumentAttributes64 {
	s.FinancialInstrumentAttributes = new(iso20022.FinancialInstrumentAttributes64)
	return s.FinancialInstrumentAttributes
}
// AddQuantityAndAccountDetails allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddQuantityAndAccountDetails() *iso20022.QuantityAndAccount40 {
	s.QuantityAndAccountDetails = new(iso20022.QuantityAndAccount40)
	return s.QuantityAndAccountDetails
}
// AddSecuritiesFinancingDetails allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddSecuritiesFinancingDetails() *iso20022.SecuritiesFinancingTransactionDetails28 {
	s.SecuritiesFinancingDetails = new(iso20022.SecuritiesFinancingTransactionDetails28)
	return s.SecuritiesFinancingDetails
}
// AddStandingSettlementInstructionDetails allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddStandingSettlementInstructionDetails() *iso20022.StandingSettlementInstruction11 {
	s.StandingSettlementInstructionDetails = new(iso20022.StandingSettlementInstruction11)
	return s.StandingSettlementInstructionDetails
}
// AddSettlementParameters allocates the component and returns it for population.
func (s *SecuritiesFinancingConfirmationV06) AddSettlementParameters() *iso20022.SettlementDetails96 {
	s.SettlementParameters = new(iso20022.SettlementDetails96)
	return s.SettlementParameters
}
func (s *SecuritiesFinancingConfirmationV06) AddDeliveringSettlementParties() *iso20022.SettlementParties36 {
s.DeliveringSettlementParties = new(iso20022.SettlementParties36)
return s.DeliveringSettlementParties
}
func (s *SecuritiesFinancingConfirmationV06) AddReceivingSettlementParties() *iso20022.SettlementParties36 {
s.ReceivingSettlementParties = new(iso20022.SettlementParties36)
return s.ReceivingSettlementParties
}
func (s *SecuritiesFinancingConfirmationV06) AddCashParties() *iso20022.CashParties26 {
s.CashParties = new(iso20022.CashParties26)
return s.CashParties
}
func (s *SecuritiesFinancingConfirmationV06) AddSettledAmount() *iso20022.AmountAndDirection46 {
s.SettledAmount = new(iso20022.AmountAndDirection46)
return s.SettledAmount
}
func (s *SecuritiesFinancingConfirmationV06) AddOtherAmounts() *iso20022.OtherAmounts31 {
s.OtherAmounts = new(iso20022.OtherAmounts31)
return s.OtherAmounts
}
func (s *SecuritiesFinancingConfirmationV06) AddOtherBusinessParties() *iso20022.OtherParties27 {
s.OtherBusinessParties = new(iso20022.OtherParties27)
return s.OtherBusinessParties
}
func (s *SecuritiesFinancingConfirmationV06) AddSupplementaryData() *iso20022.SupplementaryData1 {
newValue := new(iso20022.SupplementaryData1)
s.SupplementaryData = append(s.SupplementaryData, newValue)
return newValue
}
|
package main
//Slice usage notes.
//Right after initialization, len == cap.
//When appending past cap, the relationship between len and cap is: while cap
//is below 1024 it grows by doubling; above 1024 it grows by roughly 1/4 per step.
// main is intentionally empty; this file exists only to document the slice
// growth behavior described in the comments above.
func main() {
}
|
// This file was generated by counterfeiter
package modelsfakes
import (
"gcp-service-broker/brokerapi/brokers/models"
"sync"
)
// FakeServiceBrokerHelper is a counterfeiter-generated test double for
// models.ServiceBrokerHelper. For each faked method X it carries four pieces
// of state: XStub (optional behavior override), xMutex (guards the recorded
// calls), xArgsForCall (one struct per recorded invocation), and xReturns
// (canned values returned when no stub is set).
type FakeServiceBrokerHelper struct {
	ProvisionStub func(instanceId string, details models.ProvisionDetails, plan models.PlanDetails) (models.ServiceInstanceDetails, error)
	provisionMutex sync.RWMutex
	provisionArgsForCall []struct {
		instanceId string
		details models.ProvisionDetails
		plan models.PlanDetails
	}
	provisionReturns struct {
		result1 models.ServiceInstanceDetails
		result2 error
	}
	BindStub func(instanceID, bindingID string, details models.BindDetails) (models.ServiceBindingCredentials, error)
	bindMutex sync.RWMutex
	bindArgsForCall []struct {
		instanceID string
		bindingID string
		details models.BindDetails
	}
	bindReturns struct {
		result1 models.ServiceBindingCredentials
		result2 error
	}
	BuildInstanceCredentialsStub func(bindDetails map[string]string, instanceDetails map[string]string) map[string]string
	buildInstanceCredentialsMutex sync.RWMutex
	buildInstanceCredentialsArgsForCall []struct {
		bindDetails map[string]string
		instanceDetails map[string]string
	}
	buildInstanceCredentialsReturns struct {
		result1 map[string]string
	}
	UnbindStub func(details models.ServiceBindingCredentials) error
	unbindMutex sync.RWMutex
	unbindArgsForCall []struct {
		details models.ServiceBindingCredentials
	}
	unbindReturns struct {
		result1 error
	}
	DeprovisionStub func(instanceID string, details models.DeprovisionDetails) error
	deprovisionMutex sync.RWMutex
	deprovisionArgsForCall []struct {
		instanceID string
		details models.DeprovisionDetails
	}
	deprovisionReturns struct {
		result1 error
	}
	PollInstanceStub func(instanceID string) (bool, error)
	pollInstanceMutex sync.RWMutex
	pollInstanceArgsForCall []struct {
		instanceID string
	}
	pollInstanceReturns struct {
		result1 bool
		result2 error
	}
	AsyncStub func() bool
	asyncMutex sync.RWMutex
	asyncArgsForCall []struct{}
	asyncReturns struct {
		result1 bool
	}
	// invocations records every call by method name; guarded by invocationsMutex.
	invocations map[string][][]interface{}
	invocationsMutex sync.RWMutex
}
// Provision records its arguments and the invocation, then delegates to
// ProvisionStub when one is set; otherwise it returns the canned values
// configured via ProvisionReturns.
func (fake *FakeServiceBrokerHelper) Provision(instanceId string, details models.ProvisionDetails, plan models.PlanDetails) (models.ServiceInstanceDetails, error) {
	fake.provisionMutex.Lock()
	fake.provisionArgsForCall = append(fake.provisionArgsForCall, struct {
		instanceId string
		details models.ProvisionDetails
		plan models.PlanDetails
	}{instanceId, details, plan})
	fake.recordInvocation("Provision", []interface{}{instanceId, details, plan})
	fake.provisionMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.ProvisionStub != nil {
		return fake.ProvisionStub(instanceId, details, plan)
	}
	return fake.provisionReturns.result1, fake.provisionReturns.result2
}

// ProvisionCallCount reports how many times Provision has been invoked.
func (fake *FakeServiceBrokerHelper) ProvisionCallCount() int {
	fake.provisionMutex.RLock()
	defer fake.provisionMutex.RUnlock()
	return len(fake.provisionArgsForCall)
}

// ProvisionArgsForCall returns the arguments of the i-th Provision call.
func (fake *FakeServiceBrokerHelper) ProvisionArgsForCall(i int) (string, models.ProvisionDetails, models.PlanDetails) {
	fake.provisionMutex.RLock()
	defer fake.provisionMutex.RUnlock()
	return fake.provisionArgsForCall[i].instanceId, fake.provisionArgsForCall[i].details, fake.provisionArgsForCall[i].plan
}

// ProvisionReturns fixes the values Provision returns and clears any stub.
func (fake *FakeServiceBrokerHelper) ProvisionReturns(result1 models.ServiceInstanceDetails, result2 error) {
	fake.ProvisionStub = nil
	fake.provisionReturns = struct {
		result1 models.ServiceInstanceDetails
		result2 error
	}{result1, result2}
}
// Bind records its arguments and the invocation, then delegates to BindStub
// when one is set; otherwise it returns the canned values from BindReturns.
func (fake *FakeServiceBrokerHelper) Bind(instanceID string, bindingID string, details models.BindDetails) (models.ServiceBindingCredentials, error) {
	fake.bindMutex.Lock()
	fake.bindArgsForCall = append(fake.bindArgsForCall, struct {
		instanceID string
		bindingID string
		details models.BindDetails
	}{instanceID, bindingID, details})
	fake.recordInvocation("Bind", []interface{}{instanceID, bindingID, details})
	fake.bindMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.BindStub != nil {
		return fake.BindStub(instanceID, bindingID, details)
	}
	return fake.bindReturns.result1, fake.bindReturns.result2
}

// BindCallCount reports how many times Bind has been invoked.
func (fake *FakeServiceBrokerHelper) BindCallCount() int {
	fake.bindMutex.RLock()
	defer fake.bindMutex.RUnlock()
	return len(fake.bindArgsForCall)
}

// BindArgsForCall returns the arguments of the i-th Bind call.
func (fake *FakeServiceBrokerHelper) BindArgsForCall(i int) (string, string, models.BindDetails) {
	fake.bindMutex.RLock()
	defer fake.bindMutex.RUnlock()
	return fake.bindArgsForCall[i].instanceID, fake.bindArgsForCall[i].bindingID, fake.bindArgsForCall[i].details
}

// BindReturns fixes the values Bind returns and clears any stub.
func (fake *FakeServiceBrokerHelper) BindReturns(result1 models.ServiceBindingCredentials, result2 error) {
	fake.BindStub = nil
	fake.bindReturns = struct {
		result1 models.ServiceBindingCredentials
		result2 error
	}{result1, result2}
}
// BuildInstanceCredentials records its arguments and the invocation, then
// delegates to BuildInstanceCredentialsStub when one is set; otherwise it
// returns the canned map from BuildInstanceCredentialsReturns.
func (fake *FakeServiceBrokerHelper) BuildInstanceCredentials(bindDetails map[string]string, instanceDetails map[string]string) map[string]string {
	fake.buildInstanceCredentialsMutex.Lock()
	fake.buildInstanceCredentialsArgsForCall = append(fake.buildInstanceCredentialsArgsForCall, struct {
		bindDetails map[string]string
		instanceDetails map[string]string
	}{bindDetails, instanceDetails})
	fake.recordInvocation("BuildInstanceCredentials", []interface{}{bindDetails, instanceDetails})
	fake.buildInstanceCredentialsMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.BuildInstanceCredentialsStub != nil {
		return fake.BuildInstanceCredentialsStub(bindDetails, instanceDetails)
	}
	return fake.buildInstanceCredentialsReturns.result1
}

// BuildInstanceCredentialsCallCount reports how many times the method has run.
func (fake *FakeServiceBrokerHelper) BuildInstanceCredentialsCallCount() int {
	fake.buildInstanceCredentialsMutex.RLock()
	defer fake.buildInstanceCredentialsMutex.RUnlock()
	return len(fake.buildInstanceCredentialsArgsForCall)
}

// BuildInstanceCredentialsArgsForCall returns the arguments of the i-th call.
func (fake *FakeServiceBrokerHelper) BuildInstanceCredentialsArgsForCall(i int) (map[string]string, map[string]string) {
	fake.buildInstanceCredentialsMutex.RLock()
	defer fake.buildInstanceCredentialsMutex.RUnlock()
	return fake.buildInstanceCredentialsArgsForCall[i].bindDetails, fake.buildInstanceCredentialsArgsForCall[i].instanceDetails
}

// BuildInstanceCredentialsReturns fixes the returned map and clears any stub.
func (fake *FakeServiceBrokerHelper) BuildInstanceCredentialsReturns(result1 map[string]string) {
	fake.BuildInstanceCredentialsStub = nil
	fake.buildInstanceCredentialsReturns = struct {
		result1 map[string]string
	}{result1}
}
// Unbind records its argument and the invocation, then delegates to
// UnbindStub when one is set; otherwise it returns the canned error from
// UnbindReturns.
func (fake *FakeServiceBrokerHelper) Unbind(details models.ServiceBindingCredentials) error {
	fake.unbindMutex.Lock()
	fake.unbindArgsForCall = append(fake.unbindArgsForCall, struct {
		details models.ServiceBindingCredentials
	}{details})
	fake.recordInvocation("Unbind", []interface{}{details})
	fake.unbindMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.UnbindStub != nil {
		return fake.UnbindStub(details)
	}
	return fake.unbindReturns.result1
}

// UnbindCallCount reports how many times Unbind has been invoked.
func (fake *FakeServiceBrokerHelper) UnbindCallCount() int {
	fake.unbindMutex.RLock()
	defer fake.unbindMutex.RUnlock()
	return len(fake.unbindArgsForCall)
}

// UnbindArgsForCall returns the argument of the i-th Unbind call.
func (fake *FakeServiceBrokerHelper) UnbindArgsForCall(i int) models.ServiceBindingCredentials {
	fake.unbindMutex.RLock()
	defer fake.unbindMutex.RUnlock()
	return fake.unbindArgsForCall[i].details
}

// UnbindReturns fixes the error Unbind returns and clears any stub.
func (fake *FakeServiceBrokerHelper) UnbindReturns(result1 error) {
	fake.UnbindStub = nil
	fake.unbindReturns = struct {
		result1 error
	}{result1}
}
// Deprovision records its arguments and the invocation, then delegates to
// DeprovisionStub when one is set; otherwise it returns the canned error
// from DeprovisionReturns.
func (fake *FakeServiceBrokerHelper) Deprovision(instanceID string, details models.DeprovisionDetails) error {
	fake.deprovisionMutex.Lock()
	fake.deprovisionArgsForCall = append(fake.deprovisionArgsForCall, struct {
		instanceID string
		details models.DeprovisionDetails
	}{instanceID, details})
	fake.recordInvocation("Deprovision", []interface{}{instanceID, details})
	fake.deprovisionMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.DeprovisionStub != nil {
		return fake.DeprovisionStub(instanceID, details)
	}
	return fake.deprovisionReturns.result1
}

// DeprovisionCallCount reports how many times Deprovision has been invoked.
func (fake *FakeServiceBrokerHelper) DeprovisionCallCount() int {
	fake.deprovisionMutex.RLock()
	defer fake.deprovisionMutex.RUnlock()
	return len(fake.deprovisionArgsForCall)
}

// DeprovisionArgsForCall returns the arguments of the i-th Deprovision call.
func (fake *FakeServiceBrokerHelper) DeprovisionArgsForCall(i int) (string, models.DeprovisionDetails) {
	fake.deprovisionMutex.RLock()
	defer fake.deprovisionMutex.RUnlock()
	return fake.deprovisionArgsForCall[i].instanceID, fake.deprovisionArgsForCall[i].details
}

// DeprovisionReturns fixes the error Deprovision returns and clears any stub.
func (fake *FakeServiceBrokerHelper) DeprovisionReturns(result1 error) {
	fake.DeprovisionStub = nil
	fake.deprovisionReturns = struct {
		result1 error
	}{result1}
}
// PollInstance records its argument and the invocation, then delegates to
// PollInstanceStub when one is set; otherwise it returns the canned values
// from PollInstanceReturns.
func (fake *FakeServiceBrokerHelper) PollInstance(instanceID string) (bool, error) {
	fake.pollInstanceMutex.Lock()
	fake.pollInstanceArgsForCall = append(fake.pollInstanceArgsForCall, struct {
		instanceID string
	}{instanceID})
	fake.recordInvocation("PollInstance", []interface{}{instanceID})
	fake.pollInstanceMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.PollInstanceStub != nil {
		return fake.PollInstanceStub(instanceID)
	}
	return fake.pollInstanceReturns.result1, fake.pollInstanceReturns.result2
}

// PollInstanceCallCount reports how many times PollInstance has been invoked.
func (fake *FakeServiceBrokerHelper) PollInstanceCallCount() int {
	fake.pollInstanceMutex.RLock()
	defer fake.pollInstanceMutex.RUnlock()
	return len(fake.pollInstanceArgsForCall)
}

// PollInstanceArgsForCall returns the argument of the i-th PollInstance call.
func (fake *FakeServiceBrokerHelper) PollInstanceArgsForCall(i int) string {
	fake.pollInstanceMutex.RLock()
	defer fake.pollInstanceMutex.RUnlock()
	return fake.pollInstanceArgsForCall[i].instanceID
}

// PollInstanceReturns fixes the values PollInstance returns and clears any stub.
func (fake *FakeServiceBrokerHelper) PollInstanceReturns(result1 bool, result2 error) {
	fake.PollInstanceStub = nil
	fake.pollInstanceReturns = struct {
		result1 bool
		result2 error
	}{result1, result2}
}
// Async records the invocation, then delegates to AsyncStub when one is set;
// otherwise it returns the canned value from AsyncReturns.
func (fake *FakeServiceBrokerHelper) Async() bool {
	fake.asyncMutex.Lock()
	fake.asyncArgsForCall = append(fake.asyncArgsForCall, struct{}{})
	fake.recordInvocation("Async", []interface{}{})
	fake.asyncMutex.Unlock()
	// Early return via the stub; no else after a terminating if.
	if fake.AsyncStub != nil {
		return fake.AsyncStub()
	}
	return fake.asyncReturns.result1
}

// AsyncCallCount reports how many times Async has been invoked.
func (fake *FakeServiceBrokerHelper) AsyncCallCount() int {
	fake.asyncMutex.RLock()
	defer fake.asyncMutex.RUnlock()
	return len(fake.asyncArgsForCall)
}

// AsyncReturns fixes the value Async returns and clears any stub.
func (fake *FakeServiceBrokerHelper) AsyncReturns(result1 bool) {
	fake.AsyncStub = nil
	fake.asyncReturns = struct {
		result1 bool
	}{result1}
}
// Invocations returns the map of all recorded calls keyed by method name.
// Every per-method mutex is read-locked (and held via defer until return) so
// that no recording is in flight while the map is read. Note the returned map
// is the live internal map, not a copy.
func (fake *FakeServiceBrokerHelper) Invocations() map[string][][]interface{} {
	fake.invocationsMutex.RLock()
	defer fake.invocationsMutex.RUnlock()
	fake.provisionMutex.RLock()
	defer fake.provisionMutex.RUnlock()
	fake.bindMutex.RLock()
	defer fake.bindMutex.RUnlock()
	fake.buildInstanceCredentialsMutex.RLock()
	defer fake.buildInstanceCredentialsMutex.RUnlock()
	fake.unbindMutex.RLock()
	defer fake.unbindMutex.RUnlock()
	fake.deprovisionMutex.RLock()
	defer fake.deprovisionMutex.RUnlock()
	fake.pollInstanceMutex.RLock()
	defer fake.pollInstanceMutex.RUnlock()
	fake.asyncMutex.RLock()
	defer fake.asyncMutex.RUnlock()
	return fake.invocations
}
// recordInvocation appends the arguments of one call to the shared
// invocations map under the given method name, creating the map lazily.
func (fake *FakeServiceBrokerHelper) recordInvocation(key string, args []interface{}) {
	fake.invocationsMutex.Lock()
	defer fake.invocationsMutex.Unlock()
	if fake.invocations == nil {
		fake.invocations = map[string][][]interface{}{}
	}
	// append handles a nil slice for a previously unseen key, so the
	// per-key empty-slice initialization the generator emitted is not needed.
	fake.invocations[key] = append(fake.invocations[key], args)
}

// Compile-time check that the fake satisfies models.ServiceBrokerHelper.
var _ models.ServiceBrokerHelper = new(FakeServiceBrokerHelper)
|
package main
import (
"fmt"
"flag"
"strings"
"github.com/zettazete/sms"
)
// main sends an SMS: the first CLI argument is the destination number and
// the remaining arguments are joined into the message body.
func main() {
	flag.Parse()
	if flag.NArg() < 2 {
		fmt.Println("Usage: gotext {number} {message}")
		return
	}
	number := flag.Arg(0)
	message := strings.Join(flag.Args()[1:], " ")
	resp, err := sms.Text(number, message)
	if err != nil {
		fmt.Println(err)
		return
	}
	if !resp.Success {
		// BUG FIX: previously this fell through and printed the success
		// message even when the gateway rejected the send.
		fmt.Println(resp)
		return
	}
	fmt.Println("Your message was sent successfully.")
}
|
package main
import (
"fmt"
"github.com/bearname/videohost/cmd/videoserver/config"
"github.com/bearname/videohost/internal/common/infrarstructure/mysql"
"github.com/bearname/videohost/internal/common/infrarstructure/server"
"github.com/bearname/videohost/internal/videoserver/infrastructure/transport/router"
_ "github.com/go-sql-driver/mysql"
log "github.com/sirupsen/logrus"
)
// main wires up the video server: parses configuration, opens the MySQL
// connection pool, builds the HTTP router, and starts the server.
func main() {
	//logFile := "video.log"
	//log.SetFormatter(&log.JSONFormatter{})
	//file, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)
	//if err == nil {
	//	log.SetOutput(file)
	//	defer file.Close()
	//}
	parseConfig, err := config.ParseConfig()
	if err != nil {
		log.WithError(err).Fatal("failed to parse parseConfig")
	}
	connector := mysql.ConnectorImpl{}
	err = connector.Connect(parseConfig.DbUser, parseConfig.DbPassword, parseConfig.DbAddress, parseConfig.DbName)
	if err != nil {
		// Consistency fix: report through the structured logger like the
		// rest of main, instead of fmt.Println which bypasses log config.
		log.WithError(err).Error("unable to connect to connector")
		return
	}
	defer connector.Close()
	connector.SetMaxOpenConns(10)
	// NOTE(review): 100 is passed bare; if this parameter is a time.Duration
	// it means 100ns, not 100s — confirm the intended idle timeout.
	connector.SetConnMaxIdleTime(100)
	handler := router.Router(&connector, parseConfig.MessageBrokerAddress, parseConfig.AuthServerAddress, parseConfig.RedisAddress, parseConfig.RedisPassword)
	if handler == nil {
		// Router logs its own failure reason; nothing to serve.
		return
	}
	server.ExecuteServer("videoserver", parseConfig.Port, handler)
}
|
/*
* Copyright (c) 2020 - present Kurtosis Technologies LLC.
* All Rights Reserved.
*/
package fixed_size_example_network
import (
"fmt"
"github.com/gmarchetti/elasticsearch-indexer-testing-v1/elasticsearch_indexer/services"
"github.com/kurtosis-tech/kurtosis-go/lib/networks"
"github.com/kurtosis-tech/kurtosis-go/lib/services"
"github.com/palantir/stacktrace"
)
const (
	// vanillaConfigId is the single service configuration the loader registers.
	vanillaConfigId networks.ConfigurationID = "vanilla"
	// serviceIdPrefix is prepended to the numeric index to form each service ID.
	serviceIdPrefix = "service-"
)
// ======================================== NETWORK ==============================================
// FixedSizeExampleNetwork wraps a raw ServiceNetwork together with the fixed
// number of nodes it was built with.
type FixedSizeExampleNetwork struct{
	rawNetwork *networks.ServiceNetwork
	numNodes int
}
// GetNumNodes returns the fixed number of nodes in the network.
func (network FixedSizeExampleNetwork) GetNumNodes() int {
	return network.numNodes
}
// GetService returns the ExampleService running at the given index.
// It returns an error when the index is out of range, when the underlying
// network lookup fails, or when the registered service has the wrong type.
func (network *FixedSizeExampleNetwork) GetService(idInt int) (services.ExampleService, error) {
	if idInt < 0 || idInt >= network.numNodes {
		return nil, stacktrace.NewError("Invalid service ID '%v'", idInt)
	}
	serviceId := networks.ServiceID(fmt.Sprintf("%v%v", serviceIdPrefix, idInt))
	serviceNode, err := network.rawNetwork.GetService(serviceId)
	if err != nil {
		return nil, stacktrace.Propagate(err, "An error occurred getting the service node info")
	}
	// Guard the downcast: a bare type assertion would panic on a mismatch.
	castedService, ok := serviceNode.Service.(services.ExampleService)
	if !ok {
		return nil, stacktrace.NewError("Service '%v' is not an ExampleService", serviceId)
	}
	return castedService, nil
}
// ======================================== NETWORK LOADER ==============================================
// FixedSizeExampleNetworkLoader configures and initializes a network of
// numNodes identical services running the given Docker image.
type FixedSizeExampleNetworkLoader struct {
	numNodes int
	serviceImage string
}
// NewFixedSizeExampleNetworkLoader constructs a loader for the given node
// count and service image.
func NewFixedSizeExampleNetworkLoader(numNodes int, serviceImage string) *FixedSizeExampleNetworkLoader {
	return &FixedSizeExampleNetworkLoader{
		numNodes: numNodes,
		serviceImage: serviceImage,
	}
}
// ConfigureNetwork registers the single "vanilla" configuration, pairing the
// loader's service image with the example initializer and availability checker.
func (loader FixedSizeExampleNetworkLoader) ConfigureNetwork(builder *networks.ServiceNetworkBuilder) error {
	initializerCore := services.ExampleServiceInitializerCore{}
	checkerCore := services.ExampleAvailabilityCheckerCore{}
	builder.AddConfiguration(vanillaConfigId, loader.serviceImage, initializerCore, checkerCore)
	return nil
}
// InitializeNetwork adds numNodes services (service-0, service-1, ...) with no
// dependencies, returning one availability checker per added service.
func (loader FixedSizeExampleNetworkLoader) InitializeNetwork(network *networks.ServiceNetwork) (map[networks.ServiceID]services.ServiceAvailabilityChecker, error) {
	checkers := make(map[networks.ServiceID]services.ServiceAvailabilityChecker)
	for idx := 0; idx < loader.numNodes; idx++ {
		serviceId := networks.ServiceID(fmt.Sprintf("%v%v", serviceIdPrefix, idx))
		checker, err := network.AddService(vanillaConfigId, serviceId, map[networks.ServiceID]bool{})
		if err != nil {
			return nil, stacktrace.Propagate(err, "An error occurred adding service with ID '%v' to the network", serviceId)
		}
		checkers[serviceId] = *checker
	}
	return checkers, nil
}
// WrapNetwork adapts the raw ServiceNetwork into a FixedSizeExampleNetwork
// carrying the loader's node count.
// NOTE(review): this returns a value, while GetService is declared on the
// pointer receiver — confirm that callers of the networks.Network obtain the
// method set they expect.
func (loader FixedSizeExampleNetworkLoader) WrapNetwork(network *networks.ServiceNetwork) (networks.Network, error) {
	return FixedSizeExampleNetwork{
		rawNetwork: network,
		numNodes: loader.numNodes,
	}, nil
}
|
package c35_mitm_diffie_hellman
import (
"bytes"
"testing"
)
// TestEchoStream verifies a clean (no attacker) exchange: B receives the
// plaintext and both parties agree on the last message seen.
func TestEchoStream(t *testing.T) {
	uA := NewUser("A")
	uB := NewUser("B")
	msg := []byte("secret text")
	EchoStream(uA, uB, msg)
	if !bytes.Equal(uB.lastReceivedMessage, msg) || !bytes.Equal(uA.lastReceivedMessage, uB.lastReceivedMessage) {
		t.Errorf("Incorrect EchoStream\n")
	}
}
// g == 1 forces the shared secret to 1 deterministically, so no retries.
func TestEchoMITMStreamGEqualOne(t *testing.T) {
	mitm(t, gEqualOne, 0)
}
// g == p forces the shared secret to 0 mod p deterministically, so no retries.
func TestEchoMITMStreamGEqualP(t *testing.T) {
	mitm(t, gEqualP, 0)
}
// g == p-1 yields a secret of ±1, so a handful of retries are allowed.
func TestEchoMITMStreamGPMinusOne(t *testing.T) {
	mitm(t, gPMinusOne, 5)
}
// mitm runs one man-in-the-middle echo exchange with the given g selection
// and checks that (a) the attacker M recovered the plaintext and (b) the
// endpoints A and B still agree on the message. If a check fails and retry
// is positive, the whole exchange is re-run recursively with fresh users
// (presumably some g choices only leak the key probabilistically — confirm
// against the MITM implementation).
func mitm(t *testing.T, gType, retry int) {
	uA := NewUser("A")
	uM := NewMITM("M")
	uM.gType = gType
	uB := NewUser("B")
	msg := []byte("secret text")
	EchoMITMStream(uA, uM, uB, msg)
	if !bytes.Equal(uM.decryptedMessage, msg) {
		if retry == 0 {
			t.Fatalf("Incorrect EchoMITMStream\n")
		} else {
			// Re-run the full exchange; this outer call's state is then
			// still checked below. NOTE(review): after a successful retry
			// the second check re-inspects the stale outer uA/uB — confirm
			// this is intended.
			mitm(t, gType, retry-1)
		}
	}
	if !bytes.Equal(uB.lastReceivedMessage, msg) || !bytes.Equal(uA.lastReceivedMessage, uB.lastReceivedMessage) {
		if retry == 0 {
			t.Fatalf("Incorrect EchoStream\n")
		} else {
			mitm(t, gType, retry-1)
		}
	}
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package kubernetes
import v1 "k8s.io/api/core/v1"
// IsNodeReady reports whether the node's NodeReady condition is set to true.
//
// Copy of https://github.com/kubernetes/kubernetes/blob/886e04f1fffbb04faf8a9f9ee141143b2684ae68/pkg/api/v1/node/util.go#L40
func IsNodeReady(node *v1.Node) bool {
	for i := range node.Status.Conditions {
		cond := &node.Status.Conditions[i]
		if cond.Type != v1.NodeReady {
			continue
		}
		// First NodeReady condition wins, matching upstream behavior.
		return cond.Status == v1.ConditionTrue
	}
	return false
}
|
// Package auth contains authentication for the MQTT Server
package auth
// Interface for authentication
// Interface describes what an authenticated MQTT session may do.
type Interface interface {
	Username() string
	CanConnect() bool
	CanPublishTo(topic string) bool
	CanSubscribeTo(topic string) bool
}
// Plugin is the authentication entry point: given the connecting client's
// identifier and credentials it yields an Interface (or an error).
type Plugin func(clientIdentifier string, username string, password []byte) (Interface, error)
// NoAuth is a Plugin that accepts every client and grants full access.
func NoAuth(clientIdentifier string, username string, password []byte) (Interface, error) {
	auth := noAuth{username: username}
	return &auth, nil
}
// noAuth is the permit-everything implementation backing NoAuth.
type noAuth struct {
	username string
}
// Username returns the name supplied at connect time.
func (a noAuth) Username() string {
	return a.username
}
// CanConnect always allows the connection.
func (a noAuth) CanConnect() bool {
	return true
}
// CanPublishTo allows publishing to any topic.
func (a noAuth) CanPublishTo(topic string) bool {
	return true
}
// CanSubscribeTo allows subscribing to any topic.
func (a noAuth) CanSubscribeTo(topic string) bool {
	return true
}
|
package api_test
import (
"errors"
"net/http"
"net/http/httptest"
"encoding/json"
"github.com/gorilla/mux"
. "github.com/hirondelle-app/api/api"
. "github.com/hirondelle-app/api/common/test"
"github.com/hirondelle-app/api/tweets"
. "github.com/hirondelle-app/api/tweets/test"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/mock"
"strings"
)
// Ginkgo suite for the tweets HTTP handlers. BeforeEach wires a fresh
// recorder and MockTweetsManager into the handler under test; each Context
// then programs the mock and asserts on the recorded response.
var _ = Describe("Tweets", func() {
	var (
		request *http.Request
		responseRecorder *httptest.ResponseRecorder
		mockTweetsManager *MockTweetsManager
		tweetsHandlers *TweetsHandlers
		err error
	)
	BeforeEach(func() {
		responseRecorder = httptest.NewRecorder()
		mockTweetsManager = &MockTweetsManager{}
		tweetsHandlers = &TweetsHandlers{
			Manager: mockTweetsManager,
		}
	})
	// GET /tweets — success and manager-failure paths.
	Describe("GetTweetsEndpoint", func() {
		JustBeforeEach(func() {
			request, _ = http.NewRequest("GET", "/tweets", nil)
			tweetsHandlers.GetTweetsEndpoint(responseRecorder, request)
		})
		AfterEach(func() {
			err = nil
		})
		Context("when the manager successfully returns the tweets", func() {
			BeforeEach(func() {
				mockTweetsManager.On("GetAllTweets").Return([]tweets.Tweet{
					tweets.Tweet{TweetID: "ec815d46-e647-11e6-9902-8bf35f54ad22", Likes: 34, Retweets: 45, KeywordID: 56, Keyword: tweets.Keyword{Label: "python"}},
					tweets.Tweet{TweetID: "f0143618-e647-11e6-9656-4f834a284cc4", Likes: 43, Retweets: 76, KeywordID: 787, Keyword: tweets.Keyword{Label: "golang"}},
					tweets.Tweet{TweetID: "f53feefc-e647-11e6-8213-07d71760f8c3", Likes: 56, Retweets: 93, KeywordID: 12, Keyword: tweets.Keyword{Label: "java"}},
				}, nil)
			})
			It("should respond with a http status 200", func() {
				Expect(responseRecorder.Code).To(Equal(200))
			})
			It(`should respond with a content type "application/json"`, func() {
				Expect(responseRecorder.Header().Get("Content-Type")).To(Equal("application/json"))
			})
			It("should respond with a valid JSON", func() {
				Expect(responseRecorder.Body.String()).To(MatchJSON(ReadContentFileString("test/tweets-response-success.json")))
			})
			It("should get the tweet list from the underlying service", func() {
				Expect(len(mockTweetsManager.Calls)).To(Equal(1))
			})
		})
		Context("when the manager fails to return the tweets", func() {
			BeforeEach(func() {
				err = errors.New("Something really terrible happened!")
				mockTweetsManager.On("GetAllTweets").Return([]tweets.Tweet{}, err)
			})
			It("should respond with an invalid JSON", func() {
				Expect(responseRecorder.Body.String()).NotTo(MatchJSON(ReadContentFileString("test/tweets-response-success.json")))
			})
			It("Should respond with an error", func() {
				Expect(responseRecorder.Body.String()).To(MatchRegexp(err.Error()))
			})
		})
	})
	// DELETE /tweets/{tweetID} — routed through mux so the path var is parsed.
	Describe("DeleteTweetEndpoint", func() {
		JustBeforeEach(func() {
			request, _ = http.NewRequest("DELETE", "/tweets/1", nil)
			m := mux.NewRouter()
			m.HandleFunc("/tweets/{tweetID}", tweetsHandlers.DeleteTweetEndpoint)
			m.ServeHTTP(responseRecorder, request)
		})
		AfterEach(func() {
			err = nil
		})
		Context("when the manager successfully delete the tweet", func() {
			BeforeEach(func() {
				mockTweetsManager.On("GetTweetByID", mock.Anything).Return(tweets.Tweet{
					TweetID: "f53feefc-e647-11e6-8213-07d71760f8c3",
					Likes: 56,
					Retweets: 93,
					KeywordID: 12,
					Keyword: tweets.Keyword{Label: "java"}},
					nil)
				mockTweetsManager.On("DeleteTweet", mock.Anything).Return(nil)
			})
			It("should respond with a http status 204", func() {
				Expect(responseRecorder.Code).To(Equal(204))
			})
		})
		Context("when the manager fails to find the tweet", func() {
			BeforeEach(func() {
				err = errors.New("There is an error !")
				mockTweetsManager.On("GetTweetByID", mock.Anything).Return(tweets.Tweet{}, err)
				mockTweetsManager.On("DeleteTweet", mock.Anything).Return(err)
			})
			It("should respond with a http status code 404", func() {
				Expect(responseRecorder.Code).To(Equal(404))
			})
			It("Should respond with an error", func() {
				Expect(responseRecorder.Body.String()).To(MatchRegexp(err.Error()))
			})
			It(`should respond with a content type "application/json"`, func() {
				Expect(responseRecorder.Header().Get("Content-Type")).To(Equal("application/json"))
			})
		})
		Context("when the manager fails to delete the tweet", func() {
			BeforeEach(func() {
				err = errors.New("There is an error !")
				mockTweetsManager.On("GetTweetByID", mock.Anything).Return(tweets.Tweet{
					TweetID: "f53feefc-e647-11e6-8213-07d71760f8c3",
					Likes: 56,
					Retweets: 93,
					KeywordID: 12,
					Keyword: tweets.Keyword{Label: "java"}},
					nil)
				mockTweetsManager.On("DeleteTweet", mock.Anything).Return(err)
			})
			It("should respond with a http status code 400", func() {
				Expect(responseRecorder.Code).To(Equal(400))
			})
			It("Should respond with an error", func() {
				Expect(responseRecorder.Body.String()).To(MatchRegexp(err.Error()))
			})
			It(`should respond with a content type "application/json"`, func() {
				Expect(responseRecorder.Header().Get("Content-Type")).To(Equal("application/json"))
			})
		})
	})
	// POST /keywords — currently disabled (XDescribe skips the whole group).
	XDescribe("PostKeywordEndpoint", func() {
		Context("when the keyword is successfully created", func() {
			JustBeforeEach(func() {
				keyword := map[string]string{"label": "python"}
				keywordJSON, _ := json.Marshal(keyword)
				request, _ = http.NewRequest("POST", "/keywords", strings.NewReader(string(keywordJSON)))
				tweetsHandlers.PostKeywordEndpoint(responseRecorder, request)
			})
			BeforeEach(func() {
				mockTweetsManager.On("CreateKeyword", mock.Anything).Return(nil)
			})
			It("should respond with a http status 201", func() {
				Expect(responseRecorder.Code).To(Equal(201))
			})
			It("should create the keyword", func() {
				keywordUsedForCreation := mockTweetsManager.GetCallsForMethod("CreateKeyword")[0].Arguments.Get(0).(*tweets.Keyword)
				Expect(keywordUsedForCreation).To(Equal(&tweets.Keyword{
					Label: "python",
				}))
			})
		})
		Context("when the keyword is not set", func() {
			err := errors.New("Label must not be empty")
			JustBeforeEach(func() {
				request, _ = http.NewRequest("POST", "/keywords", strings.NewReader(""))
				tweetsHandlers.PostKeywordEndpoint(responseRecorder, request)
			})
			It("should respond with a http status 400", func() {
				Expect(responseRecorder.Code).To(Equal(400))
			})
			It("should respond with the incorrect message", func() {
				Expect(responseRecorder.Body.String()).To(MatchRegexp(err.Error()))
			})
		})
		Context("when the keyword is not inserted in database", func() {
			err := errors.New("There is an error with the database")
			JustBeforeEach(func() {
				keyword := map[string]string{"label": "python"}
				keywordJSON, _ := json.Marshal(keyword)
				request, _ = http.NewRequest("POST", "/keywords", strings.NewReader(string(keywordJSON)))
				tweetsHandlers.PostKeywordEndpoint(responseRecorder, request)
			})
			BeforeEach(func() {
				mockTweetsManager.On("CreateKeyword", mock.Anything).Return(err)
			})
			It("should respond with a http status 400", func() {
				Expect(responseRecorder.Code).To(Equal(400))
			})
			It("should respond with the correct message", func() {
				Expect(responseRecorder.Body.String()).To(MatchRegexp(err.Error()))
			})
		})
	})
})
|
package authorization
import (
"testing"
"github.com/danielsomerfield/authful/server/handlers"
"fmt"
"net/url"
"github.com/danielsomerfield/authful/server/service/oauth"
"github.com/danielsomerfield/authful/common/util"
util2 "github.com/danielsomerfield/authful/common/util"
oauth2 "github.com/danielsomerfield/authful/server/wire/oauth"
"os"
"net/http"
)
// Shared fixtures for the authorization handler tests.
var validClientId = "valid-client-id"
var validClientSecret = "valid-client-secret"
var invalidClientId = "invalid-client-id"
// defaultRedirect is what a client falls back to when no redirect_uri is sent.
var defaultRedirect = "https://example.com/defaultRedirect"
var validClient = MockClient{
	validRedirects: []string{"https://example.com/redirect"},
}
// MockClient is a stub oauth client with a configurable redirect whitelist.
type MockClient struct {
	validRedirects []string
}
// CheckSecret accepts only the fixture secret.
func (MockClient) CheckSecret(secret string) bool {
	return secret == validClientSecret
}
// GetDefaultRedirectURI returns the fixture default redirect.
func (mc MockClient) GetDefaultRedirectURI() string {
	return defaultRedirect
}
// GetScopes returns no scopes; the mock client has none registered.
func (mc MockClient) GetScopes() []string {
	return []string{}
}
// IsValidRedirectURI reports whether uri is on the configured whitelist.
func (mc MockClient) IsValidRedirectURI(uri string) bool {
	return util2.Contains(mc.validRedirects, uri)
}
// MockClientLookupFn resolves the fixture client ID; any other ID yields
// (nil, nil), which the handler treats as "client not found".
func MockClientLookupFn(clientId string) (oauth.Client, error) {
	if clientId == validClientId {
		return validClient, nil
	}
	// Early return removes the else-after-return; behavior unchanged.
	return nil, nil
}
// MockErrorPageRenderer renders an error code as a minimal HTML page.
// The parameter was renamed from "error", which shadowed the builtin type
// (caller-invisible in Go, so the interface is unchanged).
func MockErrorPageRenderer(message string) []byte {
	return []byte(fmt.Sprintf("<html>%s</html>", message))
}
// approvalRequests captures every request stored by the handler under test,
// keyed by the fixed "random-request-id". It is (re)initialized in TestMain.
var approvalRequests map[string]*oauth2.AuthorizeRequest
// mockApprovalRequestStore stashes the request and returns a fixed ID so
// tests can assert on exactly what the handler stored.
func mockApprovalRequestStore(request *oauth2.AuthorizeRequest) string {
	approvalRequests["random-request-id"] = request
	return "random-request-id"
}
func mockApprovalLookup(approvalType string, requestId string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(fmt.Sprintf("approvalType = %s, requestId = %s", approvalType, requestId)))
}
}
// handler is the authorization handler under test, wired entirely to mocks.
var handler = NewAuthorizationHandler(
	MockClientLookupFn,
	//MockCodeGenerator,
	MockErrorPageRenderer,
	mockApprovalRequestStore,
	mockApprovalLookup)
// A valid client with a whitelisted redirect should reach the
// username-password approval flow and have its request stored.
func TestAuthorizeHandler_successfulAuthorization(t *testing.T) {
	responseType := "code"
	state := "state1"
	redirectUri := "https://example.com/redirect"
	requestUrl := fmt.Sprintf("/authorize?client_id=%s&response_type=%s&state=%s&redirect_uri=%s",
		validClientId, responseType, state, url.QueryEscape(redirectUri))
	handlers.DoGetEndpointRequest(handler, requestUrl).
		ThenAssert(func(response *handlers.EndpointResponse) error {
		response.AssertHttpStatusEquals(200)
		response.AssertResponseContent("approvalType = username-password, requestId = random-request-id", t)
		expectedRequest := oauth2.AuthorizeRequest{
			ResponseType: "code",
			ClientId: validClientId,
			RedirectURI: redirectUri,
			State: state,
		}
		util.AssertEquals(1, len(approvalRequests), t)
		util.AssertEquals(expectedRequest, *approvalRequests["random-request-id"], t)
		return nil
	}, t)
}
// An unknown client ID renders the unknown_client error page.
func TestAuthorizeHandler_invalidClient(t *testing.T) {
	responseType := "code"
	state := "state1"
	redirectUri := "https://example.com/redirect"
	requestUrl := fmt.Sprintf("/authorize?client_id=%s&response_type=%s&state=%s&redirect_uri=%s",
		invalidClientId, responseType, state, url.QueryEscape(redirectUri))
	handlers.DoGetEndpointRequest(handler, requestUrl).
		ThenAssert(func(response *handlers.EndpointResponse) error {
		response.AssertHttpStatusEquals(200) //TODO: 200? Seems common, but seems wrong.
		response.AssertHeaderValue("Content-type", "text/html", t)
		response.AssertResponseContent("<html>unknown_client</html>", t)
		return nil
	}, t)
}
// A redirect_uri not on the client's whitelist renders invalid_redirect_uri.
func TestAuthorizeHandler_badRedirectURL(t *testing.T) {
	responseType := "code"
	state := "state1"
	redirectUri := "https://example.com/badRedirect"
	requestUrl := fmt.Sprintf("/authorize?client_id=%s&response_type=%s&state=%s&redirect_uri=%s",
		validClientId, responseType, state, url.QueryEscape(redirectUri))
	handlers.DoGetEndpointRequest(handler, requestUrl).
		ThenAssert(func(response *handlers.EndpointResponse) error {
		response.AssertHttpStatusEquals(200)
		response.AssertHeaderValue("Content-type", "text/html", t)
		response.AssertResponseContent("<html>invalid_redirect_uri</html>", t)
		return nil
	}, t)
}
// Omitting redirect_uri falls back to the client's default redirect.
func TestAuthorizeHandler_noRedirectURL(t *testing.T) {
	responseType := "code"
	state := "state1"
	requestUrl := fmt.Sprintf("/authorize?client_id=%s&response_type=%s&state=%s",
		validClientId, responseType, state)
	handlers.DoGetEndpointRequest(handler, requestUrl).
		ThenAssert(func(response *handlers.EndpointResponse) error {
		response.AssertHttpStatusEquals(200)
		response.AssertResponseContent("approvalType = username-password, requestId = random-request-id", t)
		expectedRequest := oauth2.AuthorizeRequest{
			ResponseType: "code",
			ClientId: validClientId,
			RedirectURI: defaultRedirect,
			State: state,
		}
		util.AssertEquals(1, len(approvalRequests), t)
		util.AssertEquals(expectedRequest, *approvalRequests["random-request-id"], t)
		return nil
	}, t)
}
// A scope the client does not have renders invalid_scope.
func TestAuthorizeHandler_invalidScopesRequested(t *testing.T) {
	responseType := "code"
	state := "state1"
	redirectUri := "https://example.com/redirect"
	scope := "invalid-scope"
	requestUrl := fmt.Sprintf("/authorize?client_id=%s&response_type=%s&state=%s&redirect_uri=%s&scope=%s",
		validClientId, responseType, state, url.QueryEscape(redirectUri), scope)
	handlers.DoGetEndpointRequest(handler, requestUrl).
		ThenAssert(func(response *handlers.EndpointResponse) error {
		response.AssertHttpStatusEquals(200)
		response.AssertHeaderValue("Content-type", "text/html", t)
		response.AssertResponseContent("<html>invalid_scope</html>", t)
		return nil
	}, t)
}
// A request missing client_id entirely renders invalid_request.
func TestAuthorizeHandler_invalidRequested(t *testing.T) {
	responseType := "code"
	state := "state1"
	redirectUri := "https://example.com/redirect"
	requestUrl := fmt.Sprintf("/authorize?response_type=%s&state=%s&redirect_uri=%s", responseType,
		state, url.QueryEscape(redirectUri))
	handlers.DoGetEndpointRequest(handler, requestUrl).
		ThenAssert(func(response *handlers.EndpointResponse) error {
		response.AssertHttpStatusEquals(200)
		response.AssertHeaderValue("Content-type", "text/html", t)
		response.AssertResponseContent("<html>invalid_request</html>", t)
		return nil
	}, t)
}
// TestMain initializes the shared approvalRequests map before running tests.
func TestMain(m *testing.M) {
	approvalRequests = map[string]*oauth2.AuthorizeRequest{}
	result := m.Run()
	os.Exit(result)
}
|
package midtrans
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
)
// HttpClient abstracts HTTP calls to the Midtrans API so implementations
// (and tests) can be swapped. A nil return means the call succeeded.
type HttpClient interface {
	Call(method string, url string, apiKey *string, options *ConfigOptions, body io.Reader, result interface{}) *Error
}
// HttpClientImplementation : this is for midtrans HttpClient Implementation.
// It bundles the underlying *http.Client with the logger used for
// request/response tracing.
type HttpClientImplementation struct {
	HttpClient *http.Client
	Logger     LoggerInterface
}
// Call the Midtrans API at specific `path` using the specified HTTP `method`. The result will be
// given to `result` if there is no error. If any error occurred, the return of this function is the `midtrans.Error`
// itself, otherwise nil.
func (c *HttpClientImplementation) Call(method string, url string, apiKey *string, options *ConfigOptions, body io.Reader, result interface{}) *Error {
	// NewRequest is used by Call to generate an http.Request.
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		c.Logger.Error("Cannot create Midtrans request: %v", err)
		return &Error{
			Message:  fmt.Sprintf("Error Request creation failed: %s", err.Error()),
			RawError: err,
		}
	}
	if options != nil {
		if options.Ctx != nil {
			// WithContext returns a shallow copy of the request; the previous
			// code discarded the copy, so the context was never attached.
			req = req.WithContext(options.Ctx)
		}
		// Optional idempotency / notification-override headers.
		if options.IrisIdempotencyKey != nil {
			req.Header.Add("X-Idempotency-Key", *options.IrisIdempotencyKey)
		}
		if options.PaymentIdempotencyKey != nil {
			req.Header.Add("Idempotency-Key", *options.PaymentIdempotencyKey)
		}
		if options.PaymentOverrideNotification != nil {
			req.Header.Add("X-Override-Notification", *options.PaymentOverrideNotification)
		}
		if options.PaymentAppendNotification != nil {
			req.Header.Add("X-Append-Notification", *options.PaymentAppendNotification)
		}
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Accept", "application/json")
	req.Header.Add("User-Agent", "Midtrans-Go_"+libraryVersion)
	// Validate the API key before sending: empty or whitespace-containing
	// keys are rejected locally with a descriptive error.
	if apiKey != nil {
		key := *apiKey
		if key == "" {
			err := &Error{
				Message: "The API Key (ServerKey/IrisApiKey) is invalid, as it is an empty string. Please double-check your API key. " +
					"You can check from the Midtrans Dashboard. " +
					"See https://docs.midtrans.com/en/midtrans-account/overview?id=retrieving-api-access-keys " +
					"for the details or contact support at support@midtrans.com if you have any questions.",
			}
			c.Logger.Error("Authentication: ", err.GetMessage())
			return err
		} else if strings.Contains(key, " ") {
			err := &Error{
				Message: "The API Key (ServerKey/IrisApiKey) is contains white-space. Please double-check your API key. " +
					"You can check the ServerKey from the Midtrans Dashboard. " +
					"See https://docs.midtrans.com/en/midtrans-account/overview?id=retrieving-api-access-keys " +
					"for the details or contact support at support@midtrans.com if you have any questions.",
			}
			c.Logger.Error("Authentication: ", err.GetMessage())
			return err
		} else {
			// Midtrans uses HTTP Basic auth with the key as username and an
			// empty password.
			req.SetBasicAuth(key, "")
		}
	}
	c.Logger.Info("================ Request ================")
	c.Logger.Info("%v Request %v %v", req.Method, req.URL, req.Proto)
	logHttpHeaders(c.Logger, req.Header, true)
	return c.DoRequest(req, result)
}
// DoRequest : is used by Call to execute an API request using HTTP client and parse the response into `result`.
// Error reporting has three layers: transport failures, a `status_code`
// field inside the Midtrans JSON body, and the HTTP status code itself.
func (c *HttpClientImplementation) DoRequest(req *http.Request, result interface{}) *Error {
	start := time.Now()
	res, err := c.HttpClient.Do(req)
	if err != nil {
		c.Logger.Error("Cannot send request: %v", err.Error())
		// Best-effort status code: keep the response code when one exists,
		// map timeouts to 408, otherwise 0 (no HTTP exchange happened).
		var statusCode int
		if res != nil {
			statusCode = res.StatusCode
		} else if strings.Contains(err.Error(), "timeout") {
			statusCode = 408
		} else {
			statusCode = 0
		}
		return &Error{
			Message:    fmt.Sprintf("Error when request via HttpClient, Cannot send request with error: %s", err.Error()),
			StatusCode: statusCode,
			RawError:   err,
		}
	}
	if res != nil {
		defer res.Body.Close()
		c.Logger.Info("================== END ==================")
		c.Logger.Info("Request completed in %v ", time.Since(start))
		resBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			c.Logger.Error("Request failed: %v", err)
			return &Error{
				Message:    "Cannot read response body: " + err.Error(),
				StatusCode: res.StatusCode,
				RawError:   err,
			}
		}
		// Snapshot the raw exchange so callers can inspect it via RawApiResponse.
		rawResponse := newHTTPResponse(res, resBody)
		c.Logger.Debug("=============== Response ===============")
		// Loop through headers to perform log
		logHttpHeaders(c.Logger, rawResponse.Header, false)
		c.Logger.Debug("Response Body: %v", string(rawResponse.RawBody))
		if result != nil {
			if err = json.Unmarshal(resBody, &result); err != nil {
				return &Error{
					Message:        fmt.Sprintf("Invalid body response, parse error during API request to Midtrans with message: %s", err.Error()),
					StatusCode:     res.StatusCode,
					RawError:       err,
					RawApiResponse: rawResponse,
				}
			}
		}
		// Check status_code from Midtrans response body. Some endpoints return
		// HTTP 200 but carry the real status in the JSON payload.
		if found, data := HasOwnProperty("status_code", resBody); found {
			statusCode, _ := strconv.Atoi(data["status_code"].(string))
			// NOTE(review): 407 is deliberately excluded here — presumably a
			// Midtrans status that is not treated as an error; confirm.
			if statusCode >= 401 && statusCode != 407 {
				return &Error{
					Message:        fmt.Sprintf("Midtrans API is returning API error. HTTP status code: %s API response: %s", strconv.Itoa(statusCode), string(resBody)),
					StatusCode:     statusCode,
					RawApiResponse: rawResponse,
				}
			}
		}
		// Check StatusCode from Midtrans HTTP response api StatusCode
		if res.StatusCode >= 400 {
			return &Error{
				Message:        fmt.Sprintf("Midtrans API is returning API error. HTTP status code: %s API response: %s", strconv.Itoa(res.StatusCode), string(resBody)),
				StatusCode:     res.StatusCode,
				RawApiResponse: rawResponse,
				RawError:       err,
			}
		}
	}
	return nil
}
// ApiResponse : is a structs that may come from Midtrans API endpoints
type ApiResponse struct {
Status string // e.g. "200 OK"
StatusCode int // e.g. 200
Proto string // e.g. "HTTP/1.0"
// response Header contain a map of all HTTP header keys to values.
Header http.Header
// response body
RawBody []byte
// request that was sent to obtain the response
Request *http.Request
}
// newHTTPResponse : internal function to set HTTP Raw response return to ApiResponse
func newHTTPResponse(res *http.Response, responseBody []byte) *ApiResponse {
return &ApiResponse{
Status: res.Status,
StatusCode: res.StatusCode,
Proto: res.Proto,
Header: res.Header,
RawBody: responseBody,
Request: res.Request,
}
}
// logHttpHeaders : internal helper that logs every header value. The
// authorization header is always logged at Debug level (hidden at Info
// verbosity); other headers go to Info for requests and Debug for responses.
func logHttpHeaders(log LoggerInterface, header http.Header, isReq bool) {
	for rawName, values := range header {
		name := strings.ToLower(rawName)
		for _, value := range values {
			switch {
			case name == "authorization":
				log.Debug("%v: %v", name, value)
			case isReq:
				log.Info("%v: %v", name, value)
			default:
				log.Debug("%v: %v", name, value)
			}
		}
	}
}
// HasOwnProperty : Convert HTTP raw response body to map and report whether
// `key` is present with a string value. The decoded map is returned in
// either case (empty when the body is not valid JSON).
func HasOwnProperty(key string, body []byte) (bool, map[string]interface{}) {
	d := make(map[string]interface{})
	// A malformed body simply yields an empty map; callers only probe keys.
	_ = json.Unmarshal(body, &d)
	// The previous if/else returned identical values in both branches.
	_, found := d[key].(string)
	return found, d
}
|
package util
import (
"fmt"
"os"
"time"
"gopkg.in/yaml.v2"
)
// Config contain direktiv configuration.
type Config struct {
	// FunctionsService is the address of the functions service.
	FunctionsService string `yaml:"functions-service"`
	// FunctionsTimeout : Action timeout in milliseconds
	// NOTE(review): GetFunctionsTimeout multiplies this value by time.Second,
	// so it is effectively interpreted as seconds — confirm the intended unit.
	FunctionsTimeout int64 `yaml:"functions-timeout"`
	FlowService string `yaml:"flow-service"`
	PrometheusBackend string `yaml:"prometheus-backend"`
	OpenTelemetryBackend string `yaml:"opentelemetry-backend"`
	// Eventing toggles event-driven behavior.
	Eventing bool `yaml:"eventing"`
}
// ReadConfig reads direktiv config file and parses it as YAML into a Config.
func ReadConfig(file string) (*Config, error) {
	/* #nosec */
	data, err := os.ReadFile(file)
	if err != nil {
		return nil, fmt.Errorf("failed to read configuration file: %w", err)
	}
	cfg := new(Config)
	if err := yaml.Unmarshal(data, cfg); err != nil {
		return nil, fmt.Errorf("failed to parse configuration file: %w", err)
	}
	return cfg, nil
}
// GetTelemetryBackendAddr returns the configured OpenTelemetry backend address.
func (cfg *Config) GetTelemetryBackendAddr() string {
	return cfg.OpenTelemetryBackend
}
// GetFunctionsTimeout returns the configured action timeout as a Duration.
// NOTE(review): FunctionsTimeout is documented as milliseconds but is
// multiplied by time.Second here, i.e. treated as seconds — confirm which
// unit is intended before changing either side.
func (cfg *Config) GetFunctionsTimeout() time.Duration {
	return time.Second * time.Duration(cfg.FunctionsTimeout)
}
|
package main
import (
"GoFileWatcher/cli"
"fmt"
FolderWatcher "github.com/mikerapa/FolderWatcher"
"log"
"os"
"sync"
)
// main wires the folder watcher to the CLI: it adds the folders given on the
// command line, runs an event loop that prints (or, when paused, suppresses)
// file-change events, and blocks until the menu signals exit.
func main() {
	wg := sync.WaitGroup{}
	commandLineSettings, err := cli.GetCommandLineSettings(os.Args[1:])
	if err != nil {
		log.Fatal(err)
		return
	}
	watcher := FolderWatcher.New()
	// paused gates event display; it is only read/written by the event
	// goroutine below, which receives updates via pauseChannel.
	paused := false
	pauseChannel := make(chan bool)
	exitChannel := make(chan bool)
	// add watchers from the command line
	for _, folderPath := range commandLineSettings.FolderPaths {
		// TODO the showHidden parameter is hard-coded
		if err := watcher.AddFolder(folderPath, commandLineSettings.Recursive, false); err != nil {
			// Just display the error and move on
			cli.DisplayError(err)
		}
	}
	// handle events
	go func() {
		for {
			select {
			case event := <-watcher.FileChanged:
				// Print out the event to the screen.
				if !paused {
					cli.DisplayEvent(event)
				}
			case <-watcher.Stopped:
				return
			case p := <-pauseChannel:
				// React to changes in the paused status
				if paused != p {
					paused = p
					cli.DisplayEventPause(paused)
				}
			case <-exitChannel:
				// Releases the wg.Wait() below so main can shut down.
				// NOTE(review): a second exit signal would call Done again
				// and panic; presumably the menu sends it only once — verify.
				wg.Done()
			}
		}
	}()
	go cli.RunMenu(pauseChannel, exitChannel, &watcher)
	cli.DisplayWatchedFolderList(watcher.RequestedWatches)
	fmt.Println()
	// Start the watcher
	wg.Add(1)
	watcher.Start()
	// Shut down
	wg.Wait()
	println("Shutting down the watcher")
	watcher.Stop()
}
|
// Package main calls the examples of the first-class functions tutorial.
package main
import (
"firstclassfunc/mapfunc"
"firstclassfunc/students"
"firstclassfunc/usertypefunc"
"firstclassfunc/anonfunc"
)
// main runs each demo in turn: anonymous functions, user-defined function
// types, function-valued filters, and map/transform helpers.
func main() {
	anonfunc.AssignFuncToVariable()
	anonfunc.CallAnonFunc()
	anonfunc.PassArgToAnonFunc()
	usertypefunc.DefUseTypedFunc()
	students.RunStudentFilter()
	mapfunc.RunIntMapMultiplyTo5()
	mapfunc.RunIntDivByNom()
}
|
package modifier
import (
"github.com/hashicorp/go-plugin"
"github.com/jonmorehouse/gatekeeper/gatekeeper"
"github.com/jonmorehouse/gatekeeper/internal"
)
// Plugin is the interface which a plugin will implement and pass to `RunPlugin`
type Plugin interface {
	// internal.Plugin exposes the following methods, per:
	// https://github.com/jonmorehouse/gatekeeper/tree/master/internal/plugin.go
	//
	// Start() error
	// Stop() error
	// Heartbeat() error
	// Configure(map[string]interface{}) error
	//
	internal.BasePlugin
	// Modify a request, changing anything about the request's nature.
	// Specifically this could mean, swapping out the backend, swapping out
	// the upstream, returning an error, adding a response or anything
	// else. Adding a Response to the request or adding / returning an
	// error will stop the request life cycle immediately and will return
	// immediately. If a response is added, that will be written back
	// directly where as an error will trigger the ErrorResponse method.
	// Returning an error from this method should only be done in
	// extenuating circumstances and will trigger an internal error
	ModifyRequest(*gatekeeper.Request) (*gatekeeper.Request, error)
	// Modify the response, changing any attributes, headers, the body,
	// that are desired before sending the response back to the client.
	// This method should only return an error in the case of an
	// extenuating circumstance and/or when the response body can be
	// dropped all together. Most likely speaking, that would only be in
	// the case of a fatal failure such as datastore being down etc.
	ModifyResponse(*gatekeeper.Request, *gatekeeper.Response) (*gatekeeper.Response, error)
	// Modify a response that was flagged as an error. This is similar to
	// the ModifyResponse method, again giving complete control over the
	// response that is written back to the client.
	ModifyErrorResponse(error, *gatekeeper.Request, *gatekeeper.Response) (*gatekeeper.Response, error)
}
// PluginClient in this case is the gatekeeper/core application. PluginClient
// is the interface that the user of this plugin sees and is simply a wrapper
// around *RPCClient. This is merely a wrapper which returns a clean interface
// with error interfaces instead of *gatekeeper.Error types
type PluginClient interface {
	internal.BasePluginClient
	ModifyRequest(*gatekeeper.Request) (*gatekeeper.Request, error)
	ModifyResponse(*gatekeeper.Request, *gatekeeper.Response) (*gatekeeper.Response, error)
	ModifyErrorResponse(error, *gatekeeper.Request, *gatekeeper.Response) (*gatekeeper.Response, error)
}
// NewPluginClient wraps the RPC client (plus the base plugin client built
// from it) behind the PluginClient interface.
func NewPluginClient(rpcClient *RPCClient, client *plugin.Client) PluginClient {
	base := internal.NewBasePluginClient(rpcClient, client)
	return &pluginClient{
		pluginRPC:        rpcClient,
		BasePluginClient: base,
	}
}
// pluginClient implements PluginClient by delegating lifecycle methods to
// the embedded BasePluginClient and Modify* calls to the RPC client.
type pluginClient struct {
	pluginRPC *RPCClient
	internal.BasePluginClient
}
// ModifyRequest forwards the request to the plugin over RPC, returning the
// (possibly rewritten) request and any error as-is.
func (p *pluginClient) ModifyRequest(req *gatekeeper.Request) (*gatekeeper.Request, error) {
	return p.pluginRPC.ModifyRequest(req)
}
// ModifyResponse forwards the request/response pair to the plugin over RPC,
// returning the (possibly rewritten) response and any error as-is.
func (p *pluginClient) ModifyResponse(req *gatekeeper.Request, resp *gatekeeper.Response) (*gatekeeper.Response, error) {
	return p.pluginRPC.ModifyResponse(req, resp)
}
// ModifyErrorResponse wraps the plain error in a *gatekeeper.Error for the
// RPC boundary and forwards the call, returning the result unchanged.
func (p *pluginClient) ModifyErrorResponse(respErr error, req *gatekeeper.Request, resp *gatekeeper.Response) (*gatekeeper.Response, error) {
	return p.pluginRPC.ModifyErrorResponse(gatekeeper.NewError(respErr), req, resp)
}
|
package s3
import (
"encoding/json"
"fmt"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/root-gg/plik/server/common"
)
// Build Server Side Encryption configuration.
// Maps the configured SSE mode to a minio encrypt.ServerSide value:
// "" disables SSE, S3 uses bucket-managed keys, SSE-C derives a per-file
// key stored in the file's backend details; KMS is not implemented.
func (b *Backend) getServerSideEncryption(file *common.File) (sse encrypt.ServerSide, err error) {
	switch encrypt.Type(b.config.SSE) {
	case "":
		return nil, nil
	case encrypt.S3:
		return encrypt.NewSSE(), nil
	case encrypt.SSEC:
		// Customer-provided key: fetched from (or generated into) the file's
		// backend details so the same key is used for later reads.
		key, err := getServerSideEncryptionKey(file)
		if err != nil {
			return nil, fmt.Errorf("unable to get Server Side Encryption Key : %s", err)
		}
		return encrypt.NewSSEC([]byte(key))
	case encrypt.KMS:
		return nil, fmt.Errorf("KMS server side encryption is not yet implemented")
	default:
		return nil, fmt.Errorf("invalid SSE type %s", b.config.SSE)
	}
}
// Generate a 32Bytes / 256bits encryption key suitable for SSE-C.
func genServerSideEncryptionKey() string {
	return common.GenerateRandomID(32)
}
// Get the SSE Key from the file backend details or generate one and store it in the file backend details.
// Reusing a previously stored key keeps later reads of the same object decryptable.
func getServerSideEncryptionKey(file *common.File) (key string, err error) {
	// Retrieve the SSE Key from the backend details
	if file.BackendDetails != "" {
		backendDetails := &BackendDetails{}
		err = json.Unmarshal([]byte(file.BackendDetails), backendDetails)
		if err != nil {
			// %w keeps the json error inspectable via errors.Is/As;
			// the rendered message is unchanged.
			return "", fmt.Errorf("unable to deserialize backend details : %w", err)
		}
		if backendDetails.SSEKey != "" {
			return backendDetails.SSEKey, nil
		}
	}
	key = genServerSideEncryptionKey()
	// Store the SSE Key in the backend details
	err = setServerSideEncryptionKey(file, key)
	if err != nil {
		return "", err
	}
	return key, nil
}
// Add the SSE Key to the file backend details, preserving any existing
// details by deserializing them first.
func setServerSideEncryptionKey(file *common.File, key string) (err error) {
	details := &BackendDetails{}
	if file.BackendDetails != "" {
		if err = json.Unmarshal([]byte(file.BackendDetails), details); err != nil {
			return fmt.Errorf("unable to deserialize backend details : %s", err)
		}
	}
	details.SSEKey = key
	serialized, err := json.Marshal(details)
	if err != nil {
		return fmt.Errorf("unable to serialize backend details : %s", err)
	}
	file.BackendDetails = string(serialized)
	return nil
}
|
// partitionDisjoint returns the length of the smallest non-empty prefix
// ("left") such that every element of left is <= every element of the
// remaining non-empty suffix ("right"), or -1 when no such split exists.
// Unlike the previous version, it works for any int values: the suffix
// minimum is seeded from the data (not the magic sentinel 1000001) and the
// running prefix maximum starts at A[0] (not 0), so negatives are handled.
// time: O(n), space: O(n)
func partitionDisjoint(A []int) int {
	if len(A) < 2 {
		return -1
	}
	// mins[i] holds min(A[i:]) so each split test below is O(1).
	mins := make([]int, len(A))
	m := A[len(A)-1]
	for i := len(A) - 1; i >= 0; i-- {
		if A[i] < m {
			m = A[i]
		}
		mins[i] = m
	}
	// First index where max(prefix) <= min(suffix) is the answer.
	maxLeft := A[0]
	for i := 0; i < len(A)-1; i++ {
		if A[i] > maxLeft {
			maxLeft = A[i]
		}
		if maxLeft <= mins[i+1] {
			return i + 1
		}
	}
	return -1
}
|
package domain
import (
"encoding/json"
"fmt"
"log"
"sort"
"time"
)
// Logs is a collection of *Log ordered by start time; Less/Swap/Len make it
// satisfy sort.Interface.
type Logs []*Log

// Less orders logs by their Start timestamp (earliest first).
func (l Logs) Less(i, j int) bool {
	return time.Time(l[i].Start).Before(time.Time(l[j].Start))
}

// Swap exchanges two entries in place.
func (l Logs) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}

// Len returns the number of logs.
func (l Logs) Len() int {
	return len(l)
}
type JSONTime time.Time
func (t JSONTime) MarshalJSON() ([]byte, error) {
stamp := fmt.Sprintf("\"%s\"", time.Time(t).Format("2006-01-02T15:04:05.000Z07:00"))
return []byte(stamp), nil
}
// Log is one span in a trace: its time window, owning service, span id,
// and the child calls nested beneath it. Parent and Trace are bookkeeping
// fields used while building the tree and are excluded from JSON output.
type Log struct {
	Start   JSONTime `json:"start"`
	End     JSONTime `json:"end"`
	Service string   `json:"service"`
	Span    string   `json:"span"`
	Calls   Logs     `json:"calls"`
	Parent  string   `json:"-"`
	Trace   string   `json:"-"`
}
// Insert attaches child under the first node whose Span equals the child's
// Parent, using a depth-first search, and keeps siblings sorted by start
// time. It returns false when no matching parent exists in this subtree.
// NOTE(review): the original comment claimed O(log N); the DFS can visit
// every node, so worst-case time is O(N) plus the sibling sort.
func (l *Log) Insert(child *Log) bool {
	if l == nil {
		return false
	}
	if l.Span == child.Parent {
		l.Calls = append(l.Calls, child)
		sort.Sort(l.Calls)
		return true
	}
	for _, c := range l.Calls {
		if c.Insert(child) {
			return true
		}
	}
	return false
}
// String renders the log subtree as JSON.
// NOTE(review): log.Fatalf terminates the process, so the `return ""` below
// is unreachable; a Stringer that can kill the program is surprising —
// consider returning the error text instead.
func (l *Log) String() string {
	b, err := json.Marshal(l)
	if err != nil {
		log.Fatalf("Error: %s", err)
		return ""
	}
	return string(b)
}
// LogTree is a single trace: the root span plus logs whose parent has not
// arrived yet, keyed by the missing parent's span id.
// NOTE(review): "Orphens" is a misspelling of "Orphans", kept because the
// exported field name is part of the public interface.
type LogTree struct {
	ID      string          `json:"id"`
	Root    *Log            `json:"root"`
	Orphens map[string]Logs `json:"-"`
}
// String renders the whole trace tree as JSON. On a marshal failure
// log.Fatalf exits the process; the trailing return satisfies the compiler.
func (lt *LogTree) String() string {
	encoded, err := json.Marshal(lt)
	if err != nil {
		log.Fatalf("Error: %s", err)
		return ""
	}
	return string(encoded)
}
// TODO check if maybe better to construct only on write
// Insert places l into the tree: any orphans already waiting for l become
// its children, then l itself is either installed as the root (Parent ==
// "null"), attached somewhere under the root, or parked as an orphan.
func (lt *LogTree) Insert(l *Log) {
	// create partial tree: adopt children that arrived before this parent.
	if calls, ok := lt.Orphens[l.Span]; ok {
		l.Calls = calls
		sort.Sort(l.Calls)
		delete(lt.Orphens, l.Span)
	}
	if l.Parent == "null" {
		lt.Root = l
	} else if !lt.Root.Insert(l) {
		// Root may still be nil here; Log.Insert handles a nil receiver.
		lt.AddOrphen(l)
	}
}
// AddOrphen parks l until its parent arrives. If other logs are already
// waiting for the same parent, l joins that group. Otherwise the existing
// orphan subtrees are searched in case one of them contains the parent;
// only when none accepts the log is a new group created.
func (lt *LogTree) AddOrphen(l *Log) {
	if calls, ok := lt.Orphens[l.Parent]; ok {
		lt.Orphens[l.Parent] = append(calls, l)
		return
	}
	// Look through the current orphans for a parent. Return at the first
	// successful insert: the previous version only broke out of the inner
	// loop, so the outer loop could insert the same log a second time
	// under a different orphan subtree.
	for _, logs := range lt.Orphens {
		for _, ll := range logs {
			if ll.Insert(l) {
				return
			}
		}
	}
	lt.Orphens[l.Parent] = Logs{l}
}
// NewLog builds a Log from parsed string properties. The "start" and "end"
// fields must be RFC3339 timestamps; any parse failure is returned as-is.
func NewLog(properties map[string]string) (*Log, error) {
	parseField := func(name string) (time.Time, error) {
		return time.Parse(time.RFC3339, properties[name])
	}
	start, err := parseField("start")
	if err != nil {
		return nil, err
	}
	end, err := parseField("end")
	if err != nil {
		return nil, err
	}
	l := &Log{
		Trace:   properties["trace"],
		Start:   JSONTime(start),
		End:     JSONTime(end),
		Service: properties["service"],
		Span:    properties["span"],
		Parent:  properties["caller_span"],
		Calls:   make(Logs, 0),
	}
	return l, nil
}
|
package action
import (
"fmt"
"github.com/fatih/color"
"github.com/urfave/cli"
)
// Git runs git commands inside the store or mounts.
func (s *Action) Git(c *cli.Context) error {
	return s.Store.Git(c.String("store"), c.Args()...)
}
// GitInit initializes a git repo for the selected store, optionally with a
// GPG signing key.
func (s *Action) GitInit(c *cli.Context) error {
	return s.gitInit(c.String("store"), c.String("sign-key"))
}
// gitInit initializes a git repository for the given store. When no signing
// key was provided, the user is asked to pick one; if that prompt fails we
// proceed without a key (best-effort, matching previous behavior).
func (s *Action) gitInit(store, sk string) error {
	if sk == "" {
		// Use a distinct name for the chosen key: the previous code shadowed
		// the receiver `s` inside this branch.
		key, err := s.askForPrivateKey(color.CyanString("Please select a key for signing Git Commits"))
		if err == nil {
			sk = key
		}
	}
	if err := s.Store.GitInit(store, sk); err != nil {
		return err
	}
	fmt.Fprintln(color.Output, color.GreenString("Git initialized"))
	return nil
}
|
package decodeways
import (
"bufio"
"encoding/json"
"io"
"os"
"testing"
)
// Test is one table entry from tests.json: the input digit string and the
// expected number of decodings.
type Test struct {
	Input  string `json:"input"`
	Output int    `json:"output"`
}
// TestDecodeWays streams test tables from tests.json and runs NumDecodings
// against every named case as a subtest.
func TestDecodeWays(test *testing.T) {
	f, err := os.Open("./tests.json")
	if err != nil {
		test.Fatal(err)
	}
	defer f.Close()
	decoder := json.NewDecoder(bufio.NewReader(f))
	for {
		var tests map[string]Test
		err = decoder.Decode(&tests)
		if err == io.EOF {
			break
		}
		if err != nil {
			// Fatal, not Error: the previous version kept looping on a
			// persistent decode error and never terminated.
			test.Fatal(err)
		}
		for name, tst := range tests {
			tst := tst // capture a per-iteration copy for the subtest closure
			test.Run(name, func(st *testing.T) {
				if NumDecodings(tst.Input) != tst.Output {
					st.Errorf("Use case %v failed\n", tst)
				}
			})
		}
	}
}
|
package command
import (
"github.com/payfazz/fazz-swagger/internal/compile"
"github.com/spf13/cobra"
)
// compileCommand groups the handlers for the `compile` sub-command.
type compileCommand struct{}

// NewCompileCommand creates `compile` as a sub-command; it requires exactly
// one argument, the directory to compile.
func NewCompileCommand() *cobra.Command {
	c := compileCommand{}
	cmd := &cobra.Command{
		Use: "compile [directory]",
		Short: "Compile files to giant Swagger File",
		Args: cobra.ExactArgs(1),
		Run: c.Run,
	}
	return cmd
}
// Run executes the command logic: compile everything under args[0].
func (c *compileCommand) Run(cmd *cobra.Command, args []string) {
	compile.Compile(args[0])
}
|
// Code for parsing XML coverage output (eg. Java or Python).
package test
import "encoding/xml"
import "strings"
import "core"
// parseXMLCoverageResults parses XML coverage output (coverage.py / Cobertura
// style) into the per-file line coverage for the given target.
func parseXMLCoverageResults(target *core.BuildTarget, coverage *core.TestCoverage, data []byte) error {
	xcoverage := xmlCoverage{}
	if err := xml.Unmarshal(data, &xcoverage); err != nil {
		return err
	}
	for _, pkg := range xcoverage.Packages.Package {
		for _, cls := range pkg.Classes.Class {
			// Normalize absolute paths to repo-relative ones.
			if strings.HasPrefix(cls.Filename, core.RepoRoot) {
				cls.Filename = cls.Filename[len(core.RepoRoot):]
			}
			// There can be multiple classes per file so we must merge here, not overwrite.
			coverage.Files[cls.Filename] = core.MergeCoverageLines(coverage.Files[cls.Filename], parseXMLLines(cls.Lines.Line))
		}
	}
	coverage.Tests[target.Label] = coverage.Files
	return nil
}
// parseXMLLines converts per-line hit counts into LineCoverage values,
// padding any gap in the line numbering with NotExecutable entries.
func parseXMLLines(lines []xmlCoverageLine) []core.LineCoverage {
	ret := []core.LineCoverage{}
	for _, line := range lines {
		// Lines absent from the report are considered not executable.
		for len(ret)+1 < line.Number {
			ret = append(ret, core.NotExecutable)
		}
		cov := core.Uncovered
		if line.Hits > 0 {
			cov = core.Covered
		}
		ret = append(ret, cov)
	}
	return ret
}
// Note that this is based off coverage.py's format, which is originally a Java format
// so some of the structures are a little awkward (eg. 'classes' actually refer to Python modules, not classes).
// xmlCoverage mirrors only the elements/attributes this parser consumes.
type xmlCoverage struct {
	Packages struct {
		Package []struct {
			Classes struct {
				Class []struct {
					LineRate float32 `xml:"line-rate,attr"`
					Filename string  `xml:"filename,attr"`
					Name     string  `xml:"name,attr"`
					Lines    struct {
						Line []xmlCoverageLine `xml:"line"`
					} `xml:"lines"`
				} `xml:"class"`
			} `xml:"classes"`
		} `xml:"package"`
	} `xml:"packages"`
}
// xmlCoverageLine is one <line> element: its 1-based line number and how
// many times it was hit.
type xmlCoverageLine struct {
	Hits   int `xml:"hits,attr"`
	Number int `xml:"number,attr"`
}
|
package day7
import (
"testing"
"github.com/achakravarty/30-days-of-go/assert"
)
// testCase pairs an input slice with its expected reversal.
type testCase struct {
	arr      []int
	expected []int
}

// testCases drives TestArrays. The element type is implied by the slice
// literal, so repeating `testCase{...}` per entry (as before) is redundant
// (gofmt -s simplification).
var testCases = []testCase{
	{arr: []int{1, 2, 3}, expected: []int{3, 2, 1}},
}
// TestArrays checks Reverse against every table-driven case.
func TestArrays(t *testing.T) {
	for _, tc := range testCases {
		assert.EqualIntArray(t, tc.expected, Reverse(tc.arr))
	}
}
|
package main
import (
"errors"
"log"
"syscall"
"unsafe"
)
// FlutterEmbedderGLFW holds the handle of the loaded flutter_embedder.dll
// together with the addresses of the exported procedures this embedder
// calls. (The vacuous `var ()` declaration that preceded it was removed.)
type FlutterEmbedderGLFW struct {
	flutter_embedder                      syscall.Handle // loaded DLL handle
	procCreateFlutterWindowInSnapshotMode uintptr
	procFlutterWindowLoop                 uintptr
	procFlutterTerminate                  uintptr
	procFlutterInit                       uintptr
}
// LoadFlutterEmbedderGLFW loads the flutter embedder DLL and resolves the
// exported procedures used by this embedder. `path` selects the DLL to
// load; an empty path keeps the historical default "flutter_embedder.dll"
// (the previous version ignored the parameter entirely, so this is
// backward compatible).
func LoadFlutterEmbedderGLFW(path string) (feg *FlutterEmbedderGLFW, err error) {
	if path == "" {
		path = "flutter_embedder.dll"
	}
	feg = new(FlutterEmbedderGLFW)
	feg.flutter_embedder, err = syscall.LoadLibrary(path)
	if err != nil {
		log.Printf("error on load %s", path)
		return
	}
	// Resolve each exported symbol; fail fast on the first missing one.
	lookup := func(name string) (uintptr, error) {
		addr, err := syscall.GetProcAddress(feg.flutter_embedder, name)
		if err != nil {
			log.Printf("error on load %s", name)
		}
		return addr, err
	}
	if feg.procFlutterInit, err = lookup("FlutterInit"); err != nil {
		return
	}
	if feg.procCreateFlutterWindowInSnapshotMode, err = lookup("CreateFlutterWindowInSnapshotMode_GoLangFriendly"); err != nil {
		return
	}
	if feg.procFlutterWindowLoop, err = lookup("FlutterWindowLoop"); err != nil {
		return
	}
	if feg.procFlutterTerminate, err = lookup("FlutterTerminate"); err != nil {
		return
	}
	return feg, nil
}
// CreateFlutterWindowInSnapshotMode calls into the DLL to create a GLFW
// window of the given size, pointing the engine at the assets and ICU data
// paths. It returns an opaque window handle, or an error when the syscall
// fails or the DLL returns a null handle.
// NOTE(review): syscall.StringBytePtr is deprecated (panics on embedded
// NULs) — consider syscall.BytePtrFromString. The last of the five
// arguments is an empty string; presumably reserved by the DLL — confirm.
func (feg *FlutterEmbedderGLFW) CreateFlutterWindowInSnapshotMode(height uint32, width uint32, assets_path string, icu_data_path string) (window uintptr, err error) {
	window, _, errno := syscall.Syscall6(uintptr(feg.procCreateFlutterWindowInSnapshotMode), 5,
		uintptr(height),
		uintptr(width),
		uintptr(unsafe.Pointer(syscall.StringBytePtr(assets_path))),
		uintptr(unsafe.Pointer(syscall.StringBytePtr(icu_data_path))),
		uintptr(unsafe.Pointer(syscall.StringBytePtr(""))),
		0)
	if errno != 0 {
		return 0, errors.New(errno.Error())
	}
	if window == 0 {
		return 0, errors.New("Create GLFW windows Failed")
	}
	return window, nil
}
// FlutterInit invokes the DLL's FlutterInit procedure (no arguments).
func (feg *FlutterEmbedderGLFW) FlutterInit() error {
	if _, _, errno := syscall.Syscall(uintptr(feg.procFlutterInit), 0, 0, 0, 0); errno != 0 {
		return errors.New(errno.Error())
	}
	return nil
}
// FlutterWindowLoop runs the DLL's window loop for the given window handle;
// it blocks until the loop ends.
func (feg *FlutterEmbedderGLFW) FlutterWindowLoop(window uintptr) error {
	if _, _, errno := syscall.Syscall(uintptr(feg.procFlutterWindowLoop), 1, window, 0, 0); errno != 0 {
		return errors.New(errno.Error())
	}
	return nil
}
// FlutterTerminate invokes the DLL's FlutterTerminate procedure.
func (feg *FlutterEmbedderGLFW) FlutterTerminate() error {
	if _, _, errno := syscall.Syscall(uintptr(feg.procFlutterTerminate), 0, 0, 0, 0); errno != 0 {
		return errors.New(errno.Error())
	}
	return nil
}
// main loads the embedder DLL, creates a snapshot-mode window, runs the
// window loop and terminates the engine.
func main() {
	feg, err := LoadFlutterEmbedderGLFW("")
	if err != nil {
		// The previous version discarded this error and would have
		// dereferenced a nil *FlutterEmbedderGLFW below.
		log.Fatalf("LoadFlutterEmbedderGLFW Failed %s", err)
	}
	if err := feg.FlutterInit(); err != nil {
		log.Printf("FlutterInit Failed %s", err)
	}
	win, err := feg.CreateFlutterWindowInSnapshotMode(640, 480, "..\\..\\example\\flutter_app\\build\\flutter_assets", "..\\..\\library\\windows\\dependencies\\engine\\icudtl.dat")
	if err != nil {
		log.Printf("CreateFlutterWindowInSnapshotMode Failed %s", err)
	}
	if err := feg.FlutterWindowLoop(win); err != nil {
		log.Printf("FlutterWindowLoop Failed %s", err)
	}
	if err := feg.FlutterTerminate(); err != nil {
		// Previously mislabeled as "FlutterWindowLoop Failed" (copy/paste bug).
		log.Printf("FlutterTerminate Failed %s", err)
	}
	//flutter_embedder //.CreateFlutterWindowInSnapshotMode()
}
|
package main
import (
"fmt"
"github.com/saylorsolutions/passlock"
)
// gcmNonceLen is the AES-GCM nonce length in bytes included in the cipher text.
const gcmNonceLen = 12

// scryptSaltLen is the scrypt salt length in bytes included in the cipher text.
const scryptSaltLen = 32

// authenticationTagLen is the GCM authentication tag length in bytes.
const authenticationTagLen = 16
// main demonstrates passlock's on-disk overhead: the cipher text is the
// input length plus GCM nonce, scrypt salt and authentication tag.
func main() {
	secretData := []byte("secret sauce")
	password := []byte("Pa$$w0rd")
	cipherText, err := passlock.EncryptBytes(password, secretData)
	if err != nil {
		// Include the underlying error; the previous message discarded it.
		panic(fmt.Sprintf("An error occurred encrypting data! %v", err))
	}
	totalCipherTextLen := len(cipherText)
	fmt.Printf("Length of encrypted data: %d\n", totalCipherTextLen)
	fmt.Printf("Actual cipher text length plus authentication tag is total length - GCM nonce length - scrypt salt length\n")
	actualCipherTextLen := totalCipherTextLen - gcmNonceLen - scryptSaltLen
	fmt.Printf("\t%d - %d - %d = %d\n", totalCipherTextLen, gcmNonceLen, scryptSaltLen, actualCipherTextLen)
	fmt.Printf("Removing the authentication tag length (%d) from the cipher text length yields the same size as the input\n", authenticationTagLen)
	fmt.Printf("\t%d - %d = %d\n", actualCipherTextLen, authenticationTagLen, actualCipherTextLen-authenticationTagLen)
	fmt.Printf("\tlen('%s') = %d\n\n", string(secretData), len(secretData))
	fmt.Printf("This means that for any input size of n bytes, the encrypted size will be n + %d + %d + %d bytes\n", gcmNonceLen, scryptSaltLen, authenticationTagLen)
}
|
package main
import (
"fmt"
"os"
"github.com/Cloud-Foundations/Dominator/fleetmanager/topology"
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/json"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
// getMachineInfoSubcommand implements the get-machine-info subcommand;
// args[0] must be the hostname to query.
func getMachineInfoSubcommand(args []string, logger log.DebugLogger) error {
	// Guard against a missing argument: args[0] previously panicked when no
	// hostname was given.
	if len(args) < 1 {
		return fmt.Errorf("error getting machine info: no hostname specified")
	}
	if err := getMachineInfo(args[0], logger); err != nil {
		return fmt.Errorf("error getting machine info: %s", err)
	}
	return nil
}
// getMachineInfo fetches the machine's info and pretty-prints it as JSON to
// stdout.
func getMachineInfo(hostname string, logger log.DebugLogger) error {
	fmCR := srpc.NewClientResource("tcp",
		fmt.Sprintf("%s:%d", *fleetManagerHostname, *fleetManagerPortNum))
	defer fmCR.ScheduleClose()
	info, err := getInfoForMachine(fmCR, hostname, logger)
	if err != nil {
		return err
	}
	return json.WriteWithIndent(os.Stdout, " ", info)
}
// getInfoForMachine queries the fleet manager when one is configured,
// otherwise falls back to reading the local topology.
func getInfoForMachine(fmCR *srpc.ClientResource, hostname string,
	logger log.DebugLogger) (
	fm_proto.GetMachineInfoResponse, error) {
	if *fleetManagerHostname != "" {
		return getInfoForMachineFromFleetManager(fmCR, hostname)
	}
	info, err := getInfoForMachineFromTopology(hostname, logger)
	if err != nil {
		return fm_proto.GetMachineInfoResponse{}, err
	}
	return *info, nil
}
// getInfoForMachineFromFleetManager issues the FleetManager.GetMachineInfo
// RPC for hostname and returns the reply, surfacing both transport errors
// and the error string embedded in the reply.
func getInfoForMachineFromFleetManager(fmCR *srpc.ClientResource,
	hostname string) (fm_proto.GetMachineInfoResponse, error) {
	request := fm_proto.GetMachineInfoRequest{Hostname: hostname}
	var reply fm_proto.GetMachineInfoResponse
	client, err := fmCR.GetHTTP(nil, 0)
	if err != nil {
		return fm_proto.GetMachineInfoResponse{}, err
	}
	defer client.Put()
	err = client.RequestReply("FleetManager.GetMachineInfo", request, &reply)
	if err != nil {
		return fm_proto.GetMachineInfoResponse{}, err
	}
	// The server reports application-level failures in reply.Error.
	if err := errors.New(reply.Error); err != nil {
		return fm_proto.GetMachineInfoResponse{}, err
	}
	return reply, nil
}
// getInfoForMachineFromTopology loads the topology from *topologyDir,
// locates the machine by hostname and returns its info together with the
// subnets it belongs to.
func getInfoForMachineFromTopology(hostname string, logger log.DebugLogger) (
	*fm_proto.GetMachineInfoResponse, error) {
	if *topologyDir == "" {
		return nil, errors.New("no topologyDir specified")
	}
	topo, err := topology.LoadWithParams(topology.Params{
		Logger:      logger,
		TopologyDir: *topologyDir,
	})
	if err != nil {
		return nil, err
	}
	machines, err := topo.ListMachines("")
	if err != nil {
		return nil, err
	}
	// Linear scan for the requested hostname.
	var machinePtr *fm_proto.Machine
	for _, machine := range machines {
		if machine.Hostname == hostname {
			machinePtr = machine
			break
		}
	}
	if machinePtr == nil {
		return nil,
			fmt.Errorf("machine: %s not found in topology", hostname)
	}
	subnets, err := topo.GetSubnetsForMachine(hostname)
	if err != nil {
		return nil, err
	}
	info := fm_proto.GetMachineInfoResponse{Machine: *machinePtr}
	info.Subnets = make([]*hyper_proto.Subnet, 0, len(subnets))
	for _, subnet := range subnets {
		info.Subnets = append(info.Subnets, &subnet.Subnet)
	}
	return &info, nil
}
|
package dev
// ComMode identifies how the host communicates with the device.
type ComMode int

const (
	// UartMode communicates over a serial (UART) device — see US100Config.Dev/Baud.
	UartMode ComMode = iota
	// TTLMode uses trigger/echo GPIO signaling — see US100Config.Trig/Echo.
	TTLMode
)
// DistMeter is a distance-measuring device: Dist returns the current
// reading and Close releases the underlying resources.
type DistMeter interface {
	Dist() float64
	Close()
}
// US100Config configures a US-100 sensor. Mode selects the protocol;
// Trig/Echo are used in TTL mode, while Dev/Baud configure UART mode
// (presumably Dev is the serial device path — confirm against the driver).
// Retry is the number of attempts per reading — TODO confirm semantics.
type US100Config struct {
	Mode  ComMode
	Trig  int8
	Echo  int8
	Dev   string
	Baud  int
	Retry int
}
|
package cis
import (
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/antonioalfa22/egida/pkg/ansible"
"github.com/antonioalfa22/go-utils/collections"
)
// ShowPointsMenu prompts the user with a multi-select menu of CIS
// benchmark points, maps each selection to its Ansible rule name
// ("rule_<section>") and runs the menu playbook over the given
// connection.
func ShowPointsMenu(connection string) {
	var points []string
	prompt := &survey.MultiSelect{
		Message: "Select CIS Points:",
		Options: []string{
			"1.1.1.1- Ensure mounting of cramfs filesystems is disabled (Scored)",
			"1.1.1.2- Ensure mounting of freevxfs filesystems is disabled (Scored)",
			"1.1.1.3- Ensure mounting of jffs2 filesystems is disabled (Scored)",
			"1.1.1.4- Ensure mounting of hfs filesystems is disabled (Scored)",
			"1.1.1.5- Ensure mounting of hfsplus filesystems is disabled (Scored)",
			"1.1.1.6- Ensure mounting of squashfs filesystems is disabled (Scored)",
			"1.1.1.7- Ensure mounting of udf filesystems is disabled (Scored)",
			"1.1.1.8- Ensure mounting of FAT filesystems is limited (Not Scored)",
			"1.1.2- Ensure /tmp is configured (Scored)",
			"1.1.3- Ensure nodev option set on /tmp partition (Scored)",
			"1.1.4- Ensure nosuid option set on /tmp partition (Scored)",
			"1.1.5- Ensure noexec option set on /tmp partition (Scored)",
			"1.1.6- Ensure separate partition exists for /var (Scored)",
			"1.1.7- Ensure separate partition exists for /var/tmp (Scored)",
			"1.1.8- Ensure nodev option set on /var/tmp partition (Scored)",
			"1.1.9- Ensure nosuid option set on /var/tmp partition (Scored)",
			"1.1.10- Ensure noexec option set on /var/tmp partition (Scored)",
			"1.1.11- Ensure separate partition exists for /var/log (Scored)",
			"1.1.12- Ensure separate partition exists for /var/log/audit (Scored)",
			"1.1.13- Ensure separate partition exists for /home (Scored)",
			"1.1.14- Ensure nodev option set on /home partition (Scored)",
			"1.1.15- Ensure nodev option set on /dev/shm partition (Scored)",
			"1.1.16- Ensure nosuid option set on /dev/shm partition (Scored)",
			"1.1.17- Ensure noexec option set on /dev/shm partition (Scored)",
			"1.1.18- Ensure nodev option set on removable media partitions (Not Scored)",
			"1.1.19- Ensure nosuid option set on removable media partitions (Not Scored)",
			"1.1.20- Ensure noexec option set on removable media partitions (Not Scored)",
			"1.1.21- Ensure sticky bit is set on all world-writable directories (Scored)",
			"1.1.22- Disable Automounting (Scored)",
			"1.1.23- Disable USB Storage (Scored)",
			"1.2.1- Ensure package manager repositories are configured (Not Scored)",
			"1.2.2- Ensure GPG keys are configured (Not Scored)",
			"1.3.1- Ensure sudo is installed (Scored)",
			"1.3.2- Ensure sudo commands use pty (Scored)",
			"1.3.3- Ensure sudo log file exists (Scored)",
			"1.4.1- Ensure AIDE is installed (Scored)",
			"1.4.2- Ensure filesystem integrity is regularly checked (Scored)",
			"1.5.1- Ensure permissions on bootloader config are configured (Scored)",
			"1.5.2- Ensure bootloader password is set (Scored)",
			"1.5.3- Ensure authentication required for single user mode (Scored)",
			"1.5.4- Ensure interactive boot is not enabled (Not Scored)",
			"1.6.1- Ensure XD/NX support is enabled (Scored)",
			"1.6.2- Ensure address space layout randomization (ASLR) is enabled (Scored)",
			"1.6.3- Ensure prelink is disabled (Scored)",
			"1.6.4- Ensure core dumps are restricted (Scored)",
			"1.7.1- Configure AppArmor",
			"1.7.1.1- Ensure XD/NX support is enabled (Scored)",
			"1.7.1.2- Ensure AppArmor is installed (Scored)",
			"1.7.1.3- Ensure all AppArmor Profiles are in enforce or complain mode (Scored)",
			"1.7.1.4- Ensure all AppArmor Profiles are enforcing (Scored)",
			"1.8.1- Command Line Warning Banners",
			"1.8.1.1- Ensure message of the day is configured properly (Scored)",
			"1.8.1.2- Ensure local login warning banner is configured properly (Scored)",
			"1.8.1.3- Ensure remote login warning banner is configured properly (Scored)",
			"1.8.1.4- Ensure permissions on /etc/motd are configured (Scored)",
			"1.8.1.5- Ensure permissions on /etc/issue are configured (Scored)",
			"1.8.1.6- Ensure permissions on /etc/issue.net are configured (Scored)",
			"1.8.2- Ensure GDM login banner is configured (Scored)",
			"1.9- Ensure updates, patches, and additional security software are installed (Not Scored)",
			"2.1.1- Ensure xinetd is not installed (Scored)",
			"2.1.2- Ensure openbsd-inetd is not installed (Scored)",
			"2.2.1.1- Ensure time synchronization is in use (Scored)",
			"2.2.1.2- Ensure systemd-timesyncd is configured (Not Scored)",
			"2.2.1.3- Ensure chrony is configured (Scored)",
			"2.2.1.4- Ensure ntp is configured (Scored)",
			"2.2.2- Ensure X Window System is not installed (Scored)",
			"2.2.3- Ensure Avahi Server is not enabled (Scored)",
			"2.2.4- Ensure CUPS is not enabled (Scored)",
			"2.2.5- Ensure DHCP Server is not enabled (Scored)",
			"2.2.6- Ensure LDAP server is not enabled (Scored)",
			"2.2.7- Ensure NFS and RPC are not enabled (Scored)",
			"2.2.8- Ensure DNS Server is not enabled (Scored)",
			"2.2.9- Ensure FTP Server is not enabled (Scored)",
			"2.2.10- Ensure HTTP server is not enabled (Scored)",
			"2.2.11- Ensure email services are not enabled (Scored)",
			"2.2.12- Ensure Samba is not enabled (Scored)",
			"2.2.13- Ensure HTTP Proxy Server is not enabled (Scored)",
			"2.2.14- Ensure SNMP Server is not enabled (Scored)",
			"2.2.15- Ensure mail transfer agent is configured for local-only mode (Scored)",
			"2.2.16- Ensure rsync service is not enabled (Scored)",
			"2.2.17- Ensure NIS Server is not enabled (Scored)",
			"2.3.1- Ensure NIS Client is not installed (Scored)",
			"2.3.2- Ensure rsh client is not installed (Scored)",
			"2.3.3- Ensure talk client is not installed (Scored)",
			"2.3.4- Ensure telnet client is not installed (Scored)",
			"2.3.5- Ensure LDAP client is not installed (Scored)",
			"3.1.1- Ensure packet redirect sending is disabled (Scored)",
			"3.1.2- Ensure IP forwarding is disabled (Scored)",
			"3.2.1- Ensure source routed packets are not accepted (Scored)",
			"3.2.2- Ensure ICMP redirects are not accepted (Scored)",
			"3.2.3- Ensure secure ICMP redirects are not accepted (Scored)",
			"3.2.4- Ensure suspicious packets are logged (Scored)",
			"3.2.5- Ensure broadcast ICMP requests are ignored (Scored)",
			"3.2.6- Ensure bogus ICMP responses are ignored (Scored)",
			"3.2.7- Ensure Reverse Path Filtering is enabled (Scored)",
			"3.2.8- Ensure TCP SYN Cookies is enabled (Scored)",
			"3.2.9- Ensure IPv6 router advertisements are not accepted (Scored)",
			"3.3.1- Ensure TCP Wrappers is installed (Not Scored)",
			"3.3.2- Ensure /etc/hosts.allow is configured (Not Scored)",
			"3.3.3- Ensure /etc/hosts.deny is configured (Not Scored)",
			"3.3.4- Ensure permissions on /etc/hosts.allow are configured (Scored)",
			"3.3.5- Ensure permissions on /etc/hosts.deny are configured (Scored)",
			"3.4.1- Ensure DCCP is disabled (Scored)",
			"3.4.2- Ensure SCTP is disabled (Scored)",
			"3.4.3- Ensure RDS is disabled (Scored)",
			"3.4.4- Ensure TIPC is disabled (Scored)",
			"3.5.1.1- Ensure a Firewall package is installed (Scored)",
			"3.5.2.1- Ensure ufw service is enabled (Scored)",
			"3.5.2.2- Ensure default deny firewall policy (Scored)",
			"3.5.2.3- Ensure loopback traffic is configured (Scored)",
			"3.5.2.4- Ensure outbound connections are configured (Not Scored)",
			"3.5.2.5- Ensure firewall rules exist for all open ports (Not Scored)",
			"3.6- Ensure wireless interfaces are disabled (Scored)",
			"3.7- Disable IPv6 (Not Scored)",
			"4.1.1.1- Ensure auditd is installed (Scored)",
			"4.1.1.2- Ensure auditd service is enabled (Scored)",
			"4.1.1.3- Ensure auditing for processes that start prior to auditd is enabled (Scored)",
			"4.1.1.4- Ensure audit_backlog_limit is sufficient (Scored)",
			"4.1.2.1- Ensure audit log storage size is configured (Scored)",
			"4.1.2.2- Ensure audit logs are not automatically deleted (Scored)",
			"4.1.2.3- Ensure system is disabled when audit logs are full (Scored)",
			"4.1.3- Ensure events that modify date and time information are collected (Scored)",
			"4.1.4- Ensure events that modify user/group information are collected (Scored)",
			"4.1.5- Ensure events that modify the systems network environment are collected (Scored)",
			"4.1.6- Ensure events that modify the systems Mandatory Access Controls are collected (Scored)",
			"4.1.7- Ensure login and logout events are collected (Scored)",
			"4.1.8- Ensure session initiation information is collected (Scored)",
			"4.1.9- Ensure discretionary access control permission modification events are collected (Scored)",
			"4.1.10- Ensure unsuccessful unauthorized file access attempts are collected (Scored)",
			"4.1.11- Ensure events that modify date and time information are collected (Scored)",
			"4.1.12- Ensure events that modify date and time information are collected (Scored)",
			"4.1.13- Ensure events that modify date and time information are collected (Scored)",
			"4.1.14- Ensure events that modify date and time information are collected (Scored)",
			"4.1.15- Ensure events that modify date and time information are collected (Scored)",
			"4.1.16- Ensure events that modify date and time information are collected (Scored)",
			"4.1.17- Ensure events that modify date and time information are collected (Scored)",
			"4.2.1.1- Ensure rsyslog is installed (Scored)",
			"4.2.1.2- Ensure rsyslog Service is enabled (Scored)",
			"4.2.1.3- Ensure logging is configured (Not Scored)",
			"4.2.1.4- Ensure rsyslog default file permissions configured (Scored)",
			"4.2.1.5- Ensure rsyslog is configured to send logs to a remote log host (Scored)",
			"4.2.1.6- Ensure remote rsyslog messages are only accepted on designated log hosts. (Not Scored)",
			"4.3- Ensure logrotate is configured (Not Scored)",
			"5.1.1- Ensure cron daemon is enabled (Scored)",
			"5.1.2- Ensure permissions on /etc/crontab are configured (Scored)",
			"5.1.3- Ensure permissions on /etc/cron.hourly are configured (Scored)",
			"5.1.4- Ensure permissions on /etc/cron.daily are configured (Scored)",
			"5.1.5- Ensure permissions on /etc/cron.weekly are configured (Scored)",
			"5.1.6- Ensure permissions on /etc/cron.monthly are configured (Scored)",
			"5.1.7- Ensure permissions on /etc/cron.d are configured (Scored)",
			"5.1.8- Ensure at/cron is restricted to authorized users (Scored)",
			"5.2.1- Ensure permissions on /etc/ssh/sshd_config are configured (Scored)",
			"5.2.2- Ensure permissions on SSH private host key files are configured (Scored)",
			"5.2.3- Ensure permissions on SSH public host key files are configured (Scored)",
			"5.2.4- Ensure SSH Protocol is not set to 1 (Scored)",
			"5.2.5- Ensure SSH LogLevel is appropriate (Scored)",
			"5.2.6- Ensure SSH X11 forwarding is disabled (Scored)",
			"5.2.7- Ensure SSH MaxAuthTries is set to 4 or less (Scored)",
			"5.2.8- Ensure SSH IgnoreRhosts is enabled (Scored)",
			"5.2.9- Ensure SSH HostbasedAuthentication is disabled (Scored)",
			"5.2.10- Ensure SSH root login is disabled (Scored)",
			"5.2.11- Ensure SSH PermitEmptyPasswords is disabled (Scored)",
			"5.2.12- Ensure SSH PermitUserEnvironment is disabled (Scored)",
			"5.2.13- Ensure only strong Ciphers are used (Scored)",
			"5.2.14- Ensure only strong MAC algorithms are used (Scored)",
			"5.2.15- Ensure only strong Key Exchange algorithms are used (Scored)",
			"5.2.16- Ensure SSH Idle Timeout Interval is configured (Scored)",
			"5.2.17- Ensure SSH LoginGraceTime is set to one minute or less (Scored)",
			"5.2.18- Ensure SSH access is limited (Scored)",
			"5.2.19- Ensure SSH warning banner is configured (Scored)",
			"5.2.20- Ensure SSH PAM is enabled (Scored)",
			"5.2.21- Ensure SSH AllowTcpForwarding is disabled (Scored)",
			"5.2.22- Ensure SSH MaxStartups is configured (Scored)",
			"5.2.23- Ensure SSH MaxSessions is set to 4 or less (Scored)",
			"5.3.1- Ensure password creation requirements are configured (Scored)",
			"5.3.2- Ensure lockout for failed password attempts is configured (Scored)",
			"5.3.3- Ensure password reuse is limited (Scored)",
			"5.3.4- Ensure password hashing algorithm is SHA-512 (Scored)",
			"5.4.1.1- Ensure password expiration is 365 days or less (Scored)",
			"5.4.1.2- Ensure minimum days between password changes is configured (Scored)",
			"5.4.1.3- Ensure password expiration warning days is 7 or more (Scored)",
			"5.4.1.4- Ensure inactive password lock is 30 days or less (Scored)",
			"5.4.1.5- Ensure all users last password change date is in the past (Scored)",
			"5.4.2- Ensure system accounts are secured (Scored)",
			"5.4.3- Ensure default group for the root account is GID 0 (Scored)",
			"5.4.4- Ensure default user umask is 027 or more restrictive (Scored)",
			"5.4.5- Ensure default user shell timeout is 900 seconds or less (Scored)",
			"5.5- Ensure root login is restricted to system console (Not Scored)",
			"5.6- Ensure access to the su command is restricted (Scored)",
			"6.1.1- Audit system file permissions (Not Scored)",
			"6.1.2- Ensure permissions on /etc/passwd are configured (Scored)",
			"6.1.3- Ensure permissions on /etc/gshadow- are configured (Scored)",
			"6.1.4- Ensure permissions on /etc/shadow are configured (Scored)",
			"6.1.5- Ensure permissions on /etc/group are configured (Scored)",
			"6.1.6- Ensure permissions on /etc/passwd- are configured (Scored)",
			"6.1.7- Ensure permissions on /etc/shadow- are configured (Scored)",
			"6.1.8- Ensure permissions on /etc/group- are configured (Scored)",
			"6.1.9- Ensure permissions on /etc/gshadow are configured (Scored)",
			"6.1.10- Ensure no world writable files exist (Scored)",
			"6.1.11- Ensure no unowned files or directories exist (Scored)",
			"6.1.12- Ensure no ungrouped files or directories exist (Scored)",
			"6.1.13- Audit SUID executables (Not Scored)",
			"6.1.14- Audit SGID executables (Not Scored)",
			"6.2.1- Ensure password fields are not empty (Scored)",
			// The original concatenated `"... no legacy " + " entries ..."`,
			// which dropped the literal "+" character the CIS rule is about.
			"6.2.2- Ensure no legacy \"+\" entries exist in /etc/passwd (Scored)",
			"6.2.3- Ensure all users home directories exist (Scored)",
			"6.2.4- Ensure no legacy \"+\" entries exist in /etc/shadow (Scored)",
			"6.2.5- Ensure no legacy \"+\" entries exist in /etc/group (Scored)",
			"6.2.6- Ensure root is the only UID 0 account (Scored)",
			"6.2.7- Ensure root PATH Integrity (Scored)",
			"6.2.8- Ensure users home directories permissions are 750 or more restrictive (Scored)",
			"6.2.9- Ensure users own their home directories (Scored)",
			"6.2.10- Ensure users dot files are not group or world writable (Scored)",
			"6.2.11- Ensure no users have .forward files (Scored)",
			"6.2.12- Ensure no users have .netrc files (Scored)",
			"6.2.13- Ensure users .netrc Files are not group or world accessible (Scored)",
			"6.2.14- Ensure no users have .rhosts files (Scored)",
			"6.2.15- Ensure all groups in /etc/passwd exist in /etc/group (Scored)",
			"6.2.16- Ensure no duplicate UIDs exist (Scored)",
			"6.2.17- Ensure no duplicate GIDs exist (Scored)",
			"6.2.18- Ensure no duplicate user names exist (Scored)",
			"6.2.19- Ensure no duplicate group names exist (Scored)",
			"6.2.20- Ensure shadow group is empty (Scored)",
		},
	}
	_ = survey.AskOne(prompt, &points)
	// Map each selection "X.Y.Z- Description" to its rule name "rule_X.Y.Z".
	result := collections.Map(points, func(p string) string { return "rule_" + strings.Split(p, "-")[0] })
	ansible.RunMenuPlaybook(result.([]string), connection)
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package armhelpers
import (
"context"
"testing"
)
// TestResourceSkusInterface verifies that ListResourceSkus returns a
// non-empty first page of SKUs against the HTTP mock client.
func TestResourceSkusInterface(t *testing.T) {
	mockClient, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mockClient.RegisterLogin()
	mockClient.RegisterListResourceSkus()
	if err = mockClient.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mockClient.DeactivateAndReset()
	azureClient, err := NewAzureClientWithClientSecret(mockClient.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	page, err := azureClient.ListResourceSkus(context.Background(), "")
	if err != nil {
		t.Error(err)
	}
	if page == nil || len(page.Values()) == 0 {
		t.Fatalf("expected skus not to be empty")
	}
}
|
package cmd
import (
// System
"fmt"
"os"
"strconv"
"strings"
// 3rd Party
log "github.com/sirupsen/logrus"
)
// Get future release
// GetFutureRelease bumps the future release version in o by the
// component named in t ("major", "minor" or "patch"). Unknown component
// names are rejected with a log message.
//
// The original switched on release[t]: a missing key yielded 0, so an
// unknown option silently incremented the major version and the default
// branch was unreachable; the three case bodies were also identical.
func GetFutureRelease(o *ReleaseOptions, t string) {
	release := map[string]int{
		"major": 0,
		"minor": 1,
		"patch": 2,
	}
	// Reject unknown components explicitly instead of falling through
	// to index 0 (major).
	idx, ok := release[t]
	if !ok {
		log.Info("Wrong option, Try again")
		return
	}
	currentRelease := o.FutureRelease
	intRelease, releaseDigits := ReleaseToInt(currentRelease)
	o.FutureRelease = IncrementRelease(currentRelease, intRelease, idx, releaseDigits)
}
// ReleaseToInt parses a semver tag "major.minor.patch" into its integer
// components. It returns the parsed ints alongside the original string
// digits. The process exits if the tag does not follow the x.x.x
// pattern or any component is not numeric.
func ReleaseToInt(currentRelease string) ([]int, []string) {
	// Parse release digits major.minor.patch by .
	releaseDigits := strings.Split(currentRelease, ".")
	// Require exactly three components: the original `> 3` check let
	// short tags like "1.2" through, which later panic indexing
	// releaseDigits[2] in IncrementRelease.
	if len(releaseDigits) != 3 {
		log.Errorf("Your tag %s, does not follow the semver pattern x.x.x", currentRelease)
		os.Exit(1)
	}
	outputRelease := make([]int, 0, len(releaseDigits))
	// Convert release digits from string to int
	for _, digit := range releaseDigits {
		aux, err := strconv.Atoi(digit)
		if err != nil {
			log.Errorf("\t %v", err)
			log.Fatalf("\t Release %v not supported", currentRelease)
		}
		outputRelease = append(outputRelease, aux)
	}
	return outputRelease, releaseDigits
}
// IncrementRelease bumps the release component at the given index and
// returns the reassembled "major.minor.patch" string. Both intRelease
// and releaseDigits are updated in place.
func IncrementRelease(currentRelease string, intRelease []int, release int, releaseDigits []string) string {
	// Bump the requested component and mirror it back into the digit strings.
	intRelease[release]++
	releaseDigits[release] = strconv.Itoa(intRelease[release])
	// Reassemble the semver string from its three components.
	newRelease := fmt.Sprintf("%s.%s.%s", releaseDigits[0], releaseDigits[1], releaseDigits[2])
	log.Infof("\t New release is: %v", newRelease)
	return newRelease
}
|
package dushengchen
/*
Submission:
https://leetcode.com/submissions/detail/357219769/
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// mergeKListsV2 merges k sorted linked lists by folding mergeTwoLists
// over the slice, accumulating the result pairwise. Returns nil for an
// empty input.
func mergeKListsV2(lists []*ListNode) *ListNode {
	if len(lists) == 0 {
		return nil
	}
	merged := lists[0]
	for _, list := range lists[1:] {
		merged = mergeTwoLists(merged, list)
	}
	return merged
}
|
package reverseproxy
import (
"net/http/httputil"
)
// NewReverseProxy assembles an httputil.ReverseProxy from the supplied
// director, response-modifier and transporter providers.
func NewReverseProxy(director Director, responseModifier ResponseModifier, transporter Transporter) *httputil.ReverseProxy {
	proxy := &httputil.ReverseProxy{}
	proxy.Director = director.Get()
	proxy.ModifyResponse = responseModifier.Get()
	proxy.Transport = transporter.Get()
	return proxy
}
|
package fib
// isNonNegative reports whether n is zero or positive.
func isNonNegative(n int) bool {
	// Return the condition directly instead of the redundant
	// if-true-else-false pattern.
	return n >= 0
}
// Fib1 returns the n-th Fibonacci number (Fib1(0) == 0, Fib1(1) == 1).
// It panics if n is negative.
//
// The original recursed naively, costing O(2^n) time; this iterative
// form runs in O(n) with identical results and the same panic behavior.
func Fib1(n int) int {
	if n < 0 {
		panic("fibonacci input must be non-negative")
	}
	a, b := 0, 1
	for i := 0; i < n; i++ {
		a, b = b, a+b
	}
	return a
}
|
package projector
import (
"context"
"fmt"
"net"
"strings"
"time"
)
//type Devices map[string]Device

// Device represents one network projector, addressed over TCP, together
// with the per-session state used to drive it.
type Device struct {
	// Address is the projector's TCP endpoint ("host:port" — see the
	// net.DialTimeout calls below).
	Address string
	Name    string
	// The embedded context/cancel pair represents the current session;
	// both are replaced on each successful Connect.
	context.Context
	context.CancelFunc
	// commands carries queued commands to the commander goroutine.
	commands chan Command
	State    State
	// EventCallbacks maps client IDs to channels that receive updated
	// state objects after responses are handled.
	EventCallbacks map[string]chan interface{}
}

// Callback is a callback signature; not referenced elsewhere in this
// file — presumably used by callers.
type Callback func(string, interface{})

// State aggregates the last-known projector state by category.
type State struct {
	Power
	Freeze
	Blank
}
// Register subscribes a client to state-change events from this
// projector, creating the callback map lazily on first use.
func (d *Device) Register(clientID string, callback chan interface{}) {
	fmt.Printf("[Projector:%s] Registering callbacks. Client:%s\n", d.Address, clientID)
	// Lazily initialize the map so a zero-value Device works.
	if d.EventCallbacks == nil {
		d.EventCallbacks = map[string]chan interface{}{}
	}
	d.EventCallbacks[clientID] = callback
}
// Deregister removes a previously registered client callback.
// Removing an unknown id is a no-op.
func (d *Device) Deregister(clientID string) {
	delete(d.EventCallbacks, clientID)
}
// Run drives the projector connection loop forever: it (re)establishes
// a session via Connect, then blocks until the session's context is
// done before reconnecting. Dial failures are retried every 5 seconds.
func (d *Device) Run() {
	fmt.Printf("Running Projector(%s)\n", d.Address)
	for {
		err := d.Connect()
		if err != nil {
			fmt.Printf("ERROR projector.Connect(%s): %v\n", d.Address, err)
			time.Sleep(time.Second * 5)
			continue
		}
		//go d.reader()
		//go d.writer()
		// Wait for the current session to be cancelled before looping.
		// NOTE(review): only Connect itself cancels the previous session
		// — confirm something external cancels, or this blocks forever.
		<- d.Done()
	}
}
// Connect probes the projector with a short TCP dial (the probe
// connection is closed on return — each command exchange later dials
// its own connection), then starts a fresh session: any previous
// session context is cancelled, a new context/command channel is
// installed, and a commander goroutine is launched.
func (d *Device) Connect() (err error) {
	//fmt.Printf("projector.Connect(%s)\n", d.Address)
	conn, err := net.DialTimeout("tcp", d.Address, time.Second * 1)
	if err != nil {
		return fmt.Errorf("DialTCP: %v", err)
	} else {
		// Probe only; closed when Connect returns.
		defer conn.Close()
	}
	// TODO test if it's actually alive
	// start new session
	if d.Context != nil {
		d.CancelFunc()
	}
	d.Context, d.CancelFunc = context.WithCancel(context.Background())
	d.commands = make(chan Command)
	go d.commander()
	return nil
}
// Command queues cmd for the commander goroutine. The send blocks until
// the commander accepts it (the channel is unbuffered).
func (d *Device) Command(cmd Command) {
	d.commands <- cmd
}
// commander consumes queued commands for the lifetime of the current
// session. Each command is retried once a second until an exchange
// succeeds; the response is then handled asynchronously. The goroutine
// exits when the session context is cancelled.
func (d *Device) commander() {
	for {
		select {
		case <- d.Done():
			// Session cancelled; stop this commander.
			return
		case cmd := <- d.commands:
			// try until success
			for {
				response, err := d.command(cmd)
				if err != nil {
					fmt.Printf("ERROR %T: %v\n", cmd, err)
					time.Sleep(time.Second)
					continue
				}
				// Handle off the loop so the next command isn't delayed.
				go d.handle(response)
				break
			}
		}
	}
}
// command performs one request/response exchange with the projector
// over a fresh TCP connection: dial (1s timeout), write the command's
// request (1s deadline), then read a single response of up to 100 bytes
// (3s deadline). It returns the raw response string.
//
// Cleanups over the original: error strings no longer carry "ERROR "
// prefixes or trailing newlines (vet/ST1005), and the read-error path
// returns the error once instead of logging and bare-returning.
func (d *Device) command(cmd Command) (string, error) {
	fmt.Printf("[Projector.command] %#v\n", cmd)
	conn, err := net.DialTimeout("tcp", d.Address, time.Second*1)
	if err != nil {
		return "", fmt.Errorf("DialTCP: %v", err)
	}
	defer conn.Close()
	// Write the request with a 1s deadline.
	if err := conn.SetWriteDeadline(time.Now().Add(time.Second)); err != nil {
		return "", fmt.Errorf("setWriteDeadline: %v", err)
	}
	if _, err := conn.Write([]byte(cmd.Request())); err != nil {
		return "", fmt.Errorf("conn.Write: %v", err)
	}
	// Read a single response with a 3s deadline.
	if err := conn.SetReadDeadline(time.Now().Add(time.Second * 3)); err != nil {
		return "", fmt.Errorf("setReadDeadline: %v", err)
	}
	data := make([]byte, 100)
	n, err := conn.Read(data)
	if err != nil {
		return "", fmt.Errorf("conn.Read: %v", err)
	}
	return string(data[:n]), nil
}
// handle dispatches a raw projector response to the state category
// (Power, Freeze, Blank) whose prefix matches it, updates that state
// and fans the updated state out to registered clients. Responses that
// match no category are logged and dropped.
func (d *Device) handle(response string) {
	//fmt.Printf("[Projectorhandle] %#v\n", d)
	// TODO detect command type better
	for _, prefix := range d.State.Power.Prefixes() {
		if strings.HasPrefix(response, prefix) {
			d.State.Power.Handle(response)
			d.callback(d.State.Power)
			return
		}
	}
	for _, prefix := range d.State.Freeze.Prefixes() {
		if strings.HasPrefix(response, prefix) {
			d.State.Freeze.Handle(response)
			d.callback(d.State.Freeze)
			return
		}
	}
	for _, prefix := range d.State.Blank.Prefixes() {
		if strings.HasPrefix(response, prefix) {
			d.State.Blank.Handle(response)
			d.callback(d.State.Blank)
			return
		}
	}
	fmt.Printf("UNHANDLED project.handle(%s)\n", response)
}
// callback sends the updated state object to every registered client
// channel. NOTE(review): each send blocks if a client stops draining
// its channel — confirm receivers always read promptly.
func (d *Device) callback(cmd interface{}) {
	for _, callback := range d.EventCallbacks {
		callback <- cmd
	}
}
|
package recovery
import (
"context"
"testing"
)
// TestOnce verifies that the Recovery middleware swallows a panic from
// the wrapped handler instead of letting it propagate to the caller.
func TestOnce(t *testing.T) {
	defer func() {
		if recover() != nil {
			t.Error("fail")
		}
	}()
	panicking := func(ctx context.Context, req interface{}) (interface{}, error) {
		panic("panic reason")
	}
	_, err := Recovery()(panicking)(context.Background(), "panic")
	t.Logf("succ and reason is %v", err)
}
|
package main
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
// TestEnvSetting checks that the expected development environment
// variables are set before the suite runs.
func TestEnvSetting(t *testing.T) {
	assert.Equal(t, "development", os.Getenv("GO_ENV"))
	assert.Equal(t, "bolt://neo4j:neo4jadmin@localhost:7687", os.Getenv("NEO4J_URL"))
}
|
package jwt
import (
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/rodzy/flash/models"
)
// Spawn generates a signed HS256 JWT for the given user, embedding the
// user's profile fields as claims together with a 24-hour "expire"
// timestamp. It returns the signed token string or the signing error.
//
// SECURITY(review): the signing key is hard-coded in source; it should
// be loaded from configuration or the environment instead.
func Spawn(u models.User) (string, error) {
	pass := []byte("YoooHelloGolang_")
	payload := jwt.MapClaims{
		"email":     u.Email,
		"name":      u.Name,
		"lastname":  u.LastName,
		"birthdate": u.BirthDate,
		"bio":       u.Bio,
		"location":  u.Location,
		"website":   u.WebSite,
		"_id":       u.ID.Hex(),
		"expire":    time.Now().Add(time.Hour * 24).Unix(),
	}
	// SignedString already returns (token, err); the original's
	// err-check followed by an identical return was redundant.
	return jwt.NewWithClaims(jwt.SigningMethodHS256, payload).SignedString(pass)
}
|
package service
import (
"log"
"github.com/rudeigerc/broker-gateway/mapper"
"github.com/rudeigerc/broker-gateway/model"
)
// Firm provides query services over firm records.
type Firm struct {
}

// Firms returns all firms from the data store. On a query error it logs
// the failure and returns whatever was loaded (possibly nil).
func (f Firm) Firms() []model.Firm {
	var firms []model.Firm
	if err := mapper.NewMapper().Find(&firms); err != nil {
		log.Printf("[service.firm.Firms] [ERROR] %s", err)
	}
	return firms
}
|
package config
import (
"github.com/mitchellh/go-homedir"
"gopkg.in/yaml.v2"
"io"
"io/ioutil"
)
// Client keeps all client configuration settings. It is populated by
// ClientConfig.LoadConfig.
var Client ClientConfig = ClientConfig{}

// Basically, our config is inside the "config" section. So we load the whole file and only store the Cfg section
type wrappedClientConfig struct {
	Cfg ClientConfig `yaml:"config"`
}

// ClientConfig is the representation of the client configuration.
type ClientConfig struct {
	// Accounts configures where account data lives and its proof-of-work
	// setting.
	Accounts struct {
		Path        string `yaml:"path"`
		ProofOfWork int    `yaml:"proof_of_work"`
	} `yaml:"accounts"`
	// Composer configures message composition (external editor).
	Composer struct {
		Editor string `yaml:"editor"`
	} `yaml:"composer"`
	// Server configures how the client talks to the server.
	Server struct {
		AllowInsecure bool `yaml:"allow_insecure"`
		DebugHttp     bool `yaml:"debug_http"`
	} `yaml:"server"`
	// Resolver configures the (optional) remote resolver.
	Resolver struct {
		Remote struct {
			Enabled bool   `yaml:"enabled"`
			URL     string `yaml:"url"`
		} `yaml:"remote"`
	} `yaml:"resolver"`
}
// LoadConfig loads the client configuration from the given reader into
// c. The YAML is expected to nest everything under a top-level
// "config:" key; only that section is kept.
func (c *ClientConfig) LoadConfig(r io.Reader) error {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	var lc wrappedClientConfig
	if err := yaml.Unmarshal(data, &lc); err != nil {
		return err
	}
	// We only care about the Cfg section. This keeps our "config:" section in
	// the yaml file but we can still use config.Client.Accounts.Path instead
	// of config.Client.Cfg.Accounts.Path.
	*c = lc.Cfg
	// Expand "~" in the accounts path. The original discarded this error,
	// silently keeping an unexpanded path; surface it instead.
	path, err := homedir.Expand(c.Accounts.Path)
	if err != nil {
		return err
	}
	c.Accounts.Path = path
	return nil
}
|
package backend
import (
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Specs for session construction and note handling: sessions can be
// created, notes are appended, and empty notes are silently rejected.
var _ = Describe("Sessions", func() {
	It("should create a Session without crashing", func() {
		createSession := func() {
			NewSession("Test", 1)
		}
		Expect(createSession).ShouldNot(Panic())
	})
	It("should add a note to a session", func() {
		session := NewSession("Test", 1)
		note := NewNote("Test note", time.Now())
		session.AddNote(note)
		Expect(session.Notes).To(ContainElement(note))
	})
	It("should not add empty notes to the session", func() {
		session := NewSession("Test", 1)
		note := NewNote("", time.Now())
		session.AddNote(note)
		Expect(session.Notes).ToNot(ContainElement(note))
	})
})
// Specs for ValidateSessionNumber: accepts positive integers and empty
// input; rejects negatives, non-numeric text and floats.
var _ = Describe("Session number validator", func() {
	It("should allow positive session numbers", func() {
		Expect(ValidateSessionNumber("1")).To(BeNil())
	})
	It("should allow empty input", func() {
		Expect(ValidateSessionNumber("")).To(BeNil())
	})
	It("should not allow negative numbers", func() {
		Expect(ValidateSessionNumber("-2")).ToNot(BeNil())
	})
	It("should not allow non-numeric characters", func() {
		Expect(ValidateSessionNumber("nan")).ToNot(BeNil())
	})
	It("should not allow float numbers", func() {
		Expect(ValidateSessionNumber("1.23")).ToNot(BeNil())
	})
})
// Specs for Session.ToJSON: title, number, date and notes must all
// round-trip into the expected JSON fragments.
// Fix over the original: the second spec's description read "serialze".
var _ = Describe("JSON Serialization", func() {
	It("should serialize the session title", func() {
		title := "The Conquest at Calimport"
		s := NewSession(title, 0)
		Expect(s.ToJSON()).To(ContainSubstring("\"SessionTitle\":\"" + title + "\""))
	})
	It("should serialize the session number", func() {
		number := 9
		s := NewSession("Reunion in the Face of Adversity", number)
		Expect(s.ToJSON()).To(ContainSubstring("\"SessionNumber\":" + strconv.Itoa(number)))
	})
	It("should serialize the session date", func() {
		date := time.Date(2021, time.June, 22, 15, 0, 0, 0, time.FixedZone("UTC-0", 0))
		s := NewSession("The Conquest at Calimport", 0, withCustomDate(date))
		Expect(s.ToJSON()).To(ContainSubstring("\"Date\":\"2021-06-22T15:00:00Z\""))
	})
	It("should serialize with no notes if none were added", func() {
		s := NewSession("The Return of Aust Redwyn", 0)
		Expect(s.ToJSON()).To(ContainSubstring("\"Notes\":[]"))
	})
	It("should serialize with notes if any were added", func() {
		s := NewSession("The Conquest at Calimport", 0)
		date := time.Date(2021, time.June, 22, 15, 0, 0, 0, time.FixedZone("UTC-0", 0))
		s.AddNote(NewNote("Xenthe almost died", date))
		Expect(s.ToJSON()).To(ContainSubstring("[{\"Content\":\"Xenthe almost died\",\"Time\":\"2021-06-22T15:00:00Z\"}]"))
	})
})
// Specs for FromJSON: every field serialized by ToJSON must
// round-trip, malformed input yields an error, and a negative session
// number deserializes to NO_SESSION_NUMBER.
var _ = Describe("JSON Deserialization", func() {
	It("should correctly deserialize the session title", func() {
		title := "The Conquest at Calimport"
		s := NewSession(title, 0)
		s2, _ := FromJSON(s.ToJSON())
		Expect(s2.SessionTitle).To(Equal(title))
	})
	It("should deserialize sessions without titles", func() {
		s := NewSession("", 0)
		s2, _ := FromJSON(s.ToJSON())
		Expect(s2.SessionTitle).To(Equal(s.SessionTitle))
	})
	It("should correctly deserialize the session number", func() {
		number := 9
		s := NewSession("Reunion in the Face of Adversity", number)
		s2, _ := FromJSON(s.ToJSON())
		Expect(s2.SessionNumber).To(Equal(number))
	})
	It("should deserialize the session date", func() {
		date := time.Date(2021, time.June, 22, 15, 0, 0, 0, time.FixedZone("UTC-0", 0))
		s := NewSession("The Conquest at Calimport", 0, withCustomDate(date))
		s2, _ := FromJSON(s.ToJSON())
		Expect(s2.Date.Equal(s.Date)).To(BeTrue())
	})
	It("should deserialize sessions without notes", func() {
		s := NewSession("The Return of Aust Redwyn", 0)
		s2, _ := FromJSON(s.ToJSON())
		Expect(s2.Notes).To(BeEmpty())
	})
	It("should deserialize sessions with notes", func() {
		s := NewSession("The Conquest at Calimport", 0)
		note := "Xenthe almost died"
		s.AddNote(NewNote(note, time.Now()))
		s2, _ := FromJSON(s.ToJSON())
		Expect(s2.Notes).To(Not(BeEmpty()))
		Expect(s2.Notes[0].Content).To(Equal(note))
	})
	It("should return an error if the data is malformed", func() {
		data := "This isn't a JSON and cannot be loaded"
		_, err := FromJSON(data)
		Expect(err).To(Not(BeNil()))
	})
	It("should return a session with no session number if the JSON has a negative session number", func() {
		data := `{"Notes":[{"Content":"Test string","Time":"2021-07-15T14:38:04.732366749-04:00"}],"Date":"2021-07-15T14:38:04.732366058-04:00","SessionTitle":"Test session","SessionNumber":-2}`
		s, _ := FromJSON(data)
		Expect(s.SessionNumber).To(Equal(NO_SESSION_NUMBER))
	})
})
// TODO: add tests for saving functionality
// TODO: add tests for loading functionality
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dlppb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/dlp/dlp_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp"
)
// JobTriggerServer implements the gRPC interface for JobTrigger. It is
// stateless; all conversion helpers below are free functions.
type JobTriggerServer struct{}
// ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum enum from its proto representation.
// The proto zero value (unspecified) maps to nil; otherwise the enum
// name is converted by stripping the generated proto prefix.
func ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(e dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum) *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum {
	if e == 0 {
		return nil
	}
	if n, ok := dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum_name[int32(e)]; ok {
		e := dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(n[len("DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum"):])
		return &e
	}
	return nil
}
// ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum enum from its proto representation.
// The proto zero value (unspecified) maps to nil; otherwise the enum
// name is converted by stripping the generated proto prefix.
func ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum(e dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum) *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum {
	if e == 0 {
		return nil
	}
	if n, ok := dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum_name[int32(e)]; ok {
		e := dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum(n[len("DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum"):])
		return &e
	}
	return nil
}
// ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum converts a JobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum enum from its proto representation.
// The proto zero value (unspecified) maps to nil; otherwise the enum
// name is converted by stripping the generated proto prefix.
func ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum(e dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum) *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum {
	if e == 0 {
		return nil
	}
	if n, ok := dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum_name[int32(e)]; ok {
		e := dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum(n[len("DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum"):])
		return &e
	}
	return nil
}
// ProtoToDlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum converts a JobTriggerInspectJobInspectConfigMinLikelihoodEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum(e dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum) *dlp.JobTriggerInspectJobInspectConfigMinLikelihoodEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobInspectConfigMinLikelihoodEnum(n[len("DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum converts a JobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum(e dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum(n[len("DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(e dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(n[len("DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum converts a JobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum(e dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum(n[len("DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(e dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(n[len("DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum(e dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum(n[len("DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum converts a JobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum(e dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum) *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum(n[len("DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum"):])
	return &v
}
// ProtoToDlpJobTriggerStatusEnum converts a JobTriggerStatusEnum enum from its proto representation.
// It returns nil for the zero (unspecified) enum value and for values
// missing from the proto name map.
func ProtoToDlpJobTriggerStatusEnum(e dlppb.DlpJobTriggerStatusEnum) *dlp.JobTriggerStatusEnum {
	if e == 0 {
		return nil
	}
	n, ok := dlppb.DlpJobTriggerStatusEnum_name[int32(e)]
	if !ok {
		return nil
	}
	// Strip the proto type prefix from the enum's name to obtain the DCL value.
	v := dlp.JobTriggerStatusEnum(n[len("DlpJobTriggerStatusEnum"):])
	return &v
}
// ProtoToDlpJobTriggerInspectJob converts a JobTriggerInspectJob object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJob(p *dlppb.DlpJobTriggerInspectJob) *dlp.JobTriggerInspectJob {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJob{
		StorageConfig:       ProtoToDlpJobTriggerInspectJobStorageConfig(p.GetStorageConfig()),
		InspectConfig:       ProtoToDlpJobTriggerInspectJobInspectConfig(p.GetInspectConfig()),
		InspectTemplateName: dcl.StringOrNil(p.GetInspectTemplateName()),
	}
	// Convert each repeated action message individually.
	for _, r := range p.GetActions() {
		obj.Actions = append(obj.Actions, *ProtoToDlpJobTriggerInspectJobActions(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfig converts a JobTriggerInspectJobStorageConfig object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfig(p *dlppb.DlpJobTriggerInspectJobStorageConfig) *dlp.JobTriggerInspectJobStorageConfig {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfig{
		DatastoreOptions:    ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptions(p.GetDatastoreOptions()),
		CloudStorageOptions: ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptions(p.GetCloudStorageOptions()),
		BigQueryOptions:     ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptions(p.GetBigQueryOptions()),
		HybridOptions:       ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptions(p.GetHybridOptions()),
		TimespanConfig:      ProtoToDlpJobTriggerInspectJobStorageConfigTimespanConfig(p.GetTimespanConfig()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptions converts a JobTriggerInspectJobStorageConfigDatastoreOptions object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptions(p *dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptions) *dlp.JobTriggerInspectJobStorageConfigDatastoreOptions {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigDatastoreOptions{
		PartitionId: ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(p.GetPartitionId()),
		Kind:        ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptionsKind(p.GetKind()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId converts a JobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(p *dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId) *dlp.JobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId{
		ProjectId:   dcl.StringOrNil(p.GetProjectId()),
		NamespaceId: dcl.StringOrNil(p.GetNamespaceId()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptionsKind converts a JobTriggerInspectJobStorageConfigDatastoreOptionsKind object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigDatastoreOptionsKind(p *dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptionsKind) *dlp.JobTriggerInspectJobStorageConfigDatastoreOptionsKind {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigDatastoreOptionsKind{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptions converts a JobTriggerInspectJobStorageConfigCloudStorageOptions object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptions(p *dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptions) *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptions {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigCloudStorageOptions{
		FileSet:                  ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(p.GetFileSet()),
		BytesLimitPerFile:        dcl.Int64OrNil(p.GetBytesLimitPerFile()),
		BytesLimitPerFilePercent: dcl.Int64OrNil(p.GetBytesLimitPerFilePercent()),
		SampleMethod:             ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum(p.GetSampleMethod()),
		FilesLimitPercent:        dcl.Int64OrNil(p.GetFilesLimitPercent()),
	}
	// Convert each repeated file-type enum individually.
	for _, r := range p.GetFileTypes() {
		obj.FileTypes = append(obj.FileTypes, *ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(p *dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet) *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet{
		Url:          dcl.StringOrNil(p.GetUrl()),
		RegexFileSet: ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(p.GetRegexFileSet()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(p *dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet) *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet{
		BucketName: dcl.StringOrNil(p.GetBucketName()),
	}
	// The repeated fields are plain string slices, so bulk-append them
	// instead of copying element-by-element.
	obj.IncludeRegex = append(obj.IncludeRegex, p.GetIncludeRegex()...)
	obj.ExcludeRegex = append(obj.ExcludeRegex, p.GetExcludeRegex()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptions converts a JobTriggerInspectJobStorageConfigBigQueryOptions object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptions(p *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptions) *dlp.JobTriggerInspectJobStorageConfigBigQueryOptions {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigBigQueryOptions{
		TableReference:   ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(p.GetTableReference()),
		RowsLimit:        dcl.Int64OrNil(p.GetRowsLimit()),
		RowsLimitPercent: dcl.Int64OrNil(p.GetRowsLimitPercent()),
		SampleMethod:     ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum(p.GetSampleMethod()),
	}
	// Convert each repeated field-reference message individually.
	for _, r := range p.GetIdentifyingFields() {
		obj.IdentifyingFields = append(obj.IdentifyingFields, *ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(r))
	}
	for _, r := range p.GetExcludedFields() {
		obj.ExcludedFields = append(obj.ExcludedFields, *ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields(r))
	}
	for _, r := range p.GetIncludedFields() {
		obj.IncludedFields = append(obj.IncludedFields, *ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference converts a JobTriggerInspectJobStorageConfigBigQueryOptionsTableReference object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(p *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference) *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsTableReference {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsTableReference{
		ProjectId: dcl.StringOrNil(p.GetProjectId()),
		DatasetId: dcl.StringOrNil(p.GetDatasetId()),
		TableId:   dcl.StringOrNil(p.GetTableId()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields converts a JobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields(p *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields) *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields converts a JobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields(p *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields) *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields converts a JobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields(p *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields) *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptions converts a JobTriggerInspectJobStorageConfigHybridOptions object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptions(p *dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptions) *dlp.JobTriggerInspectJobStorageConfigHybridOptions {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigHybridOptions{
		Description:  dcl.StringOrNil(p.GetDescription()),
		TableOptions: ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptions(p.GetTableOptions()),
	}
	// The label keys are plain strings, so bulk-append them instead of
	// copying element-by-element.
	obj.RequiredFindingLabelKeys = append(obj.RequiredFindingLabelKeys, p.GetRequiredFindingLabelKeys()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptions converts a JobTriggerInspectJobStorageConfigHybridOptionsTableOptions object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptions(p *dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptions) *dlp.JobTriggerInspectJobStorageConfigHybridOptionsTableOptions {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigHybridOptionsTableOptions{}
	// Convert each repeated field-reference message individually.
	for _, r := range p.GetIdentifyingFields() {
		obj.IdentifyingFields = append(obj.IdentifyingFields, *ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields converts a JobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields(p *dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields) *dlp.JobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigTimespanConfig converts a JobTriggerInspectJobStorageConfigTimespanConfig object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigTimespanConfig(p *dlppb.DlpJobTriggerInspectJobStorageConfigTimespanConfig) *dlp.JobTriggerInspectJobStorageConfigTimespanConfig {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigTimespanConfig{
		StartTime:                            dcl.StringOrNil(p.GetStartTime()),
		EndTime:                              dcl.StringOrNil(p.GetEndTime()),
		TimestampField:                       ProtoToDlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(p.GetTimestampField()),
		EnableAutoPopulationOfTimespanConfig: dcl.Bool(p.GetEnableAutoPopulationOfTimespanConfig()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampField converts a JobTriggerInspectJobStorageConfigTimespanConfigTimestampField object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(p *dlppb.DlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampField) *dlp.JobTriggerInspectJobStorageConfigTimespanConfigTimestampField {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobStorageConfigTimespanConfigTimestampField{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfig converts a JobTriggerInspectJobInspectConfig object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfig(p *dlppb.DlpJobTriggerInspectJobInspectConfig) *dlp.JobTriggerInspectJobInspectConfig {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfig{
		MinLikelihood:    ProtoToDlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum(p.GetMinLikelihood()),
		Limits:           ProtoToDlpJobTriggerInspectJobInspectConfigLimits(p.GetLimits()),
		IncludeQuote:     dcl.Bool(p.GetIncludeQuote()),
		ExcludeInfoTypes: dcl.Bool(p.GetExcludeInfoTypes()),
	}
	// Convert each repeated sub-message individually.
	for _, r := range p.GetInfoTypes() {
		obj.InfoTypes = append(obj.InfoTypes, *ProtoToDlpJobTriggerInspectJobInspectConfigInfoTypes(r))
	}
	for _, r := range p.GetCustomInfoTypes() {
		obj.CustomInfoTypes = append(obj.CustomInfoTypes, *ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypes(r))
	}
	for _, r := range p.GetRuleSet() {
		obj.RuleSet = append(obj.RuleSet, *ProtoToDlpJobTriggerInspectJobInspectConfigRuleSet(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigInfoTypes converts a JobTriggerInspectJobInspectConfigInfoTypes object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigInfoTypes(p *dlppb.DlpJobTriggerInspectJobInspectConfigInfoTypes) *dlp.JobTriggerInspectJobInspectConfigInfoTypes {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigInfoTypes{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigLimits converts a JobTriggerInspectJobInspectConfigLimits object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigLimits(p *dlppb.DlpJobTriggerInspectJobInspectConfigLimits) *dlp.JobTriggerInspectJobInspectConfigLimits {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigLimits{
		MaxFindingsPerItem:    dcl.Int64OrNil(p.GetMaxFindingsPerItem()),
		MaxFindingsPerRequest: dcl.Int64OrNil(p.GetMaxFindingsPerRequest()),
	}
	// Convert each repeated per-info-type limit individually.
	for _, r := range p.GetMaxFindingsPerInfoType() {
		obj.MaxFindingsPerInfoType = append(obj.MaxFindingsPerInfoType, *ProtoToDlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType converts a JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType(p *dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType) *dlp.JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType{
		InfoType:    ProtoToDlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(p.GetInfoType()),
		MaxFindings: dcl.Int64OrNil(p.GetMaxFindings()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType converts a JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(p *dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType) *dlp.JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType{
		Name:    dcl.StringOrNil(p.GetName()),
		Version: dcl.StringOrNil(p.GetVersion()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypes converts a JobTriggerInspectJobInspectConfigCustomInfoTypes object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypes(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypes) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypes {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypes{
		InfoType:      ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType(p.GetInfoType()),
		Likelihood:    ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum(p.GetLikelihood()),
		Dictionary:    ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary(p.GetDictionary()),
		Regex:         ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegex(p.GetRegex()),
		SurrogateType: ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType(p.GetSurrogateType()),
		StoredType:    ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType(p.GetStoredType()),
		ExclusionType: ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum(p.GetExclusionType()),
	}
	// Convert each repeated detection rule individually.
	for _, r := range p.GetDetectionRules() {
		obj.DetectionRules = append(obj.DetectionRules, *ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules(r))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType converts a JobTriggerInspectJobInspectConfigCustomInfoTypesInfoType object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesInfoType {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesInfoType{
		Name:    dcl.StringOrNil(p.GetName()),
		Version: dcl.StringOrNil(p.GetVersion()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDictionary object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionary {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionary{
		WordList:         ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList(p.GetWordList()),
		CloudStoragePath: ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath(p.GetCloudStoragePath()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList{}
	// The words are plain strings, so bulk-append them instead of copying
	// element-by-element.
	obj.Words = append(obj.Words, p.GetWords()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath{
		Path: dcl.StringOrNil(p.GetPath()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegex converts a JobTriggerInspectJobInspectConfigCustomInfoTypesRegex object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegex(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegex) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesRegex {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesRegex{
		Pattern: dcl.StringOrNil(p.GetPattern()),
	}
	// The group indexes are scalars, so bulk-append them instead of
	// copying element-by-element.
	obj.GroupIndexes = append(obj.GroupIndexes, p.GetGroupIndexes()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType converts a JobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType object from its proto representation.
// The message carries no fields; a non-nil proto maps to an empty DCL object.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType{}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType converts a JobTriggerInspectJobInspectConfigCustomInfoTypesStoredType object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesStoredType {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesStoredType{
		Name:       dcl.StringOrNil(p.GetName()),
		CreateTime: dcl.StringOrNil(p.GetCreateTime()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules{
		HotwordRule: ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule(p.GetHotwordRule()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule{
		HotwordRegex:         ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex(p.GetHotwordRegex()),
		Proximity:            ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity(p.GetProximity()),
		LikelihoodAdjustment: ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment(p.GetLikelihoodAdjustment()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex{
		Pattern: dcl.StringOrNil(p.GetPattern()),
	}
	// The group indexes are scalars, so bulk-append them instead of
	// copying element-by-element.
	obj.GroupIndexes = append(obj.GroupIndexes, p.GetGroupIndexes()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity{
		WindowBefore: dcl.Int64OrNil(p.GetWindowBefore()),
		WindowAfter:  dcl.Int64OrNil(p.GetWindowAfter()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment object from its proto representation.
// It returns nil when the proto message is nil.
func ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment(p *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment) *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment{
		FixedLikelihood:    ProtoToDlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(p.GetFixedLikelihood()),
		RelativeLikelihood: dcl.Int64OrNil(p.GetRelativeLikelihood()),
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSet converts a JobTriggerInspectJobInspectConfigRuleSet object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSet(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSet) *dlp.JobTriggerInspectJobInspectConfigRuleSet {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigRuleSet{}
	// Convert each repeated message element, storing values (not pointers).
	for _, infoType := range p.GetInfoTypes() {
		obj.InfoTypes = append(obj.InfoTypes, *ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes(infoType))
	}
	for _, rule := range p.GetRules() {
		obj.Rules = append(obj.Rules, *ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRules(rule))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes converts a JobTriggerInspectJobInspectConfigRuleSetInfoTypes object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes) *dlp.JobTriggerInspectJobInspectConfigRuleSetInfoTypes {
	if p == nil {
		return nil
	}
	// Empty proto strings become nil pointers via dcl.StringOrNil.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetInfoTypes{
		Name:    dcl.StringOrNil(p.GetName()),
		Version: dcl.StringOrNil(p.GetVersion()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRules converts a JobTriggerInspectJobInspectConfigRuleSetRules object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRules(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRules) *dlp.JobTriggerInspectJobInspectConfigRuleSetRules {
	if p == nil {
		return nil
	}
	// Each nested converter returns nil for an unset sub-message.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRules{
		HotwordRule:   ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule(p.GetHotwordRule()),
		ExclusionRule: ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule(p.GetExclusionRule()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule {
	if p == nil {
		return nil
	}
	// Delegate each sub-message to its dedicated converter.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule{
		HotwordRegex:         ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex(p.GetHotwordRegex()),
		Proximity:            ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity(p.GetProximity()),
		LikelihoodAdjustment: ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(p.GetLikelihoodAdjustment()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex{
		Pattern: dcl.StringOrNil(p.GetPattern()),
	}
	// Copy the repeated scalar field in one variadic append; an empty source
	// leaves GroupIndexes nil, matching the original element-by-element loop.
	obj.GroupIndexes = append(obj.GroupIndexes, p.GetGroupIndexes()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity {
	if p == nil {
		return nil
	}
	// Zero proto ints become nil pointers via dcl.Int64OrNil.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity{
		WindowBefore: dcl.Int64OrNil(p.GetWindowBefore()),
		WindowAfter:  dcl.Int64OrNil(p.GetWindowAfter()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment {
	if p == nil {
		return nil
	}
	// Both likelihood variants are converted; only the one set in the proto is meaningful.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment{
		FixedLikelihood:    ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(p.GetFixedLikelihood()),
		RelativeLikelihood: dcl.Int64OrNil(p.GetRelativeLikelihood()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule {
	if p == nil {
		return nil
	}
	// Each exclusion variant is converted; unset sub-messages map to nil.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule{
		Dictionary:       ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary(p.GetDictionary()),
		Regex:            ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex(p.GetRegex()),
		ExcludeInfoTypes: ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(p.GetExcludeInfoTypes()),
		MatchingType:     ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum(p.GetMatchingType()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary {
	if p == nil {
		return nil
	}
	// Dictionary content comes either inline (WordList) or from Cloud Storage.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary{
		WordList:         ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(p.GetWordList()),
		CloudStoragePath: ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(p.GetCloudStoragePath()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList{}
	// Copy the repeated string field in one variadic append; an empty source
	// leaves Words nil, matching the original element-by-element loop.
	obj.Words = append(obj.Words, p.GetWords()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath {
	if p == nil {
		return nil
	}
	// Single scalar field; empty string maps to nil.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath{
		Path: dcl.StringOrNil(p.GetPath()),
	}
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex{
		Pattern: dcl.StringOrNil(p.GetPattern()),
	}
	// Copy the repeated scalar field in one variadic append; an empty source
	// leaves GroupIndexes nil, matching the original element-by-element loop.
	obj.GroupIndexes = append(obj.GroupIndexes, p.GetGroupIndexes()...)
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes{}
	// Convert each repeated info-type message, storing values (not pointers).
	for _, infoType := range p.GetInfoTypes() {
		obj.InfoTypes = append(obj.InfoTypes, *ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(infoType))
	}
	return obj
}
// ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes object from its proto representation.
func ProtoToDlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(p *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes) *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes {
	if p == nil {
		return nil
	}
	// Empty proto strings become nil pointers via dcl.StringOrNil.
	return &dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes{
		Name:    dcl.StringOrNil(p.GetName()),
		Version: dcl.StringOrNil(p.GetVersion()),
	}
}
// ProtoToDlpJobTriggerInspectJobActions converts a JobTriggerInspectJobActions object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActions(p *dlppb.DlpJobTriggerInspectJobActions) *dlp.JobTriggerInspectJobActions {
	if p == nil {
		return nil
	}
	// Each action variant is converted; unset sub-messages map to nil.
	return &dlp.JobTriggerInspectJobActions{
		SaveFindings:                      ProtoToDlpJobTriggerInspectJobActionsSaveFindings(p.GetSaveFindings()),
		PubSub:                            ProtoToDlpJobTriggerInspectJobActionsPubSub(p.GetPubSub()),
		PublishSummaryToCscc:              ProtoToDlpJobTriggerInspectJobActionsPublishSummaryToCscc(p.GetPublishSummaryToCscc()),
		PublishFindingsToCloudDataCatalog: ProtoToDlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(p.GetPublishFindingsToCloudDataCatalog()),
		JobNotificationEmails:             ProtoToDlpJobTriggerInspectJobActionsJobNotificationEmails(p.GetJobNotificationEmails()),
		PublishToStackdriver:              ProtoToDlpJobTriggerInspectJobActionsPublishToStackdriver(p.GetPublishToStackdriver()),
	}
}
// ProtoToDlpJobTriggerInspectJobActionsSaveFindings converts a JobTriggerInspectJobActionsSaveFindings object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsSaveFindings(p *dlppb.DlpJobTriggerInspectJobActionsSaveFindings) *dlp.JobTriggerInspectJobActionsSaveFindings {
	if p == nil {
		return nil
	}
	// Single nested message; delegate to its converter.
	return &dlp.JobTriggerInspectJobActionsSaveFindings{
		OutputConfig: ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfig(p.GetOutputConfig()),
	}
}
// ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfig converts a JobTriggerInspectJobActionsSaveFindingsOutputConfig object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfig(p *dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfig) *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfig {
	if p == nil {
		return nil
	}
	// Output destination variants plus the schema enum are mapped directly.
	return &dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfig{
		Table:        ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(p.GetTable()),
		DlpStorage:   ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage(p.GetDlpStorage()),
		OutputSchema: ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum(p.GetOutputSchema()),
	}
}
// ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTable converts a JobTriggerInspectJobActionsSaveFindingsOutputConfigTable object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(p *dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTable) *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigTable {
	if p == nil {
		return nil
	}
	// BigQuery table reference: project, dataset, and table identifiers.
	return &dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigTable{
		ProjectId: dcl.StringOrNil(p.GetProjectId()),
		DatasetId: dcl.StringOrNil(p.GetDatasetId()),
		TableId:   dcl.StringOrNil(p.GetTableId()),
	}
}
// ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage converts a JobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage(p *dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage) *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage {
	if p == nil {
		return nil
	}
	// The message carries no fields; its presence alone is the signal.
	return &dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage{}
}
// ProtoToDlpJobTriggerInspectJobActionsPubSub converts a JobTriggerInspectJobActionsPubSub object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsPubSub(p *dlppb.DlpJobTriggerInspectJobActionsPubSub) *dlp.JobTriggerInspectJobActionsPubSub {
	if p == nil {
		return nil
	}
	// Single scalar field: the Pub/Sub topic name.
	return &dlp.JobTriggerInspectJobActionsPubSub{
		Topic: dcl.StringOrNil(p.GetTopic()),
	}
}
// ProtoToDlpJobTriggerInspectJobActionsPublishSummaryToCscc converts a JobTriggerInspectJobActionsPublishSummaryToCscc object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsPublishSummaryToCscc(p *dlppb.DlpJobTriggerInspectJobActionsPublishSummaryToCscc) *dlp.JobTriggerInspectJobActionsPublishSummaryToCscc {
	if p == nil {
		return nil
	}
	// The message carries no fields; its presence alone is the signal.
	return &dlp.JobTriggerInspectJobActionsPublishSummaryToCscc{}
}
// ProtoToDlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog converts a JobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog(p *dlppb.DlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog) *dlp.JobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog {
	if p == nil {
		return nil
	}
	// The message carries no fields; its presence alone is the signal.
	return &dlp.JobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog{}
}
// ProtoToDlpJobTriggerInspectJobActionsJobNotificationEmails converts a JobTriggerInspectJobActionsJobNotificationEmails object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsJobNotificationEmails(p *dlppb.DlpJobTriggerInspectJobActionsJobNotificationEmails) *dlp.JobTriggerInspectJobActionsJobNotificationEmails {
	if p == nil {
		return nil
	}
	// The message carries no fields; its presence alone is the signal.
	return &dlp.JobTriggerInspectJobActionsJobNotificationEmails{}
}
// ProtoToDlpJobTriggerInspectJobActionsPublishToStackdriver converts a JobTriggerInspectJobActionsPublishToStackdriver object from its proto representation.
func ProtoToDlpJobTriggerInspectJobActionsPublishToStackdriver(p *dlppb.DlpJobTriggerInspectJobActionsPublishToStackdriver) *dlp.JobTriggerInspectJobActionsPublishToStackdriver {
	if p == nil {
		return nil
	}
	// The message carries no fields; its presence alone is the signal.
	return &dlp.JobTriggerInspectJobActionsPublishToStackdriver{}
}
// ProtoToDlpJobTriggerTriggers converts a JobTriggerTriggers object from its proto representation.
func ProtoToDlpJobTriggerTriggers(p *dlppb.DlpJobTriggerTriggers) *dlp.JobTriggerTriggers {
	if p == nil {
		return nil
	}
	// A trigger is either scheduled or manual; the unset variant maps to nil.
	return &dlp.JobTriggerTriggers{
		Schedule: ProtoToDlpJobTriggerTriggersSchedule(p.GetSchedule()),
		Manual:   ProtoToDlpJobTriggerTriggersManual(p.GetManual()),
	}
}
// ProtoToDlpJobTriggerTriggersSchedule converts a JobTriggerTriggersSchedule object from its proto representation.
func ProtoToDlpJobTriggerTriggersSchedule(p *dlppb.DlpJobTriggerTriggersSchedule) *dlp.JobTriggerTriggersSchedule {
	if p == nil {
		return nil
	}
	// Single scalar field: the recurrence period as a duration string.
	return &dlp.JobTriggerTriggersSchedule{
		RecurrencePeriodDuration: dcl.StringOrNil(p.GetRecurrencePeriodDuration()),
	}
}
// ProtoToDlpJobTriggerTriggersManual converts a JobTriggerTriggersManual object from its proto representation.
func ProtoToDlpJobTriggerTriggersManual(p *dlppb.DlpJobTriggerTriggersManual) *dlp.JobTriggerTriggersManual {
	if p == nil {
		return nil
	}
	// The message carries no fields; its presence alone is the signal.
	return &dlp.JobTriggerTriggersManual{}
}
// ProtoToDlpJobTriggerErrors converts a JobTriggerErrors object from its proto representation.
func ProtoToDlpJobTriggerErrors(p *dlppb.DlpJobTriggerErrors) *dlp.JobTriggerErrors {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerErrors{
		Details: ProtoToDlpJobTriggerErrorsDetails(p.GetDetails()),
	}
	// Copy the repeated scalar field in one variadic append; an empty source
	// leaves Timestamps nil, matching the original element-by-element loop.
	obj.Timestamps = append(obj.Timestamps, p.GetTimestamps()...)
	return obj
}
// ProtoToDlpJobTriggerErrorsDetails converts a JobTriggerErrorsDetails object from its proto representation.
func ProtoToDlpJobTriggerErrorsDetails(p *dlppb.DlpJobTriggerErrorsDetails) *dlp.JobTriggerErrorsDetails {
	if p == nil {
		return nil
	}
	obj := &dlp.JobTriggerErrorsDetails{
		Code:    dcl.Int64OrNil(p.GetCode()),
		Message: dcl.StringOrNil(p.GetMessage()),
	}
	// Convert each repeated detail message, storing values (not pointers).
	for _, detail := range p.GetDetails() {
		obj.Details = append(obj.Details, *ProtoToDlpJobTriggerErrorsDetailsDetails(detail))
	}
	return obj
}
// ProtoToDlpJobTriggerErrorsDetailsDetails converts a JobTriggerErrorsDetailsDetails object from its proto representation.
func ProtoToDlpJobTriggerErrorsDetailsDetails(p *dlppb.DlpJobTriggerErrorsDetailsDetails) *dlp.JobTriggerErrorsDetailsDetails {
	if p == nil {
		return nil
	}
	// An Any-style payload: a type URL plus its serialized value.
	return &dlp.JobTriggerErrorsDetailsDetails{
		TypeUrl: dcl.StringOrNil(p.GetTypeUrl()),
		Value:   dcl.StringOrNil(p.GetValue()),
	}
}
// ProtoToJobTrigger converts a JobTrigger resource from its proto representation.
// Unlike the nested converters, this top-level entry point does not nil-check p;
// callers are expected to pass a non-nil resource proto.
func ProtoToJobTrigger(p *dlppb.DlpJobTrigger) *dlp.JobTrigger {
	obj := &dlp.JobTrigger{
		Name:        dcl.StringOrNil(p.GetName()),
		DisplayName: dcl.StringOrNil(p.GetDisplayName()),
		Description: dcl.StringOrNil(p.GetDescription()),
		InspectJob:  ProtoToDlpJobTriggerInspectJob(p.GetInspectJob()),
		CreateTime:  dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:  dcl.StringOrNil(p.GetUpdateTime()),
		LastRunTime: dcl.StringOrNil(p.GetLastRunTime()),
		Status:      ProtoToDlpJobTriggerStatusEnum(p.GetStatus()),
		LocationId:  dcl.StringOrNil(p.GetLocationId()),
		Parent:      dcl.StringOrNil(p.GetParent()),
		Location:    dcl.StringOrNil(p.GetLocation()),
	}
	// Convert the repeated message fields, storing values (not pointers).
	for _, trigger := range p.GetTriggers() {
		obj.Triggers = append(obj.Triggers, *ProtoToDlpJobTriggerTriggers(trigger))
	}
	for _, e := range p.GetErrors() {
		obj.Errors = append(obj.Errors, *ProtoToDlpJobTriggerErrors(e))
	}
	return obj
}
// DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnumToProto converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnumToProto(e *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum) dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum_value["JobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(v)
}
// DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnumToProto converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnumToProto(e *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum) dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum_value["JobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnum(v)
}
// DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnumToProto converts a JobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnumToProto(e *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum) dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum_value["JobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnum(v)
}
// DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnumToProto converts a JobTriggerInspectJobInspectConfigMinLikelihoodEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnumToProto(e *dlp.JobTriggerInspectJobInspectConfigMinLikelihoodEnum) dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum_value["JobTriggerInspectJobInspectConfigMinLikelihoodEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnum(v)
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnumToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnumToProto(e *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum) dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum_value["JobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnum(v)
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnumToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnumToProto(e *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum) dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum_value["JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(v)
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnumToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnumToProto(e *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum) dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum_value["JobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnum(v)
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnumToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnumToProto(e *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum) dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum_value["JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnum(v)
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnumToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnumToProto(e *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum) dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum_value["JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnum(v)
}
// DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnumToProto converts a JobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnumToProto(e *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum) dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum {
	if e == nil {
		return dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum_value["JobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum(0)
	}
	return dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnum(v)
}
// DlpJobTriggerStatusEnumToProto converts a JobTriggerStatusEnum enum to its proto representation.
// A nil or unrecognized value maps to the zero (unspecified) proto enum.
func DlpJobTriggerStatusEnumToProto(e *dlp.JobTriggerStatusEnum) dlppb.DlpJobTriggerStatusEnum {
	if e == nil {
		return dlppb.DlpJobTriggerStatusEnum(0)
	}
	v, ok := dlppb.DlpJobTriggerStatusEnum_value["JobTriggerStatusEnum"+string(*e)]
	if !ok {
		return dlppb.DlpJobTriggerStatusEnum(0)
	}
	return dlppb.DlpJobTriggerStatusEnum(v)
}
// DlpJobTriggerInspectJobToProto converts a JobTriggerInspectJob object to its proto representation.
func DlpJobTriggerInspectJobToProto(o *dlp.JobTriggerInspectJob) *dlppb.DlpJobTriggerInspectJob {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJob{}
	p.SetStorageConfig(DlpJobTriggerInspectJobStorageConfigToProto(o.StorageConfig))
	p.SetInspectConfig(DlpJobTriggerInspectJobInspectConfigToProto(o.InspectConfig))
	p.SetInspectTemplateName(dcl.ValueOrEmptyString(o.InspectTemplateName))
	sActions := make([]*dlppb.DlpJobTriggerInspectJobActions, len(o.Actions))
	// Index into the slice directly instead of ranging by value: this avoids
	// copying each element struct per iteration and taking the address of the
	// (pre-Go-1.22 shared) loop variable.
	for i := range o.Actions {
		sActions[i] = DlpJobTriggerInspectJobActionsToProto(&o.Actions[i])
	}
	p.SetActions(sActions)
	return p
}
// DlpJobTriggerInspectJobStorageConfigToProto converts a JobTriggerInspectJobStorageConfig object to its proto representation.
func DlpJobTriggerInspectJobStorageConfigToProto(o *dlp.JobTriggerInspectJobStorageConfig) *dlppb.DlpJobTriggerInspectJobStorageConfig {
	if o == nil {
		return nil
	}
	// Populate each storage variant plus the timespan config via proto setters.
	res := &dlppb.DlpJobTriggerInspectJobStorageConfig{}
	res.SetDatastoreOptions(DlpJobTriggerInspectJobStorageConfigDatastoreOptionsToProto(o.DatastoreOptions))
	res.SetCloudStorageOptions(DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsToProto(o.CloudStorageOptions))
	res.SetBigQueryOptions(DlpJobTriggerInspectJobStorageConfigBigQueryOptionsToProto(o.BigQueryOptions))
	res.SetHybridOptions(DlpJobTriggerInspectJobStorageConfigHybridOptionsToProto(o.HybridOptions))
	res.SetTimespanConfig(DlpJobTriggerInspectJobStorageConfigTimespanConfigToProto(o.TimespanConfig))
	return res
}
// DlpJobTriggerInspectJobStorageConfigDatastoreOptionsToProto converts a JobTriggerInspectJobStorageConfigDatastoreOptions object to its proto representation.
func DlpJobTriggerInspectJobStorageConfigDatastoreOptionsToProto(o *dlp.JobTriggerInspectJobStorageConfigDatastoreOptions) *dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptions {
	if o == nil {
		return nil
	}
	// Both sub-messages go through their own converters; nil stays nil.
	res := &dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptions{}
	res.SetPartitionId(DlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdToProto(o.PartitionId))
	res.SetKind(DlpJobTriggerInspectJobStorageConfigDatastoreOptionsKindToProto(o.Kind))
	return res
}
// DlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdToProto converts a JobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId object to its proto representation.
func DlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdToProto(o *dlp.JobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId) *dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId {
	if o == nil {
		return nil
	}
	// Nil string pointers become empty strings via dcl.ValueOrEmptyString.
	res := &dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId{}
	res.SetProjectId(dcl.ValueOrEmptyString(o.ProjectId))
	res.SetNamespaceId(dcl.ValueOrEmptyString(o.NamespaceId))
	return res
}
// DlpJobTriggerInspectJobStorageConfigDatastoreOptionsKindToProto converts a JobTriggerInspectJobStorageConfigDatastoreOptionsKind object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigDatastoreOptionsKindToProto(o *dlp.JobTriggerInspectJobStorageConfigDatastoreOptionsKind) *dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptionsKind {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigDatastoreOptionsKind{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsToProto converts a JobTriggerInspectJobStorageConfigCloudStorageOptions object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsToProto(o *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptions) *dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptions {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptions{}
	p.SetFileSet(DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetToProto(o.FileSet))
	p.SetBytesLimitPerFile(dcl.ValueOrEmptyInt64(o.BytesLimitPerFile))
	p.SetBytesLimitPerFilePercent(dcl.ValueOrEmptyInt64(o.BytesLimitPerFilePercent))
	p.SetSampleMethod(DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethodEnumToProto(o.SampleMethod))
	p.SetFilesLimitPercent(dcl.ValueOrEmptyInt64(o.FilesLimitPercent))
	// Map each file-type enum string to its proto enum value.
	sFileTypes := make([]dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum, len(o.FileTypes))
	for i, r := range o.FileTypes {
		sFileTypes[i] = dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum(dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypesEnum_value[string(r)])
	}
	p.SetFileTypes(sFileTypes)
	return p
}
// DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetToProto converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetToProto(o *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet) *dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet{}
	p.SetUrl(dcl.ValueOrEmptyString(o.Url))
	p.SetRegexFileSet(DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetToProto(o.RegexFileSet))
	return p
}
// DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetToProto converts a JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetToProto(o *dlp.JobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet) *dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet{}
	p.SetBucketName(dcl.ValueOrEmptyString(o.BucketName))
	// Copy regex lists into fresh slices so the proto does not alias the DCL object's storage.
	sIncludeRegex := make([]string, len(o.IncludeRegex))
	copy(sIncludeRegex, o.IncludeRegex)
	p.SetIncludeRegex(sIncludeRegex)
	sExcludeRegex := make([]string, len(o.ExcludeRegex))
	copy(sExcludeRegex, o.ExcludeRegex)
	p.SetExcludeRegex(sExcludeRegex)
	return p
}
// DlpJobTriggerInspectJobStorageConfigBigQueryOptionsToProto converts a JobTriggerInspectJobStorageConfigBigQueryOptions object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigBigQueryOptionsToProto(o *dlp.JobTriggerInspectJobStorageConfigBigQueryOptions) *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptions {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptions{}
	p.SetTableReference(DlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceToProto(o.TableReference))
	p.SetRowsLimit(dcl.ValueOrEmptyInt64(o.RowsLimit))
	p.SetRowsLimitPercent(dcl.ValueOrEmptyInt64(o.RowsLimitPercent))
	p.SetSampleMethod(DlpJobTriggerInspectJobStorageConfigBigQueryOptionsSampleMethodEnumToProto(o.SampleMethod))
	// Index-based loops avoid copying each element struct per iteration.
	sIdentifyingFields := make([]*dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields, len(o.IdentifyingFields))
	for i := range o.IdentifyingFields {
		sIdentifyingFields[i] = DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsToProto(&o.IdentifyingFields[i])
	}
	p.SetIdentifyingFields(sIdentifyingFields)
	sExcludedFields := make([]*dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields, len(o.ExcludedFields))
	for i := range o.ExcludedFields {
		sExcludedFields[i] = DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsToProto(&o.ExcludedFields[i])
	}
	p.SetExcludedFields(sExcludedFields)
	sIncludedFields := make([]*dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields, len(o.IncludedFields))
	for i := range o.IncludedFields {
		sIncludedFields[i] = DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsToProto(&o.IncludedFields[i])
	}
	p.SetIncludedFields(sIncludedFields)
	return p
}
// DlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceToProto converts a JobTriggerInspectJobStorageConfigBigQueryOptionsTableReference object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceToProto(o *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsTableReference) *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference{}
	p.SetProjectId(dcl.ValueOrEmptyString(o.ProjectId))
	p.SetDatasetId(dcl.ValueOrEmptyString(o.DatasetId))
	p.SetTableId(dcl.ValueOrEmptyString(o.TableId))
	return p
}
// DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsToProto converts a JobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFieldsToProto(o *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields) *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIdentifyingFields{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsToProto converts a JobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFieldsToProto(o *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields) *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsExcludedFields{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsToProto converts a JobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFieldsToProto(o *dlp.JobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields) *dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigBigQueryOptionsIncludedFields{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobStorageConfigHybridOptionsToProto converts a JobTriggerInspectJobStorageConfigHybridOptions object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigHybridOptionsToProto(o *dlp.JobTriggerInspectJobStorageConfigHybridOptions) *dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptions {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptions{}
	p.SetDescription(dcl.ValueOrEmptyString(o.Description))
	p.SetTableOptions(DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsToProto(o.TableOptions))
	// Copy the label-key slice so the proto does not alias the DCL object's storage.
	sRequiredFindingLabelKeys := make([]string, len(o.RequiredFindingLabelKeys))
	copy(sRequiredFindingLabelKeys, o.RequiredFindingLabelKeys)
	p.SetRequiredFindingLabelKeys(sRequiredFindingLabelKeys)
	// Copy the labels map for the same reason.
	mLabels := make(map[string]string, len(o.Labels))
	for k, r := range o.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	return p
}
// DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsToProto converts a JobTriggerInspectJobStorageConfigHybridOptionsTableOptions object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsToProto(o *dlp.JobTriggerInspectJobStorageConfigHybridOptionsTableOptions) *dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptions {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptions{}
	// Index-based loop avoids copying each element struct per iteration.
	sIdentifyingFields := make([]*dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields, len(o.IdentifyingFields))
	for i := range o.IdentifyingFields {
		sIdentifyingFields[i] = DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsToProto(&o.IdentifyingFields[i])
	}
	p.SetIdentifyingFields(sIdentifyingFields)
	return p
}
// DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsToProto converts a JobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFieldsToProto(o *dlp.JobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields) *dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigHybridOptionsTableOptionsIdentifyingFields{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobStorageConfigTimespanConfigToProto converts a JobTriggerInspectJobStorageConfigTimespanConfig object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigTimespanConfigToProto(o *dlp.JobTriggerInspectJobStorageConfigTimespanConfig) *dlppb.DlpJobTriggerInspectJobStorageConfigTimespanConfig {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigTimespanConfig{}
	p.SetStartTime(dcl.ValueOrEmptyString(o.StartTime))
	p.SetEndTime(dcl.ValueOrEmptyString(o.EndTime))
	p.SetTimestampField(DlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldToProto(o.TimestampField))
	p.SetEnableAutoPopulationOfTimespanConfig(dcl.ValueOrEmptyBool(o.EnableAutoPopulationOfTimespanConfig))
	return p
}
// DlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldToProto converts a JobTriggerInspectJobStorageConfigTimespanConfigTimestampField object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldToProto(o *dlp.JobTriggerInspectJobStorageConfigTimespanConfigTimestampField) *dlppb.DlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampField {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobStorageConfigTimespanConfigTimestampField{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobInspectConfigToProto converts a JobTriggerInspectJobInspectConfig object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigToProto(o *dlp.JobTriggerInspectJobInspectConfig) *dlppb.DlpJobTriggerInspectJobInspectConfig {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfig{}
	p.SetMinLikelihood(DlpJobTriggerInspectJobInspectConfigMinLikelihoodEnumToProto(o.MinLikelihood))
	p.SetLimits(DlpJobTriggerInspectJobInspectConfigLimitsToProto(o.Limits))
	p.SetIncludeQuote(dcl.ValueOrEmptyBool(o.IncludeQuote))
	p.SetExcludeInfoTypes(dcl.ValueOrEmptyBool(o.ExcludeInfoTypes))
	// Index-based loops avoid copying each element struct per iteration.
	sInfoTypes := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigInfoTypes, len(o.InfoTypes))
	for i := range o.InfoTypes {
		sInfoTypes[i] = DlpJobTriggerInspectJobInspectConfigInfoTypesToProto(&o.InfoTypes[i])
	}
	p.SetInfoTypes(sInfoTypes)
	sCustomInfoTypes := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypes, len(o.CustomInfoTypes))
	for i := range o.CustomInfoTypes {
		sCustomInfoTypes[i] = DlpJobTriggerInspectJobInspectConfigCustomInfoTypesToProto(&o.CustomInfoTypes[i])
	}
	p.SetCustomInfoTypes(sCustomInfoTypes)
	sRuleSet := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigRuleSet, len(o.RuleSet))
	for i := range o.RuleSet {
		sRuleSet[i] = DlpJobTriggerInspectJobInspectConfigRuleSetToProto(&o.RuleSet[i])
	}
	p.SetRuleSet(sRuleSet)
	return p
}
// DlpJobTriggerInspectJobInspectConfigInfoTypesToProto converts a JobTriggerInspectJobInspectConfigInfoTypes object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigInfoTypesToProto(o *dlp.JobTriggerInspectJobInspectConfigInfoTypes) *dlppb.DlpJobTriggerInspectJobInspectConfigInfoTypes {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigInfoTypes{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	return p
}
// DlpJobTriggerInspectJobInspectConfigLimitsToProto converts a JobTriggerInspectJobInspectConfigLimits object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigLimitsToProto(o *dlp.JobTriggerInspectJobInspectConfigLimits) *dlppb.DlpJobTriggerInspectJobInspectConfigLimits {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigLimits{}
	p.SetMaxFindingsPerItem(dcl.ValueOrEmptyInt64(o.MaxFindingsPerItem))
	p.SetMaxFindingsPerRequest(dcl.ValueOrEmptyInt64(o.MaxFindingsPerRequest))
	// Index-based loop avoids copying each element struct per iteration.
	sMaxFindingsPerInfoType := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType, len(o.MaxFindingsPerInfoType))
	for i := range o.MaxFindingsPerInfoType {
		sMaxFindingsPerInfoType[i] = DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeToProto(&o.MaxFindingsPerInfoType[i])
	}
	p.SetMaxFindingsPerInfoType(sMaxFindingsPerInfoType)
	return p
}
// DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeToProto converts a JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeToProto(o *dlp.JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType) *dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoType{}
	p.SetInfoType(DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeToProto(o.InfoType))
	p.SetMaxFindings(dcl.ValueOrEmptyInt64(o.MaxFindings))
	return p
}
// DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeToProto converts a JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeToProto(o *dlp.JobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType) *dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigLimitsMaxFindingsPerInfoTypeInfoType{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	p.SetVersion(dcl.ValueOrEmptyString(o.Version))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypes object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypes) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypes {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypes{}
	p.SetInfoType(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeToProto(o.InfoType))
	p.SetLikelihood(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesLikelihoodEnumToProto(o.Likelihood))
	p.SetDictionary(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryToProto(o.Dictionary))
	p.SetRegex(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegexToProto(o.Regex))
	p.SetSurrogateType(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateTypeToProto(o.SurrogateType))
	p.SetStoredType(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeToProto(o.StoredType))
	p.SetExclusionType(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesExclusionTypeEnumToProto(o.ExclusionType))
	// Index-based loop avoids copying each element struct per iteration.
	sDetectionRules := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules, len(o.DetectionRules))
	for i := range o.DetectionRules {
		sDetectionRules[i] = DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesToProto(&o.DetectionRules[i])
	}
	p.SetDetectionRules(sDetectionRules)
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesInfoType object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoTypeToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesInfoType) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesInfoType{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	p.SetVersion(dcl.ValueOrEmptyString(o.Version))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDictionary object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionary) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionary{}
	p.SetWordList(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListToProto(o.WordList))
	p.SetCloudStoragePath(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathToProto(o.CloudStoragePath))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordListToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryWordList{}
	// Copy the word list into a fresh slice so the proto does not alias the DCL object's storage.
	sWords := make([]string, len(o.Words))
	copy(sWords, o.Words)
	p.SetWords(sWords)
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePathToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDictionaryCloudStoragePath{}
	p.SetPath(dcl.ValueOrEmptyString(o.Path))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegexToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesRegex object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegexToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesRegex) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegex {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesRegex{}
	p.SetPattern(dcl.ValueOrEmptyString(o.Pattern))
	// Copy the group indexes into a fresh slice so the proto does not alias the DCL object's storage.
	sGroupIndexes := make([]int64, len(o.GroupIndexes))
	copy(sGroupIndexes, o.GroupIndexes)
	p.SetGroupIndexes(sGroupIndexes)
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateTypeToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType object to its proto representation.
// SurrogateType carries no fields; the conversion only distinguishes nil from an empty message.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateTypeToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesSurrogateType{}
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesStoredType object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredTypeToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesStoredType) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesStoredType{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	p.SetCreateTime(dcl.ValueOrEmptyString(o.CreateTime))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRules{}
	p.SetHotwordRule(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleToProto(o.HotwordRule))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRule{}
	p.SetHotwordRegex(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegexToProto(o.HotwordRegex))
	p.SetProximity(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximityToProto(o.Proximity))
	p.SetLikelihoodAdjustment(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentToProto(o.LikelihoodAdjustment))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegexToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegexToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleHotwordRegex{}
	p.SetPattern(dcl.ValueOrEmptyString(o.Pattern))
	// Copy the group indexes into a fresh slice so the proto does not alias the DCL object's storage.
	sGroupIndexes := make([]int64, len(o.GroupIndexes))
	copy(sGroupIndexes, o.GroupIndexes)
	p.SetGroupIndexes(sGroupIndexes)
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximityToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximityToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleProximity{}
	p.SetWindowBefore(dcl.ValueOrEmptyInt64(o.WindowBefore))
	p.SetWindowAfter(dcl.ValueOrEmptyInt64(o.WindowAfter))
	return p
}
// DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentToProto converts a JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentToProto(o *dlp.JobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment) *dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustment{}
	p.SetFixedLikelihood(DlpJobTriggerInspectJobInspectConfigCustomInfoTypesDetectionRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnumToProto(o.FixedLikelihood))
	p.SetRelativeLikelihood(dcl.ValueOrEmptyInt64(o.RelativeLikelihood))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetToProto converts a JobTriggerInspectJobInspectConfigRuleSet object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSet) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSet {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSet{}
	// Index-based loops avoid copying each element struct per iteration.
	sInfoTypes := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes, len(o.InfoTypes))
	for i := range o.InfoTypes {
		sInfoTypes[i] = DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypesToProto(&o.InfoTypes[i])
	}
	p.SetInfoTypes(sInfoTypes)
	sRules := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRules, len(o.Rules))
	for i := range o.Rules {
		sRules[i] = DlpJobTriggerInspectJobInspectConfigRuleSetRulesToProto(&o.Rules[i])
	}
	p.SetRules(sRules)
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypesToProto converts a JobTriggerInspectJobInspectConfigRuleSetInfoTypes object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypesToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetInfoTypes) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetInfoTypes{}
	p.SetName(dcl.ValueOrEmptyString(o.Name))
	p.SetVersion(dcl.ValueOrEmptyString(o.Version))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesToProto converts a JobTriggerInspectJobInspectConfigRuleSetRules object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRules) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRules {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRules{}
	p.SetHotwordRule(DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleToProto(o.HotwordRule))
	p.SetExclusionRule(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleToProto(o.ExclusionRule))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRule{}
	p.SetHotwordRegex(DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexToProto(o.HotwordRegex))
	p.SetProximity(DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityToProto(o.Proximity))
	p.SetLikelihoodAdjustment(DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentToProto(o.LikelihoodAdjustment))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegexToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleHotwordRegex{}
	p.SetPattern(dcl.ValueOrEmptyString(o.Pattern))
	// Copy the group indexes into a fresh slice so the proto does not alias the DCL object's storage.
	sGroupIndexes := make([]int64, len(o.GroupIndexes))
	copy(sGroupIndexes, o.GroupIndexes)
	p.SetGroupIndexes(sGroupIndexes)
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximityToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleProximity{}
	p.SetWindowBefore(dcl.ValueOrEmptyInt64(o.WindowBefore))
	p.SetWindowAfter(dcl.ValueOrEmptyInt64(o.WindowAfter))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment{}
	p.SetFixedLikelihood(DlpJobTriggerInspectJobInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihoodEnumToProto(o.FixedLikelihood))
	p.SetRelativeLikelihood(dcl.ValueOrEmptyInt64(o.RelativeLikelihood))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRule{}
	p.SetDictionary(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryToProto(o.Dictionary))
	p.SetRegex(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexToProto(o.Regex))
	p.SetExcludeInfoTypes(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesToProto(o.ExcludeInfoTypes))
	p.SetMatchingType(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleMatchingTypeEnumToProto(o.MatchingType))
	return p
}
// DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary object to its proto representation.
// Returns nil if o is nil.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionary{}
	p.SetWordList(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListToProto(o.WordList))
	p.SetCloudStoragePath(DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathToProto(o.CloudStoragePath))
	return p
}
// JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList object to its proto representation.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordListToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList {
if o == nil {
return nil
}
p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryWordList{}
sWords := make([]string, len(o.Words))
for i, r := range o.Words {
sWords[i] = r
}
p.SetWords(sWords)
return p
}
// JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath object to its proto representation.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath {
if o == nil {
return nil
}
p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath{}
p.SetPath(dcl.ValueOrEmptyString(o.Path))
return p
}
// JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex object to its proto representation.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegexToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex {
if o == nil {
return nil
}
p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleRegex{}
p.SetPattern(dcl.ValueOrEmptyString(o.Pattern))
sGroupIndexes := make([]int64, len(o.GroupIndexes))
for i, r := range o.GroupIndexes {
sGroupIndexes[i] = r
}
p.SetGroupIndexes(sGroupIndexes)
return p
}
// JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes object to its proto representation.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes {
if o == nil {
return nil
}
p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes{}
sInfoTypes := make([]*dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes, len(o.InfoTypes))
for i, r := range o.InfoTypes {
sInfoTypes[i] = DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesToProto(&r)
}
p.SetInfoTypes(sInfoTypes)
return p
}
// JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesToProto converts a JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes object to its proto representation.
func DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesToProto(o *dlp.JobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes) *dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes {
if o == nil {
return nil
}
p := &dlppb.DlpJobTriggerInspectJobInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes{}
p.SetName(dcl.ValueOrEmptyString(o.Name))
p.SetVersion(dcl.ValueOrEmptyString(o.Version))
return p
}
// DlpJobTriggerInspectJobActionsToProto converts a JobTriggerInspectJobActions object to its proto representation.
func DlpJobTriggerInspectJobActionsToProto(o *dlp.JobTriggerInspectJobActions) *dlppb.DlpJobTriggerInspectJobActions {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActions{}
	p.SetSaveFindings(DlpJobTriggerInspectJobActionsSaveFindingsToProto(o.SaveFindings))
	p.SetPubSub(DlpJobTriggerInspectJobActionsPubSubToProto(o.PubSub))
	p.SetPublishSummaryToCscc(DlpJobTriggerInspectJobActionsPublishSummaryToCsccToProto(o.PublishSummaryToCscc))
	p.SetPublishFindingsToCloudDataCatalog(DlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalogToProto(o.PublishFindingsToCloudDataCatalog))
	p.SetJobNotificationEmails(DlpJobTriggerInspectJobActionsJobNotificationEmailsToProto(o.JobNotificationEmails))
	p.SetPublishToStackdriver(DlpJobTriggerInspectJobActionsPublishToStackdriverToProto(o.PublishToStackdriver))
	return p
}

// DlpJobTriggerInspectJobActionsSaveFindingsToProto converts a JobTriggerInspectJobActionsSaveFindings object to its proto representation.
func DlpJobTriggerInspectJobActionsSaveFindingsToProto(o *dlp.JobTriggerInspectJobActionsSaveFindings) *dlppb.DlpJobTriggerInspectJobActionsSaveFindings {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsSaveFindings{}
	p.SetOutputConfig(DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigToProto(o.OutputConfig))
	return p
}

// DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigToProto converts a JobTriggerInspectJobActionsSaveFindingsOutputConfig object to its proto representation.
func DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigToProto(o *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfig) *dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfig {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfig{}
	p.SetTable(DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTableToProto(o.Table))
	p.SetDlpStorage(DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorageToProto(o.DlpStorage))
	p.SetOutputSchema(DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchemaEnumToProto(o.OutputSchema))
	return p
}

// DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTableToProto converts a JobTriggerInspectJobActionsSaveFindingsOutputConfigTable object to its proto representation.
func DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTableToProto(o *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigTable) *dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTable {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigTable{}
	p.SetProjectId(dcl.ValueOrEmptyString(o.ProjectId))
	p.SetDatasetId(dcl.ValueOrEmptyString(o.DatasetId))
	p.SetTableId(dcl.ValueOrEmptyString(o.TableId))
	return p
}

// DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorageToProto converts a JobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage object to its proto representation.
// The message has no fields; a non-nil value only signals presence.
func DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorageToProto(o *dlp.JobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage) *dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsSaveFindingsOutputConfigDlpStorage{}
	return p
}

// DlpJobTriggerInspectJobActionsPubSubToProto converts a JobTriggerInspectJobActionsPubSub object to its proto representation.
func DlpJobTriggerInspectJobActionsPubSubToProto(o *dlp.JobTriggerInspectJobActionsPubSub) *dlppb.DlpJobTriggerInspectJobActionsPubSub {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsPubSub{}
	p.SetTopic(dcl.ValueOrEmptyString(o.Topic))
	return p
}

// DlpJobTriggerInspectJobActionsPublishSummaryToCsccToProto converts a JobTriggerInspectJobActionsPublishSummaryToCscc object to its proto representation.
// The message has no fields; a non-nil value only signals presence.
func DlpJobTriggerInspectJobActionsPublishSummaryToCsccToProto(o *dlp.JobTriggerInspectJobActionsPublishSummaryToCscc) *dlppb.DlpJobTriggerInspectJobActionsPublishSummaryToCscc {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsPublishSummaryToCscc{}
	return p
}

// DlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalogToProto converts a JobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog object to its proto representation.
// The message has no fields; a non-nil value only signals presence.
func DlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalogToProto(o *dlp.JobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog) *dlppb.DlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsPublishFindingsToCloudDataCatalog{}
	return p
}

// DlpJobTriggerInspectJobActionsJobNotificationEmailsToProto converts a JobTriggerInspectJobActionsJobNotificationEmails object to its proto representation.
// The message has no fields; a non-nil value only signals presence.
func DlpJobTriggerInspectJobActionsJobNotificationEmailsToProto(o *dlp.JobTriggerInspectJobActionsJobNotificationEmails) *dlppb.DlpJobTriggerInspectJobActionsJobNotificationEmails {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsJobNotificationEmails{}
	return p
}

// DlpJobTriggerInspectJobActionsPublishToStackdriverToProto converts a JobTriggerInspectJobActionsPublishToStackdriver object to its proto representation.
// The message has no fields; a non-nil value only signals presence.
func DlpJobTriggerInspectJobActionsPublishToStackdriverToProto(o *dlp.JobTriggerInspectJobActionsPublishToStackdriver) *dlppb.DlpJobTriggerInspectJobActionsPublishToStackdriver {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerInspectJobActionsPublishToStackdriver{}
	return p
}
// DlpJobTriggerTriggersToProto converts a JobTriggerTriggers object to its proto representation.
func DlpJobTriggerTriggersToProto(o *dlp.JobTriggerTriggers) *dlppb.DlpJobTriggerTriggers {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerTriggers{}
	p.SetSchedule(DlpJobTriggerTriggersScheduleToProto(o.Schedule))
	p.SetManual(DlpJobTriggerTriggersManualToProto(o.Manual))
	return p
}

// DlpJobTriggerTriggersScheduleToProto converts a JobTriggerTriggersSchedule object to its proto representation.
func DlpJobTriggerTriggersScheduleToProto(o *dlp.JobTriggerTriggersSchedule) *dlppb.DlpJobTriggerTriggersSchedule {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerTriggersSchedule{}
	p.SetRecurrencePeriodDuration(dcl.ValueOrEmptyString(o.RecurrencePeriodDuration))
	return p
}

// DlpJobTriggerTriggersManualToProto converts a JobTriggerTriggersManual object to its proto representation.
// The message has no fields; a non-nil value only signals presence.
func DlpJobTriggerTriggersManualToProto(o *dlp.JobTriggerTriggersManual) *dlppb.DlpJobTriggerTriggersManual {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerTriggersManual{}
	return p
}

// DlpJobTriggerErrorsToProto converts a JobTriggerErrors object to its proto representation.
func DlpJobTriggerErrorsToProto(o *dlp.JobTriggerErrors) *dlppb.DlpJobTriggerErrors {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerErrors{}
	p.SetDetails(DlpJobTriggerErrorsDetailsToProto(o.Details))
	sTimestamps := make([]string, len(o.Timestamps))
	for i, r := range o.Timestamps {
		sTimestamps[i] = r
	}
	p.SetTimestamps(sTimestamps)
	return p
}

// DlpJobTriggerErrorsDetailsToProto converts a JobTriggerErrorsDetails object to its proto representation.
func DlpJobTriggerErrorsDetailsToProto(o *dlp.JobTriggerErrorsDetails) *dlppb.DlpJobTriggerErrorsDetails {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerErrorsDetails{}
	p.SetCode(dcl.ValueOrEmptyInt64(o.Code))
	p.SetMessage(dcl.ValueOrEmptyString(o.Message))
	sDetails := make([]*dlppb.DlpJobTriggerErrorsDetailsDetails, len(o.Details))
	for i, r := range o.Details {
		sDetails[i] = DlpJobTriggerErrorsDetailsDetailsToProto(&r)
	}
	p.SetDetails(sDetails)
	return p
}

// DlpJobTriggerErrorsDetailsDetailsToProto converts a JobTriggerErrorsDetailsDetails object to its proto representation.
func DlpJobTriggerErrorsDetailsDetailsToProto(o *dlp.JobTriggerErrorsDetailsDetails) *dlppb.DlpJobTriggerErrorsDetailsDetails {
	if o == nil {
		return nil
	}
	p := &dlppb.DlpJobTriggerErrorsDetailsDetails{}
	p.SetTypeUrl(dcl.ValueOrEmptyString(o.TypeUrl))
	p.SetValue(dcl.ValueOrEmptyString(o.Value))
	return p
}
// JobTriggerToProto converts a JobTrigger resource to its proto representation.
// Unlike the nested converters above, resource is assumed non-nil by the
// gRPC handlers that call this.
func JobTriggerToProto(resource *dlp.JobTrigger) *dlppb.DlpJobTrigger {
	p := &dlppb.DlpJobTrigger{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetInspectJob(DlpJobTriggerInspectJobToProto(resource.InspectJob))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetLastRunTime(dcl.ValueOrEmptyString(resource.LastRunTime))
	p.SetStatus(DlpJobTriggerStatusEnumToProto(resource.Status))
	p.SetLocationId(dcl.ValueOrEmptyString(resource.LocationId))
	p.SetParent(dcl.ValueOrEmptyString(resource.Parent))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	sTriggers := make([]*dlppb.DlpJobTriggerTriggers, len(resource.Triggers))
	for i, r := range resource.Triggers {
		sTriggers[i] = DlpJobTriggerTriggersToProto(&r)
	}
	p.SetTriggers(sTriggers)
	sErrors := make([]*dlppb.DlpJobTriggerErrors, len(resource.Errors))
	for i, r := range resource.Errors {
		sErrors[i] = DlpJobTriggerErrorsToProto(&r)
	}
	p.SetErrors(sErrors)
	return p
}
// applyJobTrigger handles the gRPC request by passing it to the underlying JobTrigger Apply() method.
func (s *JobTriggerServer) applyJobTrigger(ctx context.Context, c *dlp.Client, request *dlppb.ApplyDlpJobTriggerRequest) (*dlppb.DlpJobTrigger, error) {
	p := ProtoToJobTrigger(request.GetResource())
	res, err := c.ApplyJobTrigger(ctx, p)
	if err != nil {
		return nil, err
	}
	r := JobTriggerToProto(res)
	return r, nil
}

// ApplyDlpJobTrigger handles the gRPC request by passing it to the underlying JobTrigger Apply() method.
func (s *JobTriggerServer) ApplyDlpJobTrigger(ctx context.Context, request *dlppb.ApplyDlpJobTriggerRequest) (*dlppb.DlpJobTrigger, error) {
	cl, err := createConfigJobTrigger(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyJobTrigger(ctx, cl, request)
}

// DeleteDlpJobTrigger handles the gRPC request by passing it to the underlying JobTrigger Delete() method.
func (s *JobTriggerServer) DeleteDlpJobTrigger(ctx context.Context, request *dlppb.DeleteDlpJobTriggerRequest) (*emptypb.Empty, error) {
	cl, err := createConfigJobTrigger(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteJobTrigger(ctx, ProtoToJobTrigger(request.GetResource()))
}

// ListDlpJobTrigger handles the gRPC request by passing it to the underlying JobTriggerList() method.
func (s *JobTriggerServer) ListDlpJobTrigger(ctx context.Context, request *dlppb.ListDlpJobTriggerRequest) (*dlppb.ListDlpJobTriggerResponse, error) {
	cl, err := createConfigJobTrigger(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListJobTrigger(ctx, request.GetLocation(), request.GetParent())
	if err != nil {
		return nil, err
	}
	var protos []*dlppb.DlpJobTrigger
	for _, r := range resources.Items {
		rp := JobTriggerToProto(r)
		protos = append(protos, rp)
	}
	p := &dlppb.ListDlpJobTriggerResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigJobTrigger builds a DCL client for JobTrigger operations,
// authenticated with the credentials file supplied by the request.
// Note: parameter renamed from snake_case (service_account_file) to the
// idiomatic mixedCaps form; Go parameter names are not part of the API.
func createConfigJobTrigger(ctx context.Context, serviceAccountFile string) (*dlp.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return dlp.NewClient(conf), nil
}
|
package netlib
import (
"errors"
"net"
"github.com/elitah/utils/atomic"
)
var (
	// EClosed is returned by Accept, Input and Close once the listener
	// has been closed.
	EClosed = errors.New("channel closed")
)

// ListenerWithInput is a net.Listener whose connections are supplied
// externally via Input rather than accepted from a socket.
type ListenerWithInput interface {
	net.Listener
	Input(net.Conn) error
}

// chanListener implements ListenerWithInput on top of a buffered channel.
type chanListener struct {
	flag atomic.AInt32 // 0x0 = open, 0x1 = closed (set once by Close)
	addr net.Addr      // address reported by Addr
	ch   chan net.Conn // carries connections from Input to Accept
}
// NewChanListener returns a channel-backed ListenerWithInput that reports
// addr as its address and buffers up to size pending connections.
// It returns nil when addr is nil.
func NewChanListener(addr net.Addr, size int) ListenerWithInput {
	if addr == nil {
		return nil
	}
	return &chanListener{
		addr: addr,
		ch:   make(chan net.Conn, size),
	}
}
// Accept blocks until a connection is supplied via Input. It returns
// EClosed once the listener has been closed (either because the flag is
// already set, or because the channel receive observes the close).
func (this *chanListener) Accept() (net.Conn, error) {
	if 0x0 == this.flag.Load() {
		// ok == false means Close has closed the channel underneath us.
		if conn, ok := <-this.ch; ok {
			return conn, nil
		}
	}
	return nil, EClosed
}
// Addr reports the address supplied at construction time.
func (this *chanListener) Addr() net.Addr { return this.addr }
// Input hands conn to a pending or future Accept call. It returns EClosed
// when the listener has already been closed.
//
// NOTE(review): between the flag check and the send, a concurrent Close can
// close the channel, which would make the send panic — confirm that callers
// serialize Input against Close.
func (this *chanListener) Input(conn net.Conn) error {
	if 0x0 == this.flag.Load() {
		this.ch <- conn
		return nil
	}
	return EClosed
}
// Close marks the listener closed (exactly once, via CAS) and closes the
// connection channel so any goroutine blocked in Accept wakes up and
// returns EClosed. Subsequent calls return EClosed.
func (this *chanListener) Close() error {
	if this.flag.CAS(0x0, 0x1) {
		close(this.ch)
		// BUGFIX: the channel is intentionally NOT set to nil any more.
		// A concurrent Accept/Input that had already passed the flag
		// check would otherwise operate on a nil channel and block
		// forever; a receive from the *closed* channel, by contrast,
		// returns immediately with ok == false.
		return nil
	}
	return EClosed
}
|
package main
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
)
/*
Candidate report endpoints, kept for reference:
  /api/v0/diagnostic_report
  /api/v0/diagnostic_report.json
*/
// OpsManClient is the client for the ops manager.
type OpsManClient struct {
	address string // base URL of the ops manager / UAA host
	token   string // Authorization header value ("<type> <token>"); empty until login succeeds
}
// DiagnosticReport is the result from /api/v0/diagnostic_report.
type DiagnosticReport struct {
	// Legacy is set (outside of JSON decoding) when the endpoint returns
	// 404, indicating an older ops manager without this API.
	Legacy   bool `json:"-"`
	Versions struct {
		Schema  string `json:"installation_schema_version"`
		Meta    string `json:"metadata_version"`
		Release string `json:"release_version"`
	} `json:"versions"`
	Products struct {
		Deployed []struct {
			Name    string `json:"name"`
			Version string `json:"version"`
			// Stemcell holds the raw filename as returned by the API;
			// GetInfo rewrites it to just the version component.
			Stemcell string `json:"stemcell"`
		} `json:"deployed"`
	} `json:"added_products"`
	Stemcells []string `json:"stemcells"`
}
// OAuthPayload is the wrapper for the oauth token.
type OAuthPayload struct {
	TokenType   string `json:"token_type"`
	AccessToken string `json:"access_token"`
}

// HeaderValue renders the payload as an Authorization header value,
// i.e. "<token_type> <access_token>".
func (payload OAuthPayload) HeaderValue() string {
	return payload.TokenType + " " + payload.AccessToken
}
// NewOpsManClient will create a new ops manager client.
//
// It reads OPSMAN_USER, UAA_ADDRESS and OPSMAN_PASSWORD from the
// environment, performs a UAA password-grant login, and stores the
// resulting Authorization header value on the client. A non-nil error is
// returned when any variable is missing or the login request fails.
func NewOpsManClient() (opsManClient *OpsManClient, err error) {
	user := os.Getenv("OPSMAN_USER")
	address := os.Getenv("UAA_ADDRESS")
	password := os.Getenv("OPSMAN_PASSWORD")
	if len(user) == 0 || len(address) == 0 || len(password) == 0 {
		// Error string lowercased per Go convention; now also names the
		// variables that must be set.
		err = errors.New("misconfigured: OPSMAN_USER, UAA_ADDRESS and OPSMAN_PASSWORD must all be set")
		return
	}
	log.Printf("Logging in as: %s", user)
	opsManClient = &OpsManClient{
		address: address,
	}
	// SECURITY NOTE(review): the password travels in the URL query string,
	// where proxies and access logs may record it. UAA also accepts the
	// credentials as a POST form body — consider switching.
	path := fmt.Sprintf("/uaa/oauth/token?grant_type=password&username=%s&password=%s", user, password)
	err = opsManClient.callURL(path, func(code int, data []byte) (e error) {
		var token OAuthPayload
		if codeIsGood(code) {
			if e = json.Unmarshal(data, &token); e == nil {
				opsManClient.token = token.HeaderValue()
			}
		}
		return
	})
	return
}
// codeIsGood reports whether code is a success or redirect HTTP status,
// i.e. 200 <= code < 400.
func codeIsGood(code int) bool {
	switch {
	case code < http.StatusOK:
		return false
	case code >= http.StatusBadRequest:
		return false
	default:
		return true
	}
}
// GetInfo will return the info.
//
// It fetches /api/v0/diagnostic_report.json into report. On success the
// deployed products' stemcell filenames are trimmed to just the version;
// a 404 marks the report as Legacy (endpoint absent on old ops managers).
func (opsManClient *OpsManClient) GetInfo(report *DiagnosticReport) (err error) {
	err = opsManClient.callURL("/api/v0/diagnostic_report.json", func(code int, data []byte) (e error) {
		if codeIsGood(code) {
			if e = json.Unmarshal(data, report); e == nil {
				// cleanup the stemcell versions here.
				for index, prod := range report.Products.Deployed {
					// This is assuming that the format will not change...
					// currently it is: bosh-stemcell-3232.19-vsphere-esxi-ubuntu-trusty-go_agent.tgz
					// so parts[2] is the version ("3232.19").
					if parts := strings.Split(prod.Stemcell, "-"); len(parts) >= 3 {
						report.Products.Deployed[index].Stemcell = parts[2]
					}
				}
			}
		} else if code == http.StatusNotFound {
			report.Legacy = true
		}
		return
	})
	return
}
// callURL performs a GET against the ops manager at the given path,
// adds the appropriate auth headers, and invokes operation with the
// response status code and body. The body is only read for good (2xx/3xx)
// statuses; otherwise operation receives a nil body.
func (opsManClient *OpsManClient) callURL(path string, operation func(int, []byte) error) (err error) {
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				// NOTE(review): certificate verification is disabled —
				// common for self-signed ops manager certs, but confirm
				// this is intended.
				InsecureSkipVerify: true,
			},
		},
	}
	url := opsManClient.address + path
	var req *http.Request
	if req, err = http.NewRequest("GET", url, nil); err == nil {
		if len(opsManClient.token) != 0 {
			req.Header.Add("Authorization", opsManClient.token)
		} else {
			// shortcut for opsman login.
			req.SetBasicAuth("opsman", "")
		}
		var resp *http.Response
		if resp, err = client.Do(req); err == nil {
			defer resp.Body.Close()
			var resBody []byte
			code := resp.StatusCode
			if codeIsGood(code) {
				// BUGFIX: a read error used to be silently overwritten by
				// the subsequent `err = operation(...)` assignment; it is
				// now returned immediately.
				if resBody, err = ioutil.ReadAll(resp.Body); err != nil {
					return
				}
			}
			err = operation(code, resBody)
		}
	}
	return
}
|
package middleware
import (
"database/sql"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
)
var db *sql.DB
// Storage opens the MySQL connection described by cfg, stores the handle in
// the package-level db (so CloseStorage can close it), and returns a gin
// middleware that exposes the handle to handlers under the "db" key.
// It panics when the DSN cannot be parsed.
func Storage(cfg string) gin.HandlerFunc {
	var err error
	// BUGFIX: the original `db, err := sql.Open(...)` declared a new local
	// db, shadowing the package-level variable — CloseStorage would then
	// call Close on a nil *sql.DB and panic.
	db, err = sql.Open("mysql", cfg)
	if err != nil {
		panic("Failed to connect to database.")
	}
	return func(c *gin.Context) {
		c.Set("db", db)
		c.Next()
	}
}
// CloseStorage closes the package-level database handle opened by Storage.
// It is a no-op when Storage was never (successfully) called, instead of
// panicking on a nil handle.
func CloseStorage() error {
	if db == nil {
		return nil
	}
	return db.Close()
}
|
package manager
import (
"net"
"github.com/Cloud-Foundations/Dominator/lib/net/vsock"
)
// checkVsockets probes for VSOCK support and records the result on the
// Manager. A failure to read the context ID is treated as "no VSOCK
// available" and is not reported as an error.
func (m *Manager) checkVsockets() error {
	if cid, err := vsock.GetContextID(); err != nil {
		return nil
	} else if cid != 2 {
		// CID 2 is the well-known host context ID (VMADDR_CID_HOST);
		// presumably vsockets are only enabled when running on the host —
		// TODO(review): confirm this intent.
		m.Logger.Printf("detected VSOCK CID=%d, not enabling\n", cid)
	} else {
		m.vsocketsEnabled = true
		m.Logger.Println("VSOCK enabled")
	}
	return nil
}
// getVmCID derives a VSOCK context ID for a VM from its IPv4 address by
// packing the four octets into a big-endian uint32. It returns 0 (and no
// error) when vsockets are disabled or the address is not IPv4.
func (m *Manager) getVmCID(ipAddr net.IP) (uint32, error) {
	if !m.vsocketsEnabled {
		return 0, nil
	}
	ip4 := ipAddr.To4()
	if ip4 == nil {
		return 0, nil
	}
	cid := uint32(ip4[0])<<24 |
		uint32(ip4[1])<<16 |
		uint32(ip4[2])<<8 |
		uint32(ip4[3])
	return cid, nil
}
|
package models
// Member describes a project member as exchanged with
// /api/projects/:pid/members.
type Member struct {
	UserName string `json:"username"` // account name of the member
	Roles    []int  `json:"roles"`    // role IDs held within the project
}
|
package alipay
import (
"crypto"
"encoding/base64"
"encoding/json"
"errors"
"github.com/imkos/alipay/encoding"
"github.com/tidwall/gjson"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
var (
	// RSA signs with SHA1 (legacy sign_type "RSA").
	RSA = &RSA_sign{sign_type: K_SIGN_TYPE_RSA, hash: crypto.SHA1}
	// RSA2 signs with SHA256 (sign_type "RSA2"); used as the default by NewAliPay.
	RSA2 = &RSA_sign{sign_type: K_SIGN_TYPE_RSA2, hash: crypto.SHA256}
)
// AliPay is a client for the Alipay open-API gateway.
type AliPay struct {
	appId     string       // Alipay application ID, sent as app_id
	apiDomain string       // gateway URL (production or sandbox)
	partnerId string       // partner (merchant) ID
	client    *http.Client // HTTP client used for all gateway calls
	Signer    AliSign      // signing strategy; defaults to RSA2
}
// NewAliPay constructs an Alipay client. It is normally called once per
// developer credential: it installs sg as the key material on the package
// globals RSA and RSA2, so the signers are initialized for the whole process.
// isProduction selects the production gateway; otherwise the sandbox is used.
func NewAliPay(s_appId, s_partnerId string, sg *encoding.SignPKCS, isProduction bool) (*AliPay, error) {
	if sg == nil {
		return nil, errors.New("*SignPKCS is nil")
	}
	cli := &AliPay{
		appId:     s_appId,
		partnerId: s_partnerId,
		client:    &http.Client{},
	}
	if isProduction {
		cli.apiDomain = K_ALI_PAY_PRODUCTION_API_URL
	} else {
		cli.apiDomain = K_ALI_PAY_SANDBOX_API_URL
	}
	RSA.sig = sg
	RSA2.sig = sg
	// RSA2 is the default; reassign cli.Signer = RSA afterwards if the
	// legacy RSA scheme is required.
	cli.Signer = RSA2
	return cli, nil
}
// URLValues assembles the common gateway parameters plus the
// request-specific ones for param, signs the sorted key set, and returns
// the complete parameter set including the "sign" field.
func (ap *AliPay) URLValues(param AliPayParam) (url.Values, error) {
	values := url.Values{}
	values.Add("app_id", ap.appId)
	values.Add("method", param.APIName())
	values.Add("format", K_FORMAT)
	values.Add("charset", K_CHARSET)
	values.Add("sign_type", ap.Signer.Get_Signtype())
	values.Add("timestamp", time.Now().Format(K_TIME_FORMAT))
	values.Add("version", K_VERSION)
	if len(param.ExtJSONParamName()) > 0 {
		values.Add(param.ExtJSONParamName(), param.ExtJSONParamValue())
	}
	// Ranging over a nil map is a no-op, so no explicit nil check is needed.
	for key, v := range param.Params() {
		values.Add(key, v)
	}
	keys := make([]string, 0, len(values))
	for key := range values {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	sign, err := ap.Signer.Sign(keys, values)
	if err != nil {
		return nil, err
	}
	values.Add("sign", sign)
	return values, nil
}
// doRequest sends the signed parameter set for param to the gateway using
// the given HTTP method, verifies the response signature when the signer
// has a verification key, and unmarshals the body into results.
func (ap *AliPay) doRequest(method string, param AliPayParam, results interface{}) (err error) {
	if param == nil {
		// Error string no longer ends with punctuation (Go convention).
		return errors.New("AliPayParam is nil")
	}
	p, err := ap.URLValues(param)
	if err != nil {
		return err
	}
	req, err := http.NewRequest(method, ap.apiDomain, strings.NewReader(p.Encode()))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded;charset=utf-8")
	resp, err := ap.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if ap.Signer.CanVerify() {
		// The gateway nests the payload under "<method with dots replaced
		// by underscores><suffix>"; only that raw JSON fragment is signed.
		rootNodeName := strings.Replace(param.APIName(), ".", "_", -1) + k_RESPONSE_SUFFIX
		gj := gjson.ParseBytes(data)
		if err := ap.Signer.VerifyResponseData([]byte(gj.Get(rootNodeName).Raw), gj.Get("sign").Str); err != nil {
			return err
		}
	}
	return json.Unmarshal(data, results)
}
// DoRequest is the exported entry point for issuing a gateway call; it
// delegates to doRequest.
func (ap *AliPay) DoRequest(method string, param AliPayParam, results interface{}) (err error) {
	return ap.doRequest(method, param, results)
}
// AliSign is the signing strategy used for Alipay requests (AliPay signature).
type AliSign interface {
	// Get_Signtype returns the wire value for the sign_type parameter.
	Get_Signtype() string
	// Sign canonicalizes param in key order and returns the base64 signature.
	Sign(keys []string, param url.Values) (string, error)
	// CanVerify reports whether response verification is possible.
	CanVerify() bool
	// VerifyResponseData checks the base64 signature sign against data.
	VerifyResponseData(data []byte, sign string) error
}
// RSA_sign implements AliSign using RSA PKCS#1 v1.5 with a configurable hash.
type RSA_sign struct {
	sig       *encoding.SignPKCS // key material; nil until NewAliPay installs it
	sign_type string             // "RSA" or "RSA2"
	hash      crypto.Hash        // SHA1 for RSA, SHA256 for RSA2
}
// Get_Signtype returns the configured sign_type wire value.
// NOTE(review): the underscore name violates Go naming conventions, but it
// is part of the AliSign interface and cannot be renamed here alone.
func (r *RSA_sign) Get_Signtype() string {
	return r.sign_type
}
// Sign joins the non-empty, whitespace-trimmed key=value pairs (in the
// order given by keys) with "&", signs the result with PKCS#1 v1.5, and
// returns the signature base64-encoded. It returns "" without error when
// keys or param is nil.
func (r *RSA_sign) Sign(keys []string, param url.Values) (string, error) {
	if r.sig == nil {
		// Error string no longer ends with punctuation (Go convention).
		return "", errors.New("*SignPKCS is nil")
	}
	// Nothing to sign when either argument is absent.
	if keys == nil || param == nil {
		return "", nil
	}
	// Pre-size to len(keys) instead of the original make([]string, 0, 0).
	pList := make([]string, 0, len(keys))
	for _, key := range keys {
		value := strings.TrimSpace(param.Get(key))
		if len(value) > 0 {
			pList = append(pList, key+"="+value)
		}
	}
	src := strings.Join(pList, "&")
	sig, err := r.sig.SignPKCS1v15([]byte(src), r.hash)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(sig), nil
}
// CanVerify reports whether a verification key has been configured.
// Receiver renamed from "p" to "r" for consistency with the other
// RSA_sign methods.
func (r *RSA_sign) CanVerify() bool {
	return r.sig != nil && r.sig.CanVerify()
}
// VerifyResponseData decodes the base64 signature and verifies it against
// data using PKCS#1 v1.5 with the signer's hash.
func (r *RSA_sign) VerifyResponseData(data []byte, sign string) error {
	signBytes, err := base64.StdEncoding.DecodeString(sign)
	if err != nil {
		return err
	}
	return r.sig.VerifyPKCS1v15(data, signBytes, r.hash)
}
// verifySign validates the "sign" field of an Alipay notification carried
// in the request's POST form: all fields except sign/sign_type are sorted,
// joined as key=value&..., and verified with the algorithm named by
// sign_type.
//
// NOTE(review): this assumes req.ParseForm has already been called —
// otherwise req.PostForm is empty. Also, when the selected signer has no
// verification key (CanVerify false), err stays nil and the function
// returns true WITHOUT verifying anything; confirm that is intended, as it
// silently accepts unverified notifications.
func verifySign(req *http.Request) (ok bool, err error) {
	sign, err := base64.StdEncoding.DecodeString(req.PostForm.Get("sign"))
	signType := req.PostForm.Get("sign_type")
	if err != nil {
		return false, err
	}
	keys := make([]string, 0)
	for key, value := range req.PostForm {
		if key == "sign" || key == "sign_type" {
			continue
		}
		if len(value) > 0 {
			keys = append(keys, key)
		}
	}
	sort.Strings(keys)
	pList := make([]string, 0)
	for _, key := range keys {
		value := strings.TrimSpace(req.PostForm.Get(key))
		if len(value) > 0 {
			pList = append(pList, key+"="+value)
		}
	}
	s := strings.Join(pList, "&")
	if signType == K_SIGN_TYPE_RSA {
		if RSA.CanVerify() {
			err = RSA.sig.VerifyPKCS1v15([]byte(s), sign, crypto.SHA1)
		}
	} else {
		if RSA2.CanVerify() {
			err = RSA2.sig.VerifyPKCS1v15([]byte(s), sign, crypto.SHA256)
		}
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
|
package main
import (
"bytes"
"io/ioutil"
"log"
"syscall"
)
// getHostname reads the hostname from /etc/hostname, stripped of
// surrounding whitespace. When the file cannot be read, the error is
// logged and the fallback name "akina" is returned.
func getHostname() []byte {
	raw, err := ioutil.ReadFile("/etc/hostname")
	if err != nil {
		log.Println("error while reading hostname:", err)
		return []byte("akina")
	}
	return bytes.TrimSpace(raw)
}
// initHostname applies the configured hostname to the kernel via
// sethostname(2); failures are logged but not fatal.
func initHostname() {
	name := getHostname()
	log.Println("setting hostname to", string(name))
	if err := syscall.Sethostname(name); err != nil {
		log.Println("error while setting hostname:", err)
	}
}
|
package global
import (
"errors"
"fmt"
"io"
"strings"
"time"
multiple "ucp/multiple"
)
// GlobalConfig is the top-level configuration: a set of named endpoints
// plus a global MTU.
type GlobalConfig struct {
	Endpoints map[string]*UserConfig `json:"endpoints"`
	// Servers map[string]*UserConfig `json:"servers"`
	Mtu int `json:"mtu"` // maximum transmission unit applied globally
}
// Start starts every configured endpoint. Failures are collected and
// reported together as a single "|"-joined error; endpoints that started
// successfully remain running.
func (c *GlobalConfig) Start() error {
	var failures []string
	for _, endpoint := range c.Endpoints {
		if err := endpoint.Start(); err != nil {
			failures = append(failures, err.Error())
		}
	}
	if len(failures) > 0 {
		return errors.New(strings.Join(failures, "|"))
	}
	return nil
}
// Close shuts down every endpoint, collecting any failures into a single
// "|"-joined error.
func (c *GlobalConfig) Close() error {
	var failures []string
	for _, endpoint := range c.Endpoints {
		if err := endpoint.Close(); err != nil {
			failures = append(failures, err.Error())
		}
	}
	if len(failures) > 0 {
		return errors.New(strings.Join(failures, "|"))
	}
	return nil
}
// UserConfig describes one endpoint: the transport, the peer addresses,
// timeout strings (Go duration syntax, e.g. "5s"), and a listen address.
type UserConfig struct {
	// Tag string
	Network       string   `json:"network"`       // e.g. "tcp", "udp"
	Addrs         []string `json:"addrs"`         // remote addresses to connect
	DialTimeout   string   `json:"dialtimeout"`   // parsed with time.ParseDuration
	ReadTimeout   string   `json:"readtimeout"`
	WriteTimeout  string   `json:"writetimeout"`
	BufferTimeout string   `json:"buffertimeout"`
	Listen        string   `json:"listen"`
	// worker is the active connection manager; nil until Start succeeds.
	worker io.ReadWriteCloser
}
// Key returns an identifier for this endpoint built from its network and
// address list (e.g. "tcp[a b]").
func (c *UserConfig) Key() string {
	return fmt.Sprintf("%v%v", c.Network, c.Addrs)
}
// Start establishes the endpoint's connections and creates its worker.
// The worker is only assigned on full success.
func (c *UserConfig) Start() error {
	// BUGFIX: a malformed (non-empty) DialTimeout used to be silently
	// ignored, proceeding with a zero timeout. An empty string still means
	// "no timeout" for backward compatibility.
	var dialTimeout time.Duration
	if c.DialTimeout != "" {
		var err error
		dialTimeout, err = time.ParseDuration(c.DialTimeout)
		if err != nil {
			return fmt.Errorf("invalid dialtimeout %q: %w", c.DialTimeout, err)
		}
	}
	conns, err := multiple.EstablishConnection(c.Network, c.Listen, c.Addrs, dialTimeout)
	if err != nil {
		return err
	}
	w, err := multiple.NewUserMgr(conns, []string{c.DialTimeout, c.WriteTimeout, c.ReadTimeout, c.BufferTimeout})
	if err != nil {
		return err
	}
	c.worker = w
	return nil
}
// Close shuts down the endpoint's worker. It is a nil-safe no-op when the
// endpoint was never successfully started (previously this dereferenced a
// nil worker and panicked).
func (c *UserConfig) Close() error {
	if c.worker == nil {
		return nil
	}
	return c.worker.Close()
}
// Send writes p through the endpoint's worker.
// NOTE(review): panics if called before a successful Start (worker is nil).
func (c *UserConfig) Send(p []byte) (int, error) {
	return c.worker.Write(p)
}

// Recv reads into p from the endpoint's worker.
// NOTE(review): panics if called before a successful Start (worker is nil).
func (c *UserConfig) Recv(p []byte) (int, error) {
	return c.worker.Read(p)
}
|
package main
// main exercises the three payload-handling variants on an empty
// PayloadCollection.
// NOTE(review): payloadHandler*, NewDispatcher and MaxWorker are defined
// elsewhere in this package and are not visible here.
func main() {
	data := PayloadCollection{}
	// 1: first handler variant.
	payloadHandler(data)
	// 2: second handler variant.
	payloadHandler2(data)
	// 3: dispatcher/worker-pool variant — the dispatcher must be running
	// before payloads are handed to it.
	dispatcher := NewDispatcher(MaxWorker)
	dispatcher.Run()
	payloadHandler3(data)
}
|
package model
// SendMessageRequest carries a payload to be sent from src to dest using
// the given protocol number. src may be EMPTY_VIRTUAL_IP when the sender
// lets the stack pick a source address (see HasSrc).
type SendMessageRequest struct {
	message  []byte    // raw payload bytes
	protocol int       // protocol number of the payload
	src      VirtualIp // source address; EMPTY_VIRTUAL_IP when unset
	dest     VirtualIp // destination address
}
// MakeSendMessageRequest builds a request without an explicit source
// address; the src field is left as EMPTY_VIRTUAL_IP.
func MakeSendMessageRequest(message []byte, protocol int, dest VirtualIp) SendMessageRequest {
	return SendMessageRequest{
		message:  message,
		protocol: protocol,
		src:      EMPTY_VIRTUAL_IP,
		dest:     dest,
	}
}
// MakeSendMessageRequestWithSrc builds a request with an explicit source
// address.
func MakeSendMessageRequestWithSrc(message []byte, protocol int, src VirtualIp, dest VirtualIp) SendMessageRequest {
	return SendMessageRequest{
		message:  message,
		protocol: protocol,
		src:      src,
		dest:     dest,
	}
}
// Message returns the raw payload bytes.
func (r *SendMessageRequest) Message() []byte {
	return r.message
}

// Protocol returns the payload's protocol number.
func (r *SendMessageRequest) Protocol() int {
	return r.protocol
}

// Dest returns the destination address.
func (r *SendMessageRequest) Dest() VirtualIp {
	return r.dest
}

// Src returns the source address (EMPTY_VIRTUAL_IP when unset).
func (r *SendMessageRequest) Src() VirtualIp {
	return r.src
}

// HasSrc reports whether an explicit source address was provided.
func (r *SendMessageRequest) HasSrc() bool {
	return r.src != EMPTY_VIRTUAL_IP
}
|
package main
import (
"flag"
"github.com/lfxnxf/protobuf_to_sdk/general"
)
// g is the generator configured in init and driven by main.
var g *general.General

// init parses the command-line flags and constructs the generator.
// NOTE(review): doing flag parsing in init means it also runs under
// `go test`; consider moving it into main.
func init() {
	// Input protobuf file.
	in := flag.String("in", "", "protobuf file")
	// Output model file name.
	outModelName := flag.String("om", "model", "out model file")
	// Output sdk file name.
	outSdkName := flag.String("os", "sdk", "out sdk file")
	// Package name for the generated model.
	modelPackage := flag.String("mp", "model", "model package")
	// Package name for the generated sdk.
	sdkPackage := flag.String("sp", "sdk", "sdk package")
	// Whether the common helpers should be generated (1 = yes).
	needCommon := flag.Int64("common", 1, "是否需要生成common")
	flag.Parse()
	g = general.New(*in, *outModelName, *outSdkName, *modelPackage, *sdkPackage, *needCommon)
}
// main runs the generator configured by init.
func main() {
	g.Start()
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00700101 is the XML document wrapper for the
// caaa.007.001.01 (AcceptorCancellationAdvice) message.
type Document00700101 struct {
	XMLName xml.Name                       `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.007.001.01 Document"`
	Message *AcceptorCancellationAdviceV01 `xml:"AccptrCxlAdvc"`
}
// AddMessage allocates the document's message payload and returns it for
// population by the caller.
func (d *Document00700101) AddMessage() *AcceptorCancellationAdviceV01 {
	d.Message = &AcceptorCancellationAdviceV01{}
	return d.Message
}
// Scope
// The AcceptorCancellationAdvice message is sent by a card acceptor to notify the cancellation of a successfully completed card payment transaction. The message can be sent directly to the acquirer or through an agent.
// Usage
// The AcceptorCancellationAdvice message is sent by the card acceptor to an acquirer when the acquirer did not receive a correct response to the AcceptorCompletionAdvice message.
type AcceptorCancellationAdviceV01 struct {
	// Cancellation advice message management information.
	Header *iso20022.Header2 `xml:"Hdr"`
	// Information related to the cancellation advice.
	CancellationAdvice *iso20022.AcceptorCancellationAdvice1 `xml:"CxlAdvc"`
	// Trailer of the message containing a MAC (message authentication code).
	SecurityTrailer *iso20022.ContentInformationType3 `xml:"SctyTrlr"`
}
// AddHeader allocates the message header and returns it for population.
func (a *AcceptorCancellationAdviceV01) AddHeader() *iso20022.Header2 {
	a.Header = &iso20022.Header2{}
	return a.Header
}

// AddCancellationAdvice allocates the advice body and returns it for
// population.
func (a *AcceptorCancellationAdviceV01) AddCancellationAdvice() *iso20022.AcceptorCancellationAdvice1 {
	a.CancellationAdvice = &iso20022.AcceptorCancellationAdvice1{}
	return a.CancellationAdvice
}

// AddSecurityTrailer allocates the security trailer and returns it for
// population.
func (a *AcceptorCancellationAdviceV01) AddSecurityTrailer() *iso20022.ContentInformationType3 {
	a.SecurityTrailer = &iso20022.ContentInformationType3{}
	return a.SecurityTrailer
}
|
package handler
import (
"context"
"time"
server "github.com/micro/go-micro/v2/server"
"github.com/micro/go-micro/v2/util/log"
client "github.com/lecex/core/client"
"github.com/lecex/device-api/config"
cashierPB "github.com/lecex/device-api/proto/cashier"
devicePB "github.com/lecex/device-api/proto/device"
PB "github.com/lecex/user/proto/permission"
)
var Conf = config.Conf
// Register wires the cashier and device handlers onto the RPC server and
// starts a background push of the frontend permissions to the user service.
func Register(Server server.Server) {
	cashierPB.RegisterCashiersHandler(Server, &Cashier{Conf.Service["device"]})
	devicePB.RegisterDevicesHandler(Server, &Device{Conf.Service["device"]})
	go Sync() // sync frontend permissions in the background
}
// Sync pushes the locally configured frontend permissions to the user
// service. It sleeps a few seconds before each attempt (so dependent
// services can come up) and retries in a loop until the call succeeds.
// The previous implementation retried via unbounded recursion, which grows
// the stack for as long as the user service stays unreachable.
func Sync() {
	req := &PB.Request{
		Permissions: Conf.Permissions,
	}
	for {
		time.Sleep(5 * time.Second)
		res := &PB.Response{}
		err := client.Call(context.TODO(), Conf.Service["user"], "Permissions.Sync", req, res)
		if err == nil {
			return
		}
		log.Log(err)
	}
}
|
package main
import (
"net/http"
"log"
"os/exec"
"strings"
)
// 入口函数
// 入口函数
// main starts an HTTP server whose /exec endpoint runs a shell command taken
// from the request's form values and returns its combined output.
//
// SECURITY WARNING: this endpoint executes arbitrary caller-supplied commands
// with the server's privileges — remote code execution by design. It must
// never be exposed beyond a fully trusted environment.
func main() {
	http.HandleFunc("/exec", func(w http.ResponseWriter, r *http.Request) {
		// Log after the command has finished, whether it succeeded or not.
		defer func() { log.Printf("finished %v\n", r.URL) }()
		out, err := genCmd(r).CombinedOutput()
		if err != nil {
			// On failure only the error text is returned; partial output is dropped.
			w.WriteHeader(500)
			w.Write([]byte(err.Error()))
			return
		}
		w.Write(out)
	})
	// NOTE(review): no server timeouts are configured — confirm acceptable.
	log.Fatal(http.ListenAndServe(":8000", nil))
}
func genCmd(r *http.Request) (cmd *exec.Cmd) {
var args []string
if got:=r.FormValue("args");got!="" {
args = strings.Split(got, " ")
}
if c:=r.FormValue("cmd");len(args)==0 {
cmd = exec.Command(c)
}else{
cmd = exec.Command(c, args...)
}
return
}
|
/*
Copyright 2017 The Kubernetes Authors.
SPDX-License-Identifier: Apache-2.0
*/
package oimcsidriver
import (
"context"
"github.com/container-storage-interface/spec/lib/go/csi"
)
// GetPluginInfo reports the driver's configured name and version
// (CSI identity RPC).
func (od *oimDriver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
	return &csi.GetPluginInfoResponse{
		Name:          od.driverName,
		VendorVersion: od.version,
	}, nil
}

// Probe always reports the driver as healthy (CSI identity RPC).
func (od *oimDriver) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) {
	return &csi.ProbeResponse{}, nil
}

// GetPluginCapabilities advertises the controller service capability only
// (CSI identity RPC).
func (od *oimDriver) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
	return &csi.GetPluginCapabilitiesResponse{
		Capabilities: []*csi.PluginCapability{
			{
				Type: &csi.PluginCapability_Service_{
					Service: &csi.PluginCapability_Service{
						Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
					},
				},
			},
		},
	}, nil
}
|
package models
import (
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"net/http"
"strconv"
)
var Db *gorm.DB
// https://www.artacode.com/posts/sql/gorm-err/
// init opens the package-global gorm MySQL connection at load time.
// A connection failure is fatal — the package cannot function without a DB.
// NOTE(review): the DSN hard-codes root with an empty password and the
// "healthy" database — presumably development-only; confirm before deploying.
func init() {
	var err error
	//"root:@/healthy?charset=utf8&parseTime=true"
	Db, err = gorm.Open("mysql", "root:@/healthy?charset=utf8&parseTime=true")
	if err != nil {
		panic(err.Error())
	}
}
// Paginate returns a gorm scope that applies offset/limit pagination based
// on the request's "page" and "limit" query parameters.
//
// Missing, non-numeric, zero or negative values fall back to the defaults
// (page 1, limit 20); the previous `== 0` checks let negative query values
// through to gorm as negative offsets/limits.
func Paginate(r *http.Request) func(db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		// Atoi yields 0 on parse failure, which the guards below normalize.
		page, _ := strconv.Atoi(r.URL.Query().Get("page"))
		if page <= 0 {
			page = 1
		}
		limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
		if limit <= 0 {
			limit = 20
		}
		return db.Offset((page - 1) * limit).Limit(limit)
	}
}
|
package models
import (
"time"
)
// Application represents the config file up will monitor
type Application struct {
	ID        int       `gorm:"primarykey" json:"id"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"-"` // hidden from API responses

	Name        string        `yaml:"name" json:"name"`
	Protocol    string        `yaml:"protocol" json:"protocol"`
	Expectation string        `yaml:"expectation" json:"expectation"`
	Target      string        `yaml:"target" json:"target"`
	Interval    time.Duration `yaml:"interval" json:"interval"`
	Checks      []Check       `json:"checks"`
	Label       string        `yaml:"label" json:"label"`
	Degraded    bool          `json:"degraded"`
	// Alerted has no tags, so it serializes under its default field name.
	Alerted bool
}

// Check is the datastructure that holds the checks and their results
type Check struct {
	ID        int       `gorm:"primarykey" json:"id"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"-"`

	UP            bool        `json:"up"`
	ApplicationID int         `json:"applicationId"`
	Application   Application `json:"-"` // back-reference, excluded from JSON
}
|
/*
* Copyright (C) 2019 Rohith Jayawardene <gambol99@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package namespaceclaim
import (
"context"
"fmt"
"net/http"
core "github.com/appvia/hub-apis/pkg/apis/core/v1"
"github.com/appvia/hub-apiserver/pkg/hub"
kubev1 "github.com/appvia/kube-operator/pkg/apis/kube/v1"
"github.com/gambol99/hub-utils/pkg/finalizers"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Update is responsible for reconciling the resource: it ensures the remote
// namespace and team role binding exist and match the claim, updates the
// claim's status, and registers our finalizer.
func (r *ReconcileNamespaceClaim) Update(
	ctx context.Context,
	cl client.Client,
	cc kubernetes.Interface,
	resource *kubev1.NamespaceClaim) error {

	uid := string(resource.GetUID())

	// --- Logic ---
	// we have a client to the remote kubernetes cluster
	// we check if the team has a team namespace policy
	// we need to check the namespace is there and if not create it
	// we need to check the rolebinding exists and if not create it
	// we need to check that all the members of the team are in the binding
	// we set ourselves as the finalizer on the resource if not there already
	// we set the status of the resource to Success and the Phase is Installed
	// we sit back, relax and contain our smug smile

	team := HubLabel(resource, "team")
	workspace := HubLabel(resource, "workspace")

	rlog := log.WithValues(
		"namespace.name", resource.Spec.Name,
		"resource.name", resource.Name,
		"resource.namespace", resource.Namespace,
		"team.name", team,
		"workspace.name", workspace,
		"uid", uid)

	//
	// @step: check the namespace exists, if not create it, else update it
	//
	annotations := resource.Spec.Annotations
	if annotations == nil {
		annotations = make(map[string]string)
	}
	// Stamp the namespace with the claim's UID so it can be traced back.
	annotations["hub.appvia.io/uid"] = uid

	namespace := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:        resource.Spec.Name,
			Labels:      resource.Spec.Labels,
			Annotations: annotations,
		},
	}

	err := func() error {
		if _, err := cc.CoreV1().Namespaces().Get(resource.Spec.Name, metav1.GetOptions{}); err != nil {
			if !kerrors.IsNotFound(err) {
				return fmt.Errorf("failed to check namespace exists: %s", err)
			}
			rlog.Info("creating the namespace resource in cluster")
			// else we need to create the namespace
			if _, err := cc.CoreV1().Namespaces().Create(namespace); err != nil {
				return fmt.Errorf("failed to create namespace: %s", err)
			}
		} else {
			rlog.Info("updating the namespace resource in cluster")
			if _, err := cc.CoreV1().Namespaces().Update(namespace); err != nil {
				return fmt.Errorf("failed to update namespace: %s", err)
			}
		}
		return nil
	}()
	if err != nil {
		resource.Status.Status = core.FailureStatus
		resource.Status.Conditions = []core.Condition{{
			Message: err.Error(),
			Code:    http.StatusServiceUnavailable,
		}}
		return err
	}

	//
	// @step we need to check the rolebinding exists and if not create it
	//
	binding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      RoleBindingName,
			Namespace: resource.Spec.Name,
			Labels: map[string]string{
				"hub.appvia.io/team":      resource.GetLabels()[hub.Label("team")],
				"hub.appvia.io/workspace": resource.GetLabels()[hub.Label("workspace")],
			},
			Annotations: map[string]string{"hub.appvia.io/uid": uid},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     ClusterRoleName,
		},
	}

	// @step: retrieve all the users in the team
	membership, err := MakeTeamMembersList(ctx, cl, resource.GetLabels()[hub.Label("team")])
	if err != nil {
		resource.Status.Status = core.FailureStatus
		resource.Status.Conditions = []core.Condition{{
			Detail:  err.Error(),
			Message: "failed to retrieve a list of users",
			Code:    http.StatusServiceUnavailable,
		}}
		return err
	}
	rlog.WithValues(
		"users", len(membership.Items),
	).Info("found the x members in the team")

	// Every team member becomes a subject of the role binding.
	for _, x := range membership.Items {
		binding.Subjects = append(binding.Subjects, rbacv1.Subject{
			APIGroup: rbacv1.GroupName,
			Kind:     rbacv1.UserKind,
			Name:     x.Spec.Username,
		})
	}

	err = func() error {
		if _, err := cc.RbacV1().RoleBindings(resource.Spec.Name).Get(RoleBindingName, metav1.GetOptions{}); err != nil {
			if !kerrors.IsNotFound(err) {
				return fmt.Errorf("failed to check role binding exists: %s", err)
			}
			if _, err := cc.RbacV1().RoleBindings(resource.Spec.Name).Create(binding); err != nil {
				return fmt.Errorf("failed to create role binding: %s", err)
			}
		} else {
			if _, err := cc.RbacV1().RoleBindings(resource.Spec.Name).Update(binding); err != nil {
				return fmt.Errorf("failed to update role binding: %s", err)
			}
		}
		// @step: set the phase of the resource
		resource.Status.Phase = PhaseInstalled
		resource.Status.Status = core.SuccessStatus
		resource.Status.Conditions = []core.Condition{}

		return nil
	}()
	if err != nil {
		resource.Status.Status = core.FailureStatus
		resource.Status.Conditions = []core.Condition{{
			Message: err.Error(),
			Code:    http.StatusServiceUnavailable,
		}}
		return err
	}

	if err := cl.Status().Update(ctx, resource); err != nil {
		log.Error(err, "failed to update the status")
		return err
	}

	//
	// @step: set ourselves as the finalizer on the resource if not there already
	//
	finalizer := finalizers.NewFinalizer(cl, FinalizerName)
	if finalizer.NeedToAdd(resource) {
		rlog.WithValues(
			"finalizer", FinalizerName,
		).Info("adding our finalizer to the resource")

		if err := finalizer.Add(resource); err != nil {
			rlog.Error(err, "failed to add the finalizer to the resource")
			return err
		}
	}

	return nil
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-09-30 09:15
# @File : lt_64_Minimum_Path_Sum.go
# @Description :
# @Attention :
*/
package array
/*
最小路径和
依旧为动态规划题
*/
// minPathSum returns the minimal path sum from the top-left to the
// bottom-right cell of grid, moving only right or down.
// The computation is dynamic programming done in place: each cell is
// overwritten with the cheapest cost of reaching it (the input is mutated).
func minPathSum(grid [][]int) int {
	if len(grid) == 0 || len(grid[0]) == 0 {
		return 0
	}
	rows, cols := len(grid), len(grid[0])
	for row := 0; row < rows; row++ {
		for col := 0; col < cols; col++ {
			switch {
			case row == 0 && col == 0:
				// Starting cell: cost is its own value.
			case row == 0:
				grid[row][col] += grid[row][col-1]
			case col == 0:
				grid[row][col] += grid[row-1][col]
			default:
				// Interior cell: come from the cheaper neighbour.
				grid[row][col] += min(grid[row][col-1], grid[row-1][col])
			}
		}
	}
	return grid[rows-1][cols-1]
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package nio
import (
"net"
)
/*
type Channel struct {
fd uintptr
interests int
ready int
}
TCPListener
TCPConn
UnixListener
UnixConn
UDPConn
*/
// TODO: rename SelectionKey to Channel and implement net.Listener / net.Conn.
// Read: callers must drain all readable data.
// Write: state is maintained automatically; unwritten data is buffered.
// Upper layers only need to watch readability; writes are managed by the library.

// SelectionKey associates a registered file descriptor with its interest set
// and the operations currently ready on it.
type SelectionKey struct {
	channel   interface{} // net.Conn or net.Listener
	data      interface{} // attachment
	fd        uintptr     // file descriptor
	interests int         // registered ops
	ready     int         // ready ops
}

// reset clears the ready set (called before the next poll round).
func (sk *SelectionKey) reset() {
	sk.ready = 0
}

// Fd returns the underlying file descriptor.
func (sk *SelectionKey) Fd() uintptr {
	return sk.fd
}

//func (sk *SelectionKey) Acceptable() bool {
//	return sk.ready&OP_ACCEPT != 0
//}

// Readable reports whether the key is ready for reading.
func (sk *SelectionKey) Readable() bool {
	return sk.ready&OP_READ != 0
}

// Writable reports whether the key is ready for writing.
func (sk *SelectionKey) Writable() bool {
	return sk.ready&OP_WRITE != 0
}

// isInterests reports whether any bit of ops is in the registered interests.
func (sk *SelectionKey) isInterests(ops int) bool {
	return sk.interests&ops != 0
}

// setReadyIn marks the key read-ready, but only if reads were registered.
func (sk *SelectionKey) setReadyIn() {
	if sk.isInterests(OP_READ) {
		sk.ready |= OP_READ
	}
	//else if sk.isInterests(OP_ACCEPT) {
	//	sk.ready |= OP_ACCEPT
	//}
}

// setReadyOut marks the key write-ready, but only if writes were registered.
func (sk *SelectionKey) setReadyOut() {
	if sk.isInterests(OP_WRITE) {
		sk.ready |= OP_WRITE
	}
}

// InterestOps returns the registered interest set.
func (sk *SelectionKey) InterestOps() int {
	return sk.interests
}

// ReadyOps returns the currently ready operations.
func (sk *SelectionKey) ReadyOps() int {
	return sk.ready
}

// Channel returns the wrapped net.Conn or net.Listener.
func (sk *SelectionKey) Channel() interface{} {
	return sk.channel
}

// Data returns the user attachment.
func (sk *SelectionKey) Data() interface{} {
	return sk.data
}

// Accept is not supported on a SelectionKey.
func (sk *SelectionKey) Accept() (net.Conn, error) {
	return nil, ErrNotSupport
}

// Read reads from the underlying descriptor into b.
func (sk *SelectionKey) Read(b []byte) (int, error) {
	return Read(sk.fd, b)
}

// Write writes b to the underlying descriptor.
func (sk *SelectionKey) Write(b []byte) (int, error) {
	return Write(sk.fd, b)
}
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io"
"sync"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"helm.sh/helm/v3/cmd/helm/require"
"helm.sh/helm/v3/pkg/getter"
"helm.sh/helm/v3/pkg/repo"
)
const updateDesc = `
Update gets the latest information about charts from the respective chart repositories.
Information is cached locally, where it is used by commands like 'helm search'.
You can optionally specify a list of repositories you want to update.
$ helm repo update <repo_name> ...
To update all the repositories, use 'helm repo update'.
`
var errNoRepositories = errors.New("no repositories found. You must add one before updating")
// repoUpdateOptions collects the flags and dependencies of `helm repo update`.
type repoUpdateOptions struct {
	update               func([]*repo.ChartRepository, io.Writer, bool) error // injectable for tests
	repoFile             string                                               // path to the repositories file
	repoCache            string                                               // optional override for the index cache dir
	names                []string                                             // repos to update; empty means all
	failOnRepoUpdateFail bool
}

// newRepoUpdateCmd builds the `helm repo update` cobra command.
func newRepoUpdateCmd(out io.Writer) *cobra.Command {
	o := &repoUpdateOptions{update: updateCharts}

	cmd := &cobra.Command{
		Use:     "update [REPO1 [REPO2 ...]]",
		Aliases: []string{"up"},
		Short:   "update information of available charts locally from chart repositories",
		Long:    updateDesc,
		Args:    require.MinimumNArgs(0),
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return compListRepos(toComplete, args), cobra.ShellCompDirectiveNoFileComp
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			o.repoFile = settings.RepositoryConfig
			o.repoCache = settings.RepositoryCache
			o.names = args
			return o.run(out)
		},
	}

	f := cmd.Flags()

	// Adding this flag for Helm 3 as stop gap functionality for https://github.com/helm/helm/issues/10016.
	// This should be deprecated in Helm 4 by update to the behaviour of `helm repo update` command.
	f.BoolVar(&o.failOnRepoUpdateFail, "fail-on-repo-update-fail", false, "update fails if any of the repository updates fail")

	return cmd
}

// run loads the repo file, validates any requested names, then updates either
// all configured repositories or just the requested subset.
func (o *repoUpdateOptions) run(out io.Writer) error {
	f, err := repo.LoadFile(o.repoFile)
	switch {
	case isNotExist(err):
		return errNoRepositories
	case err != nil:
		return errors.Wrapf(err, "failed loading file: %s", o.repoFile)
	case len(f.Repositories) == 0:
		return errNoRepositories
	}

	var repos []*repo.ChartRepository
	updateAllRepos := len(o.names) == 0

	if !updateAllRepos {
		// Fail early if the user specified an invalid repo to update
		if err := checkRequestedRepos(o.names, f.Repositories); err != nil {
			return err
		}
	}

	for _, cfg := range f.Repositories {
		if updateAllRepos || isRepoRequested(cfg.Name, o.names) {
			r, err := repo.NewChartRepository(cfg, getter.All(settings))
			if err != nil {
				return err
			}
			if o.repoCache != "" {
				r.CachePath = o.repoCache
			}
			repos = append(repos, r)
		}
	}

	return o.update(repos, out, o.failOnRepoUpdateFail)
}
// updateCharts downloads the index file of every repository concurrently and
// reports progress to out. When failOnRepoUpdateFail is set, any failure
// causes an aggregate error listing the failing repository URLs.
func updateCharts(repos []*repo.ChartRepository, out io.Writer, failOnRepoUpdateFail bool) error {
	fmt.Fprintln(out, "Hang tight while we grab the latest from your chart repositories...")
	var (
		wg sync.WaitGroup
		// mu guards repoFailList: it is appended to from several goroutines,
		// which previously raced without synchronization.
		mu           sync.Mutex
		repoFailList []string
	)
	for _, re := range repos {
		wg.Add(1)
		go func(re *repo.ChartRepository) {
			defer wg.Done()
			if _, err := re.DownloadIndexFile(); err != nil {
				fmt.Fprintf(out, "...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err)
				mu.Lock()
				repoFailList = append(repoFailList, re.Config.URL)
				mu.Unlock()
			} else {
				fmt.Fprintf(out, "...Successfully got an update from the %q chart repository\n", re.Config.Name)
			}
		}(re)
	}
	wg.Wait()

	if len(repoFailList) > 0 && failOnRepoUpdateFail {
		return fmt.Errorf("Failed to update the following repositories: %s",
			repoFailList)
	}

	fmt.Fprintln(out, "Update Complete. ⎈Happy Helming!⎈")
	return nil
}
// checkRequestedRepos verifies that every requested repository name exists in
// the configured repositories, returning an error naming the first unknown one.
func checkRequestedRepos(requestedRepos []string, validRepos []*repo.Entry) error {
	valid := make(map[string]bool, len(validRepos))
	for _, entry := range validRepos {
		valid[entry.Name] = true
	}
	for _, requested := range requestedRepos {
		if !valid[requested] {
			return errors.Errorf("no repositories found matching '%s'.  Nothing will be updated", requested)
		}
	}
	return nil
}
// isRepoRequested reports whether repoName appears in requestedRepos.
func isRepoRequested(repoName string, requestedRepos []string) bool {
	for i := range requestedRepos {
		if requestedRepos[i] == repoName {
			return true
		}
	}
	return false
}
|
package models
import "time"
// Client links a Discord user within a guild to a generated UUID.
type Client struct {
	ID        uint      `gorm:"primary_key" json:"id"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
	// DiscordID and GuildID share a composite unique index, so a user can be
	// registered at most once per guild.
	DiscordID     string    `json:"discord_id" gorm:"unique_index:idx_client_discord_id_guild_id"`
	UUID          string    `json:"uuid" gorm:"unique"`
	LastRequestAt time.Time `json:"last_request_at"`
	GuildID       string    `json:"guild_id" gorm:"unique_index:idx_client_discord_id_guild_id"`
}

// ClientRepository provides database lookups for Client rows.
type ClientRepository struct{}

// Get returns a client matching discordId.
// NOTE(review): only discord_id is filtered, not guild_id — with the
// composite index a user may exist in several guilds; confirm callers expect
// an arbitrary one. On error the returned *Client is the zero value.
func (r *ClientRepository) Get(discordId string) (*Client, error) {
	db, err := GetDatabase()
	if err != nil {
		return nil, err
	}

	var c Client
	err = db.Find(&c, "discord_id = ?", discordId).Error
	return &c, err
}
|
// Package spec specifies valid audio formats
package spec
|
package 性质判定
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// ------------------ my own solution ------------------

// isSubStructure reports whether B is a substructure of A.
// By convention an empty tree is not a substructure of any tree, hence the
// explicit rejection of a nil B before delegating.
func isSubStructure(A *TreeNode, B *TreeNode) bool {
	if B == nil {
		return false
	}
	return getIsSubStructure(A, B)
}

// getIsSubStructure checks whether B matches the subtree rooted at A itself,
// or at any descendant of A.
func getIsSubStructure(A *TreeNode, B *TreeNode) bool {
	if B == nil {
		return true
	}
	if A == nil {
		return false
	}
	// Either B matches here (root values equal and both subtrees match),
	// or it matches somewhere deeper in A.
	return A.Val == B.Val && isSubStructureWithSpecificRoot(A.Left, B.Left) && isSubStructureWithSpecificRoot(A.Right, B.Right) ||
		getIsSubStructure(A.Left, B) ||
		getIsSubStructure(A.Right, B)
}

// isSubStructureWithSpecificRoot checks whether B matches the subtree rooted
// exactly at specificRoot; a nil B always matches (B may stop early).
func isSubStructureWithSpecificRoot(specificRoot, B *TreeNode) bool {
	if B == nil {
		return true
	}
	if specificRoot == nil {
		return false
	}
	return specificRoot.Val == B.Val && isSubStructureWithSpecificRoot(specificRoot.Left, B.Left) && isSubStructureWithSpecificRoot(specificRoot.Right, B.Right)
}
// ------------------ rewrite after reading the official editorial ------------------

// isSubStructure2 is the post-editorial variant, renamed from isSubStructure
// so it no longer redeclares the function above (two functions with the same
// name in one package do not compile). It folds the empty checks together:
// an empty tree is not a substructure of any tree.
func isSubStructure2(A *TreeNode, B *TreeNode) bool {
	if B == nil || A == nil {
		return false
	}
	return A.Val == B.Val && isSubStructureWithSpecificRoot(A.Left, B.Left) && isSubStructureWithSpecificRoot(A.Right, B.Right) ||
		isSubStructure2(A.Left, B) ||
		isSubStructure2(A.Right, B)
}
// NOTE(review): a second copy of isSubStructureWithSpecificRoot was removed
// here — it was byte-for-byte identical to the definition above, and Go does
// not allow two functions with the same name in one package.
/*
Summary:
1. The tricky part of this problem: by convention, an empty tree is NOT a
   substructure of any tree.
*/
|
package cutout
// executeFallbacks runs the fallback functions in order (cutout supports
// multi-level fallbacks) and returns the first successful response.
// If every fallback fails, the last response and error are returned; with no
// fallbacks at all, an empty Response and a nil error are returned.
func executeFallbacks(fbf []func() (*Response, error)) (*Response, error) {
	resp, err := &Response{}, error(nil)
	for _, fallback := range fbf {
		resp, err = fallback()
		if err == nil {
			break
		}
		// This level failed; try the next fallback.
	}
	return resp, err
}
|
package main
import (
"fmt"
"net/http"
)
type MyMux struct {
}
func (p *MyMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/" {
sayhelloName(w, r)
return
}
//open http://localhost:9090/alex
//it will run openALex function
if r.URL.Path =="/alex"{
openAlex(w,r)
return
}
http.NotFound(w, r)
return
}
func openAlex(w http.ResponseWriter,r *http.Request){
fmt.Fprintf(w,"hello alex!")
}
func sayhelloName(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello myroute!")
}
// main serves the custom multiplexer on :9090.
// NOTE(review): the error from ListenAndServe is discarded — consider logging it.
func main() {
	mux := &MyMux{}
	http.ListenAndServe(":9090", mux)
}
|
package main
import (
"fmt"
"image"
"gopkg.in/karalabe/cookiejar.v1/collections/deque"
)
// linkedmap records, for every visited coordinate, the set of unit-vector
// directions in which a door exists.
type linkedmap map[image.Point]map[image.Point]bool

// buildlinkedmap walks a regex-like route description (NSEW moves plus
// (..|..) branch groups) starting at the origin and records every door
// passed through, in both directions.
func buildlinkedmap(input string) linkedmap {
	start := image.Point{0, 0}
	s := deque.New() // stack of branch-group start positions
	s.PushRight(start)
	linked := linkedmap{}
	cur := image.Point{0, 0}
	dmap := map[rune]image.Point{'N': image.Point{0, -1}, 'S': image.Point{0, 1}, 'E': image.Point{1, 0}, 'W': image.Point{-1, 0}}
	for _, r := range input {
		switch r {
		case 'N', 'S', 'E', 'W':
			{
				delta := dmap[r]
				if _, found := linked[cur]; !found {
					linked[cur] = map[image.Point]bool{}
				}
				// Door from cur in direction delta...
				linked[cur][delta] = true
				cur = cur.Add(delta)
				backwards := delta.Mul(-1)
				if _, found := linked[cur]; !found {
					linked[cur] = map[image.Point]bool{}
				}
				// ...and the matching door back from the new position.
				linked[cur][backwards] = true
			}
		case '(':
			// Remember where this branch group started.
			s.PushRight(cur)
		case ')':
			// Group finished: drop its start position.
			s.PopRight()
		case '|':
			// Alternative branch: restart from the group's start position.
			cur = s.Right().(image.Point)
		}
	}
	return linked
}
// findmaxdoors runs a breadth-first search over the door map from the origin
// and returns the largest shortest-path distance (in doors) to any reachable
// room.
func findmaxdoors(linked linkedmap) int {
	start := image.Point{0, 0}
	maxdistance := 0
	// node pairs a room coordinate with its BFS distance from the origin.
	type node struct {
		distance int
		coord    image.Point
	}
	initial := node{0, start}
	q := deque.New()
	q.PushRight(initial)
	seen := map[image.Point]bool{start: true}
	for !q.Empty() {
		n := q.PopLeft().(node)
		// BFS dequeues rooms in nondecreasing distance, so tracking the
		// running maximum yields the farthest room's shortest distance.
		if n.distance > maxdistance {
			maxdistance = n.distance
		}
		for delta := range linked[n.coord] {
			dn := n.coord.Add(delta)
			if seen[dn] {
				continue
			}
			seen[dn] = true
			q.PushRight(node{n.distance + 1, dn})
		}
	}
	return maxdistance
}
// part1 prints the largest number of doors on a shortest path to any room.
func part1(input string) {
	lm := buildlinkedmap(input)
	fmt.Println("Max doors ", findmaxdoors(lm))
}

// main runs both puzzle parts against the embedded sample inputs.
// NOTE(review): part2 is not defined in this file — presumably it lives in a
// sibling file of this package; confirm.
func main() {
	part1(testdata1())
	part2(testdata2())
}

// testdata1 returns the sample route for part 1.
func testdata1() string {
	return `^WSSEESWWWNW(S|NENNEEEENN(ESSSSW(NWSW|SSEN)|WSWWN(E|WWS(E|SS))))$`
}

// testdata2 returns the sample route for part 2.
func testdata2() string {
	return `^ESSWWN(E|NNENN(EESS(WNSE|)SSS|WWWSSSSE(SW|NNNE)))$`
}
|
package repository
import (
"io/ioutil"
"net/http"
"net/url"
"path"
"strings"
"github.com/dghubble/sling"
"github.com/ghodss/yaml"
"github.com/jinzhu/copier"
"github.com/mojo-zd/helm-api/pkg/typed/charts"
"github.com/rs/zerolog/log"
helmrepo "helm.sh/helm/v3/pkg/repo"
)
// indexYAML is the well-known repository index file name.
var indexYAML = "index.yaml"

// repoOptions carries the connection and auth settings of one chart repository.
type repoOptions struct {
	name                  string
	url                   string
	username              string
	password              string
	certFile              string
	keyFile               string
	caFile                string
	insecureSkipTLSverify bool
	repoFile              string
	repoCache             string
}

// NewRepoOption create repo option
// Options are applied in order; later options win.
func NewRepoOption(opts ...repoOption) *repoOptions {
	opt := new(repoOptions)
	for _, o := range opts {
		o(opt)
	}
	return opt
}

// GetRepo get charts from repository
// It downloads and parses <url>/index.yaml, returning both the parsed index
// and the raw body; the raw body is still returned on parse failure.
func (repo *repoOptions) GetRepo() (*helmrepo.IndexFile, []byte, error) {
	u, err := parseURL(repo.url)
	if err != nil {
		return nil, nil, err
	}
	resp, err := repo.index(u)
	if err != nil {
		return nil, resp, err
	}
	index, err := repo.toIndexFile(resp)
	return index, resp, err
}
// ChartsFromIndex converts index entries into charts.Chart values tagged with
// this repository's name and URL. Charts whose latest version is deprecated
// are skipped; on a conversion error the charts collected so far are returned.
func (repo *repoOptions) ChartsFromIndex(index *helmrepo.IndexFile) []charts.Chart {
	// Renamed from `charts` — the local previously shadowed the imported
	// charts package inside this function.
	var result []charts.Chart
	for _, entry := range index.Entries {
		// Guard entry[0]: a malformed index can contain an entry with no
		// versions, which would otherwise panic here.
		if len(entry) == 0 {
			continue
		}
		// 如果最新版本都弃用了 则跳过该chart
		if entry[0].Deprecated {
			log.Warn().Str("chart", entry[0].Name).Msg("chart has deprecated!!!")
			continue
		}
		c, err := newChart(entry)
		if err != nil {
			return result
		}
		c.RepoName = repo.name
		c.RepoURL = repo.url
		result = append(result, c)
	}
	return result
}
// newChart maps the newest version of entry (entry[0]) onto a charts.Chart
// and copies the full version list into ChartVersions.
// NOTE(review): a failed ChartVersions copy is only logged and the chart is
// still returned with a nil error — confirm this best-effort behavior is
// intended.
func newChart(entry helmrepo.ChartVersions) (charts.Chart, error) {
	var c charts.Chart
	chartVer := entry[0]
	if err := copier.Copy(&c, chartVer); err != nil {
		log.Error().Err(err).Str("chart name", chartVer.Name).Msg("copy chart failed")
		return c, err
	}
	if err := copier.Copy(&c.ChartVersions, entry); err != nil {
		log.Error().Err(err).Str("chart name", chartVer.Name).Msg("copy chart version failed")
	}
	return c, nil
}
// toIndexFile parses a raw index.yaml body into a sorted helm IndexFile.
func (repo *repoOptions) toIndexFile(body []byte) (*helmrepo.IndexFile, error) {
	index := &helmrepo.IndexFile{}
	if err := yaml.Unmarshal(body, index); err != nil {
		log.Error().Err(err).Msg("byte to yaml failed")
		return nil, err
	}
	index.SortEntries()
	return index, nil
}
// index fetches <repo>/index.yaml and returns the raw response body.
// NOTE(review): the caller's indexURL is mutated (its Path gains the index
// file suffix) and the HTTP client has no timeout — confirm both are intended.
func (repo *repoOptions) index(indexURL *url.URL) ([]byte, error) {
	indexURL.Path = path.Join(indexURL.Path, indexYAML)
	request, err := sling.New().Get(indexURL.String()).Request()
	if err != nil {
		log.Error().Err(err).Str("request url", indexURL.String()).Msg("request index failed")
		return nil, err
	}
	client := new(http.Client)
	resp, err := client.Do(request)
	if err != nil {
		log.Error().Err(err).Str("request url", indexURL.String()).Msg("do request failed")
		return nil, err
	}
	// The previous deferred closure assigned Close's error to the local err
	// after the return values were already evaluated, silently discarding
	// it. Close best-effort instead.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error().Err(err).Msg("can't read response body")
		return nil, err
	}
	return body, nil
}
// parseURL trims surrounding whitespace and parses repoURL as an absolute
// request URL.
func parseURL(repoURL string) (*url.URL, error) {
	parsed, err := url.ParseRequestURI(strings.TrimSpace(repoURL))
	if err != nil {
		log.Error().Err(err).Msg("parse url failed")
		return nil, err
	}
	return parsed, nil
}
|
package main
import (
"fmt"
)
// Node is a single element of a binary search tree.
type Node struct {
	Value int
	Left  *Node
	Right *Node
}

// Tree is a binary search tree addressed through its root node.
type Tree struct {
	Root *Node
}

// Insert adds value to the tree, creating the root on first insert.
func (t *Tree) Insert(value int) {
	// First insert, set root.
	if t.Root == nil {
		t.Root = &Node{Value: value}
	} else {
		t.Root.Insert(value)
	}
}

// Print writes the tree's values to stdout in ascending order.
// NOTE(review): panics on an empty tree (nil Root) — confirm acceptable.
func (t *Tree) Print() {
	t.Root.PrintTree()
}

// Find reports whether value is present in the tree.
// NOTE(review): panics on an empty tree (nil Root).
func (t *Tree) Find(value int) bool {
	return t.Root.Find(value)
}
// Insert places value into the subtree rooted at n, preserving BST order.
// Duplicate values are a no-op.
func (n *Node) Insert(value int) {
	switch {
	case value < n.Value:
		if n.Left != nil {
			// Recurse into the left subtree.
			n.Left.Insert(value)
			return
		}
		n.Left = &Node{Value: value}
	case value > n.Value:
		if n.Right != nil {
			// Recurse into the right subtree.
			n.Right.Insert(value)
			return
		}
		n.Right = &Node{Value: value}
	}
}
// PrintTree prints the subtree rooted at n in ascending (in-order) order.
func (n *Node) PrintTree() {
	if left := n.Left; left != nil {
		left.PrintTree()
	}
	fmt.Println(n.Value)
	if right := n.Right; right != nil {
		right.PrintTree()
	}
}

// Find reports whether value exists in the subtree rooted at n.
func (n *Node) Find(value int) bool {
	switch {
	case value < n.Value:
		return n.Left != nil && n.Left.Find(value)
	case value > n.Value:
		return n.Right != nil && n.Right.Find(value)
	default:
		return true
	}
}
// Height returns the number of nodes on the longest root-to-leaf path.
func (n *Node) Height() int {
	left, right := 0, 0
	if n.Left != nil {
		left = n.Left.Height()
	}
	if n.Right != nil {
		right = n.Right.Height()
	}
	return 1 + maxInt(left, right)
}
// maxInt returns the larger of a and b.
func maxInt(a int, b int) int {
	if b > a {
		return b
	}
	return a
}
// main builds a small sample tree and demonstrates Print, Find and Height.
func main() {
	t := Tree{}
	t.Insert(7)
	t.Insert(1)
	t.Insert(14)
	t.Insert(4)
	t.Insert(9)
	t.Insert(2)
	t.Insert(0)
	t.Print()
	fmt.Println(t.Find(4))
	fmt.Println(t.Root.Height())
}
|
/*
* Licensed to the OpenSkywalking under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenSkywalking licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package trace
import "fmt"
// CarrierItem is one node in a linked list of trace-propagation header
// key/value pairs.
type CarrierItem struct {
	headKey   string // header name, optionally namespace-prefixed
	headValue string // serialized header value
	next      *CarrierItem
}

// newCarrierItem builds a CarrierItem linked to next. When namespace is
// non-empty the header key is prefixed as "<namespace>-<headKey>".
func newCarrierItem(headKey string, headValue string, next *CarrierItem, namespace string) *CarrierItem {
	item := &CarrierItem{headValue: headValue, next: next}
	if len(namespace) > 0 {
		item.headKey = fmt.Sprintf("%s-%s", namespace, headKey)
	} else {
		item.headKey = headKey
	}
	return item
}
// SW3CarrierItem couples the "sw3" propagation header item with the context
// carrier it serializes from and deserializes into.
type SW3CarrierItem struct {
	carrier        *CarrierItem
	contextCarrier ContextCarrier
}

// newSW3CarrierItem builds the "sw3" header item linked into the chain at next.
func newSW3CarrierItem(carrier ContextCarrier, next *CarrierItem, namespace string) *SW3CarrierItem {
	sw := &SW3CarrierItem{}
	// Retain the context carrier so setHeadValue can deserialize into it;
	// previously this field was never set, leaving it nil.
	sw.contextCarrier = carrier
	// Bug fix: the original immediately overwrote this freshly built item
	// with next, discarding the sw3 header entirely. newCarrierItem already
	// links next into the chain.
	sw.carrier = newCarrierItem("sw3", *carrier.Serialize(), next, namespace)
	return sw
}
// getHeadValue returns the serialized value of the sw3 header item.
func (c *SW3CarrierItem) getHeadValue() string {
	return c.carrier.headValue
}

// setHeadValue deserializes value into the wrapped context carrier.
func (c *SW3CarrierItem) setHeadValue(value string) error {
	if _, err := c.contextCarrier.Deserialize(value); err != nil {
		return err
	}
	return nil
}
// CarrierItemHead is the sentinel head of a carrier-item chain.
type CarrierItemHead struct {
	carrierItem CarrierItem
}

// newCarrierItemHead builds a head node with an empty key/value linking to next.
// NOTE(review): with a non-empty namespace the empty key still becomes
// "<namespace>-" — confirm that prefix on the sentinel is intended.
func newCarrierItemHead(next *CarrierItem, namespace string) *CarrierItemHead {
	c := &CarrierItemHead{}
	c.carrierItem = *newCarrierItem("", "", next, namespace)
	return c
}
|
package main
import "fmt"
// money represents a currency amount in dollars.
type money float64

// Currency sets m to the given dollar amount and prints it.
// The receiver must be a pointer: the original value receiver assigned to a
// copy, so the new amount was lost as soon as the method returned.
func (m *money) Currency(dollar money) {
	*m = dollar
	fmt.Println(*m)
}

// main demonstrates Currency on a zero-valued money variable.
func main() {
	var dollar money = 70.4
	var m1 money
	m1.Currency(dollar)
}
|
package synctest
import "sync"
// NotifyingLocker is an implementation of sync.Locker that notifies callers when
// locks and unlocks happen. otherwise, it behaves identically as a sync.Mutex.
//
// Example usage:
// nl := NewNotifyingLocker()
// lch := nl.NotifyLock()
// uch := nl.NotifyUnlock()
// go func() {
// nl.Lock()
// }()
// go func() {
// <-lch // wait for nl.Lock() to be called in the goroutine above
// nl.Unlock()
// }()
// <-uch // wait for nl.Unlock to be called
type NotifyingLocker struct {
	unlockChans     []chan struct{} // subscribers waiting for an Unlock
	unlockChansLock *sync.Mutex     // guards unlockChans
	lockChans       []chan struct{} // subscribers waiting for a Lock
	lockChansLock   *sync.Mutex     // guards lockChans
	lck             *sync.Mutex     // the mutex being observed
}

// NewNotifyingLocker creates a new NotifyingLocker ready for use
func NewNotifyingLocker() *NotifyingLocker {
	return &NotifyingLocker{
		unlockChans:     nil,
		unlockChansLock: &sync.Mutex{},
		lockChans:       nil,
		lockChansLock:   &sync.Mutex{},
		lck:             &sync.Mutex{},
	}
}

// NotifyLock returns a channel that will close when n is locked. The channel
// never sends and will be closed immediately if n is already locked
// NOTE(review): the implementation only closes channels inside Lock(), so a
// subscriber added while n is already held is notified on the NEXT Lock, not
// immediately — confirm the doc claim above against intended behavior.
func (n *NotifyingLocker) NotifyLock() <-chan struct{} {
	n.lockChansLock.Lock()
	defer n.lockChansLock.Unlock()
	ch := make(chan struct{})
	n.lockChans = append(n.lockChans, ch)
	return ch
}

// NotifyUnlock returns a channel that will close when n is unlocked. The channel
// never sends and will be closed immediately if n is already unlocked.
// NOTE(review): as with NotifyLock, closing only happens inside Unlock().
func (n *NotifyingLocker) NotifyUnlock() <-chan struct{} {
	n.unlockChansLock.Lock()
	defer n.unlockChansLock.Unlock()
	ch := make(chan struct{})
	n.unlockChans = append(n.unlockChans, ch)
	return ch
}

// Lock locks n and closes all unclosed channels returned previously by NotifyLock
func (n *NotifyingLocker) Lock() {
	// Acquire the real lock first so subscribers are notified only once the
	// lock is actually held.
	n.lck.Lock()
	n.lockChansLock.Lock()
	defer n.lockChansLock.Unlock()
	for _, lck := range n.lockChans {
		close(lck)
	}
	// Drop the list: each channel must be closed exactly once.
	n.lockChans = nil
}

// Unlock unlocks n and closes all unclosed channels returned previously by NotifyUnlock
func (n *NotifyingLocker) Unlock() {
	// Release the real lock first, then notify waiters.
	n.lck.Unlock()
	n.unlockChansLock.Lock()
	defer n.unlockChansLock.Unlock()
	for _, lck := range n.unlockChans {
		close(lck)
	}
	n.unlockChans = nil
}
|
package main
import (
"flag"
"fmt"
"strconv"
"strings"
"github.com/dah8ra/ch4/xkcdcom"
)
var word = flag.String("w", "default", "Search word")
const preurl = "https://xkcd.com/"
const sufurl = "/info.0.json"
var x xkcdcom.Xkcd
// main fetches xkcd comic metadata for a hard-coded range of comic numbers
// and then prints every transcript containing the -w search word.
// NOTE(review): the 570..571 range looks like a debugging leftover — confirm
// the intended comic range. flag.Parse is also called only after the fetch
// loop, so -w cannot influence which comics are fetched.
func main() {
	m := make(map[string]string)
	for i := 570; i < 572; i++ {
		url := preurl + strconv.Itoa(i) + sufurl
		fmt.Printf("-------> %s\n", url)
		// The fetch error is deliberately ignored; a failed fetch leaves an
		// empty transcript for this URL.
		r, _ := xkcdcom.Get(url)
		m[url] = r.Transcript
		fmt.Printf("#%-5d %s %.10s\n", r.Num, r.Title, r.Transcript)
	}
	fmt.Println("@@@@@@@@@@@@")
	flag.Parse()
	// search := []byte(*word)
	for url, trans := range m {
		if strings.Contains(trans, *word) {
			fmt.Printf("%s\n%s\n@@@\n", url, trans)
		}
	}
}
|
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package sample
import (
"net/http"
"google.golang.org/appengine/log"
)
// [START communication_between_modules_1]
import "google.golang.org/appengine"
// handler logs which App Engine module and instance served the request.
func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	module := appengine.ModuleName(ctx)
	instance := appengine.InstanceID()
	log.Infof(ctx, "Received on module %s; instance %s", module, instance)
}

// [END communication_between_modules_1]

// handler2 resolves the hostname of the "my-backend" module and builds a URL
// for module-to-module communication (sample code; the URL is unused here).
func handler2(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	// [START communication_between_modules_2]
	hostname, err := appengine.ModuleHostname(ctx, "my-backend", "", "")
	if err != nil {
		// ...
	}
	url := "http://" + hostname + "/"
	// ...
	// [END communication_between_modules_2]
	_ = url
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.