text
stringlengths 11
4.05M
|
|---|
package theory
import (
"buddin.us/musictheory"
"buddin.us/musictheory/intervals"
lua "github.com/yuin/gopher-lua"
)
// newChord is a Lua constructor. It expects a pitch string (argument 1,
// e.g. "C4") and a chord-name string (argument 2), looks up the matching
// interval series, and pushes a chord userdata onto the Lua stack.
// It raises a Lua error for an unparsable pitch or an unknown chord name.
func newChord(state *lua.LState) int {
	pitch := state.CheckString(1)
	root, err := musictheory.ParsePitch(pitch)
	if err != nil {
		state.RaiseError(err.Error())
	}
	var (
		name   = state.CheckString(2)
		series []musictheory.Interval
	)
	// Both the long name and the common abbreviation are accepted.
	switch name {
	case "majorTriad", "maj":
		series = intervals.MajorTriad
	case "majorSixth", "maj6":
		series = intervals.MajorSixth
	case "majorSeventh", "maj7":
		series = intervals.MajorSeventh
	case "dominantSeventh":
		series = intervals.DominantSeventh
	case "minorTriad", "min":
		series = intervals.MinorTriad
	case "minorSixth", "min6":
		series = intervals.MinorSixth
	case "minorSeventh", "min7":
		series = intervals.MinorSeventh
	case "halfDiminishedSeventh", "min7b5":
		series = intervals.HalfDiminishedSeventh
	case "diminishedTriad", "dim":
		series = intervals.DiminishedTriad
	case "diminishedSeventh", "dim7":
		series = intervals.DiminishedSeventh
	case "diminishedMajorSeventh", "dimMaj7":
		series = intervals.DiminishedMajorSeventh
	case "augmentedTriad", "aug":
		series = intervals.AugmentedTriad
	case "augmentedSixth", "aug6":
		series = intervals.AugmentedSixth
	case "augmentedSeventh", "aug7":
		series = intervals.AugmentedSeventh
	case "augmentedMajorSeventh", "augMaj7":
		series = intervals.AugmentedMajorSeventh
	default:
		// BUG FIX: this constructor builds chords, not scales; the old
		// message incorrectly said "unknown scale intervals".
		state.RaiseError("unknown chord intervals %s", name)
	}
	state.Push(newChordUserData(state, musictheory.NewChord(root, series)))
	return 1
}
// newChordUserData wraps a musictheory.Chord in a Lua userdata whose
// metatable exposes the methods pitches, transpose, invert and size.
func newChordUserData(state *lua.LState, chord musictheory.Chord) *lua.LUserData {
	fns := state.NewTable()
	state.SetFuncs(fns, map[string]lua.LGFunction{
		// pitches returns a Lua array of pitch userdata, one per chord tone.
		"pitches": func(ls *lua.LState) int {
			c := ls.CheckUserData(1).Value.(musictheory.Chord)
			list := ls.NewTable()
			for _, p := range c {
				list.Append(newPitchUserData(ls, p))
			}
			ls.Push(list)
			return 1
		},
		// transpose shifts the whole chord by an interval userdata.
		"transpose": func(ls *lua.LState) int {
			c := ls.CheckUserData(1).Value.(musictheory.Chord)
			arg := ls.CheckUserData(2)
			interval, ok := arg.Value.(musictheory.Interval)
			if !ok {
				ls.RaiseError("argument is not an interval")
				return 1
			}
			ls.Push(newChordUserData(ls, c.Transpose(interval).(musictheory.Chord)))
			return 1
		},
		// invert produces the chord's n-th inversion.
		"invert": func(ls *lua.LState) int {
			c := ls.CheckUserData(1).Value.(musictheory.Chord)
			degree := int(ls.CheckNumber(2))
			ls.Push(newChordUserData(ls, c.Invert(degree)))
			return 1
		},
		// size reports the number of chord tones.
		"size": func(ls *lua.LState) int {
			c := ls.CheckUserData(1).Value.(musictheory.Chord)
			ls.Push(lua.LNumber(len(c)))
			return 1
		},
	})
	meta := state.NewTable()
	meta.RawSetString("__index", fns)
	return &lua.LUserData{
		Metatable: meta,
		Value:     chord,
	}
}
|
package fakes
import "github.com/cloudfoundry-incubator/notifications/models"
// KindsRepo is an in-memory test double for the notification-kinds
// repository. Kinds is keyed by kind.ID + kind.ClientID. The *Error
// fields let a test inject failures into the corresponding methods, and
// TrimArguments records the arguments of the most recent Trim call.
type KindsRepo struct {
	Kinds         map[string]models.Kind
	UpsertError   error
	TrimError     error
	FindError     error
	TrimArguments []interface{}
}
// NewKindsRepo returns a fake repo with its backing map and the Trim
// argument recorder pre-initialized.
func NewKindsRepo() *KindsRepo {
	repo := &KindsRepo{}
	repo.Kinds = map[string]models.Kind{}
	repo.TrimArguments = []interface{}{}
	return repo
}
// Create stores kind under ID+ClientID, failing with ErrDuplicateRecord
// when an entry for that key already exists.
func (fake *KindsRepo) Create(conn models.ConnectionInterface, kind models.Kind) (models.Kind, error) {
	key := kind.ID + kind.ClientID
	_, exists := fake.Kinds[key]
	if exists {
		return kind, models.ErrDuplicateRecord{}
	}
	fake.Kinds[key] = kind
	return kind, nil
}
// Update unconditionally overwrites the stored kind for ID+ClientID.
func (fake *KindsRepo) Update(conn models.ConnectionInterface, kind models.Kind) (models.Kind, error) {
	fake.Kinds[kind.ID+kind.ClientID] = kind
	return kind, nil
}
// Upsert stores the kind (insert-or-replace) and returns the injected
// UpsertError, letting tests simulate a failed upsert after the write.
func (fake *KindsRepo) Upsert(conn models.ConnectionInterface, kind models.Kind) (models.Kind, error) {
	fake.Kinds[kind.ID+kind.ClientID] = kind
	return kind, fake.UpsertError
}
// Find looks up id+clientID. A miss yields ErrRecordNotFound; a hit
// returns the stored kind together with the injected FindError.
func (fake *KindsRepo) Find(conn models.ConnectionInterface, id, clientID string) (models.Kind, error) {
	kind, ok := fake.Kinds[id+clientID]
	if !ok {
		return models.Kind{}, models.ErrRecordNotFound{}
	}
	return kind, fake.FindError
}
// Trim only records its arguments for later inspection and returns the
// injected TrimError; the trimmed-row count is always 0 in this fake.
func (fake *KindsRepo) Trim(conn models.ConnectionInterface, clientID string, kindIDs []string) (int, error) {
	fake.TrimArguments = []interface{}{clientID, kindIDs}
	return 0, fake.TrimError
}
|
package main
import (
"errors"
"image"
"image/color"
"image/jpeg"
_ "image/png"
"log"
"math"
"os"
"runtime"
"runtime/pprof"
"time"
"github.com/samuel/go-astar/astar"
)
// abs returns the absolute value of v.
func abs(v int) int {
	if v >= 0 {
		return v
	}
	return -v
}
// ImageMap adapts one channel of a decoded image into a flat cost grid
// for A* path finding, plus a setter for drawing results back into the
// source image.
type ImageMap struct {
	Pix              []byte // single-channel pixel data (a view into the source image's buffer)
	YStride, XStride int    // byte offsets in Pix between rows / between horizontally adjacent pixels
	Width, Height    int    // image dimensions in pixels
	Stddev           float64 // standard deviation of the channel values, computed in NewImageMap
	setter           func(x, y int, c color.Color) // format-specific write-back into the source image
}
// colorCost is the squared difference between two 8-bit channel values,
// used as the color component of an edge cost.
func colorCost(c1, c2 byte) float64 {
	d := float64(int(c1) - int(c2))
	return d * d
}
// NewImageMap wraps a decoded image in an ImageMap exposing one intensity
// channel for cost computation. Supported formats: *image.YCbCr (uses the
// Y plane), *image.RGBA (uses the green channel via Pix[1:] with stride 4),
// and *image.Gray. Other formats return an error. It also computes the
// standard deviation of the channel values with Welford's online algorithm.
//
// NOTE(review): indexing assumes the image bounds start at (0,0) — confirm
// for images whose Rect.Min is non-zero.
func NewImageMap(img image.Image) (*ImageMap, error) {
	var im *ImageMap
	switch m := img.(type) {
	case *image.YCbCr:
		im = &ImageMap{
			Pix:     m.Y,
			YStride: m.YStride,
			XStride: 1,
			Width:   img.Bounds().Dx(),
			Height:  img.Bounds().Dy(),
			Stddev:  1.0,
		}
		// Chroma planes are subsampled; work out how many luma samples map
		// onto one chroma sample in each direction so the setter can write
		// Cb/Cr at the right offsets.
		var verticalRes, horizontalRes int
		switch m.SubsampleRatio {
		case image.YCbCrSubsampleRatio420:
			verticalRes = 2
			horizontalRes = 2
		case image.YCbCrSubsampleRatio422:
			verticalRes = 1
			horizontalRes = 2
		case image.YCbCrSubsampleRatio440:
			verticalRes = 2
			horizontalRes = 1
		case image.YCbCrSubsampleRatio444:
			verticalRes = 1
			horizontalRes = 1
		default:
			return nil, errors.New("unsupported YCbCr subsample ratio")
		}
		// Convert the RGBA color to YCbCr and write all three planes.
		im.setter = func(x, y int, c color.Color) {
			r, g, b, _ := c.RGBA()
			yc, cb, cr := color.RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
			m.Y[y*m.YStride+x] = yc
			off := y/verticalRes*m.CStride + x/horizontalRes
			m.Cb[off] = cb
			m.Cr[off] = cr
		}
	case *image.RGBA:
		im = &ImageMap{
			// Pix[1:] with XStride 4 selects the green byte of each RGBA pixel.
			Pix:     m.Pix[1:],
			YStride: m.Stride,
			XStride: 4,
			Width:   img.Bounds().Dx(),
			Height:  img.Bounds().Dy(),
			Stddev:  1.0,
			setter:  m.Set,
		}
	case *image.Gray:
		im = &ImageMap{
			Pix:     m.Pix,
			YStride: m.Stride,
			XStride: 1,
			Width:   img.Bounds().Dx(),
			Height:  img.Bounds().Dy(),
			Stddev:  1.0,
			setter:  m.Set,
		}
	default:
		return nil, errors.New("Unsupported image format")
	}
	// Welford's online mean/variance. m == -1 is the "no samples yet"
	// sentinel, which is safe because channel values are non-negative.
	m := -1.0
	s := 0.0
	count := 0
	for y := 0; y < im.Height; y++ {
		for x := 0; x < im.Width; x++ {
			count++
			v := float64(im.Pix[y*im.YStride+x*im.XStride])
			oldM := m
			if oldM == -1 {
				m = v
				s = 0
			} else {
				m = oldM + ((v - oldM) / float64(count))
				s += (v - oldM) * (v - m)
			}
		}
	}
	// Sample standard deviation. NOTE(review): a 1x1 image gives
	// count-1 == 0 and hence a NaN Stddev — confirm such inputs cannot occur.
	stddev := math.Sqrt(s / float64(count-1))
	im.Stddev = stddev
	return im, nil
}
// Neighbors appends an edge for each of the up-to-8 pixels adjacent to
// node. Edge cost = movement cost (1 for orthogonal steps, sqrt(2) for
// diagonals) plus the squared intensity difference between the two pixels
// (colorCost). Node IDs are flattened indices: node = y*Width + x.
func (im *ImageMap) Neighbors(node astar.Node, edges []astar.Edge) ([]astar.Edge, error) {
	x := int(node) % im.Width
	y := int(node) / im.Width
	// Byte offset of this pixel's channel value in Pix.
	off := y*im.YStride + x*im.XStride
	c := im.Pix[off]
	// West column: west, north-west, south-west.
	if x > 0 {
		edges = append(edges, astar.Edge{Node: node - 1, Cost: 1 + colorCost(c, im.Pix[off-im.XStride])})
		if y > 0 {
			edges = append(edges, astar.Edge{Node: node - 1 - astar.Node(im.Width), Cost: math.Sqrt2 + colorCost(c, im.Pix[off-im.XStride-im.YStride])})
		}
		if y < im.Height-1 {
			edges = append(edges, astar.Edge{Node: node - 1 + astar.Node(im.Width), Cost: math.Sqrt2 + colorCost(c, im.Pix[off-im.XStride+im.YStride])})
		}
	}
	// East column: east, north-east, south-east.
	if x < im.Width-1 {
		edges = append(edges, astar.Edge{Node: node + 1, Cost: 1 + colorCost(c, im.Pix[off+im.XStride])})
		if y > 0 {
			edges = append(edges, astar.Edge{Node: node + 1 - astar.Node(im.Width), Cost: math.Sqrt2 + colorCost(c, im.Pix[off+im.XStride-im.YStride])})
		}
		if y < im.Height-1 {
			edges = append(edges, astar.Edge{Node: node + 1 + astar.Node(im.Width), Cost: math.Sqrt2 + colorCost(c, im.Pix[off+im.XStride+im.YStride])})
		}
	}
	// Straight north and south.
	if y > 0 {
		edges = append(edges, astar.Edge{Node: node - astar.Node(im.Width), Cost: 1 + colorCost(c, im.Pix[off-im.YStride])})
	}
	if y < im.Height-1 {
		edges = append(edges, astar.Edge{Node: node + astar.Node(im.Width), Cost: 1 + colorCost(c, im.Pix[off+im.YStride])})
	}
	return edges, nil
}
// HeuristicCost returns the straight-line (Euclidean) pixel distance
// between two flattened node indices. The commented-out scaling by
// Stddev/2 was experimented with and left disabled.
func (im *ImageMap) HeuristicCost(start, end astar.Node) (float64, error) {
	dy := abs(int(end)/im.Width - int(start)/im.Width)
	dx := abs(int(end)%im.Width - int(start)%im.Width)
	return math.Sqrt(float64(dy*dy + dx*dx)), nil // * im.Stddev / 2, nil
}
// Set writes color c at (x, y) in the underlying source image by
// delegating to the format-specific setter chosen in NewImageMap.
func (im *ImageMap) Set(x, y int, c color.Color) {
	im.setter(x, y, c)
}
// main decodes the image named by os.Args[1], runs A* from the top-left
// pixel to the bottom-right pixel, paints the found path green, and
// writes the result to out.jpg. Supplying any second argument enables CPU
// profiling to cpu.prof.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("syntax: imagepath [path]")
	}
	rd, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer rd.Close()
	img, _, err := image.Decode(rd)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("Processing image")
	im, err := NewImageMap(img)
	if err != nil {
		log.Fatal(err)
	}
	// The mere presence of a second argument turns profiling on; its value
	// is not used.
	if len(os.Args) > 2 {
		wr, err := os.Create("cpu.prof")
		if err != nil {
			log.Fatal(err)
		}
		defer wr.Close()
		if err := pprof.StartCPUProfile(wr); err != nil {
			log.Fatal(err)
		}
	}
	log.Println("Finding path")
	// Snapshot allocation stats so the search's memory use can be reported.
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	totalAlloc := memStats.TotalAlloc
	t := time.Now()
	// Nodes are flattened indices (y*width + x): start at pixel (0,0),
	// end at the bottom-right pixel.
	path, err := astar.FindPath(im, 0, astar.Node(img.Bounds().Dx()-1+img.Bounds().Dx()*(img.Bounds().Dy()-1)))
	// Safe even when profiling was never started (no-op in that case).
	pprof.StopCPUProfile()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("\t%d ms", time.Since(t).Nanoseconds()/1e6)
	runtime.ReadMemStats(&memStats)
	log.Printf("\t%d MB allocated", (memStats.TotalAlloc-totalAlloc)/(1024*1024))
	log.Printf("Nodes in path: %d", len(path))
	log.Println("Rendering path")
	// im.Set writes through to img's pixel buffers, so the encoded output
	// below includes the painted path.
	for _, node := range path {
		x := int(node) % img.Bounds().Dx()
		y := int(node) / img.Bounds().Dx()
		im.Set(x, y, color.RGBA{0, 255, 0, 255})
	}
	log.Println("Encoding/writing output image")
	wr, err := os.Create("out.jpg")
	if err != nil {
		log.Fatal(err)
	}
	if err := jpeg.Encode(wr, img, nil); err != nil {
		log.Fatal(err)
	}
	wr.Close()
}
|
// ˅
package main
import (
"github.com/lxn/walk"
)
// ˄
// ColleagueRadioButton adapts a walk.RadioButton to the Colleague role of
// the Mediator pattern (the ˅/˄ markers are code-generator anchors and
// are preserved).
type ColleagueRadioButton struct {
	// ˅
	// ˄
	Colleague
	radioButton *walk.RadioButton
	// ˅
	// ˄
}
// NewColleagueRadioButton wraps the given radio button in a new
// Colleague, ready to be registered with a Mediator.
func NewColleagueRadioButton(radioButton *walk.RadioButton) *ColleagueRadioButton {
	// ˅
	c := &ColleagueRadioButton{}
	c.Colleague = *NewColleague()
	c.radioButton = radioButton
	return c
	// ˄
}
// SetActivation enables or disables the underlying radio button.
// Called by the Mediator to coordinate widget state.
func (self *ColleagueRadioButton) SetActivation(isEnable bool) {
	// ˅
	self.radioButton.SetEnabled(isEnable)
	// ˄
}
// OnClicked notifies the mediator that this colleague's state changed.
func (self *ColleagueRadioButton) OnClicked() {
	// ˅
	self.mediator.ColleagueChanged()
	// ˄
}
// IsSelected reports whether the radio button is currently checked.
func (self *ColleagueRadioButton) IsSelected() bool {
	// ˅
	return self.radioButton.Checked()
	// ˄
}
// ˅
// ˄
|
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
"strings"
)
// main reads n from stdin and prints the maximum digit sum found among
// candidates derived from n. Each iteration lowers digit i of n by one
// (when it isn't 9), which — via borrowing — nine-fills the positions
// below it, producing the best candidate with a smaller prefix.
// Presumably this solves "maximum digit sum over all integers <= n";
// TODO confirm against the original problem statement.
func main() {
	n := readInt64()
	if n == 0 {
		fmt.Println(0)
		os.Exit(0)
	}
	// A single-digit number is its own digit sum.
	if n < 10 {
		fmt.Println(n)
		os.Exit(0)
	}
	N := getNumberOfDigits(n)
	maxnum := int64(0)
	for i := 1; i < N; i++ {
		digitVal := getDigitValue(n, i)
		// Subtracting (digit+1)*10^(i-1) decrements the digit above
		// position i-1 and turns lower positions into 9s.
		if digitVal != 9 {
			n = n - ((int64(digitVal) + 1) * int64(math.Pow(10.0, float64(i-1))))
		}
		// fmt.Println(i, ":", n)
		maxnum = max(maxnum, calcDigitsSum(n))
	}
	fmt.Println(maxnum)
}
// getNumberOfDigits counts the decimal digits of n (returns 1 for 0).
func getNumberOfDigits(n int64) int {
	digits := 1
	for n /= 10; n != 0; n /= 10 {
		digits++
	}
	return digits
}
// getDigitValue returns the idx-th decimal digit of n, counting from the
// least-significant digit with idx starting at 1. When idx exceeds the
// number of digits it logs an error message and returns -1.
func getDigitValue(n int64, idx int) int {
	for pos := 1; ; pos++ {
		digit := int(n % 10)
		n /= 10
		if pos == idx {
			return digit
		}
		if n == 0 {
			fmt.Println("Error@getDigitValue")
			return -1 // invalid value
		}
	}
}
// calcDigitsSum returns the sum of the decimal digits of n.
func calcDigitsSum(n int64) int64 {
	sum := n % 10
	for n /= 10; n != 0; n /= 10 {
		sum += n % 10
	}
	return sum
}
// max returns the larger of a and b.
func max(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
// readInt64 reads a single whitespace-delimited integer from stdin.
// The fmt.Scan error is ignored; on failure a is left at 0.
func readInt64() int64 {
	var a int64
	fmt.Scan(&a)
	return a
}
// readInt64Line reads one line from stdin and parses it as
// space-separated int64 values, panicking on a parse failure.
// NOTE(review): a fresh bufio.Scanner is created on every call, so
// interleaving this with other stdin readers can lose buffered input —
// confirm callers stick to a single read helper.
func readInt64Line() []int64 {
	var sc = bufio.NewScanner(os.Stdin)
	sc.Scan()
	line := sc.Text()
	vals := strings.Split(line, " ")
	var vec []int64
	for _, v := range vals {
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			panic(err)
		}
		vec = append(vec, n)
	}
	return vec
}
// sumInt64Array returns the sum of all elements of arr (0 for an empty
// or nil slice).
func sumInt64Array(arr []int64) int64 {
	total := int64(0)
	for i := range arr {
		total += arr[i]
	}
	return total
}
// 3141592653589793
|
package models
// PokemonSpecies follows the naming convention provided here: https://pokeapi.co/docs/v2.html/#pokemon-species
//
// For the purposes of this test we have only implemented the fields we require
type PokemonSpecies struct {
	Name              string       `json:"name"`                // species name, e.g. "pikachu"
	FlavorTextEntries []FlavorText `json:"flavor_text_entries"` // localized Pokédex flavor-text entries
}
|
// Copyright (c) 2013 - Max Persson <max@looplab.se>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fsm
// Event is the info that gets passed as a reference in the callbacks.
type Event struct {
	// FSM is a reference to the current FSM.
	FSM *FSM
	// Event is the event name.
	Event string
	// Src is the state before the transition.
	Src string
	// Dst is the state after the transition.
	Dst string
	// Err is an optional error that can be returned from a callback.
	Err error
	// Args is an optional list of arguments passed to the callback.
	Args []interface{}
	// canceled is an internal flag set if the transition is canceled.
	canceled bool
	// async is an internal flag set if the transition should be asynchronous.
	async bool
	// cancelFunc is called in case the event is canceled.
	cancelFunc func()
}
// Cancel can be called in before_<EVENT> or leave_<STATE> to cancel the
// current transition before it happens. It takes an optional error, which
// will overwrite e.Err if set before.
func (e *Event) Cancel(err ...error) {
	e.canceled = true
	// Set Err before invoking the cancel callback so the callback observes
	// the final error value.
	if len(err) > 0 {
		e.Err = err[0]
	}
	// BUG FIX: guard against events constructed without a cancelFunc, which
	// previously caused a nil-function panic here.
	if e.cancelFunc != nil {
		e.cancelFunc()
	}
}
// Async can be called in leave_<STATE> to do an asynchronous state transition.
//
// The current state transition will be on hold in the old state until a final
// call to Transition is made. This will complete the transition and possibly
// call the other callbacks.
func (e *Event) Async() {
	// Only sets the flag; the FSM machinery reads it to defer the transition.
	e.async = true
}
|
package main
import "fmt"
// twoSum returns the indices of the two elements of nums that add up to
// target, or nil when no such pair exists. A value→index map makes this a
// single O(n) pass.
func twoSum(nums []int, target int) []int {
	seen := map[int]int{}
	for idx, val := range nums {
		if prev, found := seen[target-val]; found {
			return []int{prev, idx}
		}
		seen[val] = idx
	}
	return nil
}
// main demonstrates twoSum on the classic example; prints "[0 1]".
func main() {
	fmt.Println(twoSum([]int{2, 7, 9, 11}, 9))
}
|
package background
// RepeatType enumerates CSS background-repeat keyword values.
type RepeatType string

// Background-repeat keywords. Grouped into a single const block (idiomatic
// Go) — names and values are unchanged.
const (
	Repeat        RepeatType = "repeat"
	RepeatX       RepeatType = "repeat-x"
	RepeatY       RepeatType = "repeat-y"
	NoRepeat      RepeatType = "no-repeat"
	RepeatInitial RepeatType = "initial"
	RepeatInherit RepeatType = "inherit"
)

// PositionType enumerates CSS background-position keyword values.
type PositionType string

// Background-position keywords.
const (
	LeftTop      PositionType = "left top"
	LeftCenter   PositionType = "left center"
	LeftBottom   PositionType = "left bottom"
	RightTop     PositionType = "right top"
	RightCenter  PositionType = "right center"
	RightBottom  PositionType = "right bottom"
	CenterTop    PositionType = "center top"
	CenterCenter PositionType = "center center"
	CenterBottom PositionType = "center bottom"
	BottomLeft   PositionType = "bottom left"
	BottomCenter PositionType = "bottom center"
	Top          PositionType = "top"
)
|
package cart
import (
"github.com/gingerxman/eel"
)
// CartItem is one shopping-cart row: a product SKU chosen by a user
// within a corp, with the selected quantity.
type CartItem struct {
	eel.Model
	UserId                int
	CorpId                int
	PoolProductId         int    `gorm:"index"`
	ProductSkuName        string `gorm:"size:256"`
	ProductSkuDisplayName string `gorm:"size:256"`
	Count                 int
}
// TableName maps CartItem to the "cart_item" database table.
func (m *CartItem) TableName() string {
	return "cart_item"
}
// TableIndex declares a composite index over UserId, CorpId and
// PoolProductId.
func (m *CartItem) TableIndex() [][]string {
	return [][]string{
		{"UserId", "CorpId", "PoolProductId"},
	}
}
// init registers CartItem with the eel ORM at package load time.
func init() {
	eel.RegisterModel(new(CartItem))
}
|
package onelogin
import (
"fmt"
"errors"
"strconv"
"github.com/op/go-logging"
)
// New builds a OneLogin API client for the given shard and credentials,
// then applies the requested log level.
func New(shard string, client_id string, client_secret string, subdomain string, loglevel logging.Level) *OneLogin {
	instance := &OneLogin{
		Shard:         shard,
		Client_id:     client_id,
		Client_secret: client_secret,
		SubDomain:     subdomain,
	}
	instance.SetLogLevel(loglevel)
	return instance
}
// SetLogLevel sets the package-level logging verbosity used by this client.
func (o *OneLogin) SetLogLevel(loglevel logging.Level) {
	SetLogLevel(loglevel)
}
// GetUrl builds a full API URL from a URI template and optional fmt-style
// string arguments (for templated URIs such as "/api/1/user/%s/roles").
// It prefers o.CustomURL as the base when set, otherwise the shard URL.
func (o *OneLogin) GetUrl(uri string, args ...string) string {
	full := uri
	if len(args) > 0 {
		// fmt.Sprintf takes a variadic []interface{}, so re-box the strings.
		boxed := make([]interface{}, len(args))
		for i, a := range args {
			boxed[i] = a
		}
		full = fmt.Sprintf(uri, boxed...)
	}
	base := fmt.Sprintf(ONELOGIN_URL, o.Shard)
	if o.CustomURL != "" {
		base = o.CustomURL
	}
	return fmt.Sprintf("%s/%s", base, full)
}
// Get_Token is a convenience function that always returns a token,
// lazily generating it on first use.
// TODO: Enable refreshing when necessary.
func (o *OneLogin) Get_Token() (*OneLogin_Token, error) {
	// First call: build the token holder and fetch it from the API.
	if o.Token == nil {
		o.Token = &OneLogin_Token{
			Endpoint:      o.GetUrl(""),
			Client_id:     o.Client_id,
			Client_secret: o.Client_secret,
		}
		err := o.Token.Get()
		if err != nil {
			return nil, ErrorOcurred(err)
		}
	}
	// NOTE(review): this logs the whole token struct at debug level —
	// confirm no secret material ends up in debug logs.
	logger.Debugf("Token: %s", o.Token)
	return o.Token, nil
}
// Authenticate performs an authentication request for a user. An optional
// MFA token can be passed in with the assumption that it will be required;
// if no token is passed in and one is required, authentication will fail.
func (o *OneLogin) Authenticate(username string, password string, token string) error {
	logger.Debugf("Authenticating user %s", username)
	auth_request := AuthenticationRequest{
		Username_or_email: username,
		Password:          password,
		Subdomain:         o.SubDomain,
	}
	auth_response := AuthResponse{}
	oauth_token, err := o.Get_Token()
	if err != nil {
		return ErrorOcurred(err)
	}
	url := o.GetUrl(USER_AUTHENTICATE)
	headers := Headers(fmt.Sprintf("bearer:%s", oauth_token))
	client := HttpClient{Url: url, Headers: headers}
	_, err = client.Request("POST", &auth_request, &auth_response)
	if err != nil {
		return ErrorOcurred(err)
	}
	if auth_response.Status.Error {
		logger.Errorf("Error authenticating user: %s", auth_response.Status.Message)
		return ErrorOcurred(errors.New("An error occurred while authenticating."))
	}
	// BUG FIX: previously Data[0].Devices[0] was indexed unconditionally,
	// which panics when the response carries no data or no MFA devices.
	if len(auth_response.Data) == 0 || len(auth_response.Data[0].Devices) == 0 {
		return ErrorOcurred(errors.New("authentication response contained no MFA devices"))
	}
	/** TODO: do not assume MFA is required. **/
	return o.VerifyToken(
		strconv.Itoa(auth_response.Data[0].Devices[0].Device_id),
		auth_response.Data[0].State_token,
		token,
	)
}
// VerifyToken verifies an MFA one-time token for the given device,
// completing an authentication flow started by Authenticate. device_id
// and state_token come from the earlier authentication response.
func (o *OneLogin) VerifyToken(device_id string, state_token string, token string) error {
	logger.Debugf("Verifying MFA token for device %s", device_id)
	verify_request := VerifyTokenRequest{
		Device_id:   device_id,
		State_token: state_token,
		Otp_token:   token,
	}
	auth_response := &AuthResponse{}
	// Reuse (or lazily create) the OAuth bearer token for the API call.
	oauth_token, err := o.Get_Token()
	if err != nil {
		return ErrorOcurred(err)
	}
	url := o.GetUrl(USER_VERIFY_FACTOR)
	headers := Headers(fmt.Sprintf("bearer:%s", oauth_token))
	client := HttpClient{Url: url, Headers: headers}
	_, err = client.Request("POST", &verify_request, &auth_response)
	if err != nil {
		return ErrorOcurred(err)
	}
	// The API signals verification failure in the response status body.
	if auth_response.Status.Error {
		return ErrorOcurred(errors.New(auth_response.Status.Message))
	}
	return nil
}
|
package main
//for manual testing with browser
//have to delete browser cookies everytime because db is cleared on reset
import (
"github.com/hokora/bank/db/server"
"github.com/hokora/bank/recurring"
"time"
"log"
mgo "gopkg.in/mgo.v2"
"os"
"github.com/hokora/bank/http"
"github.com/hokora/bank/auth"
"github.com/hokora/bank/frontend"
"strconv"
)
// Fixture names, credentials and ports for the locally-run mock stack
// (main DB, recurring-payments server, bank HTTP service, auth service,
// and the frontend).
const (
	DB_NAME              = "mocktest"
	MAINDB_COLL_NAME     = "maindb"
	MAINDB_SOCKET_FILE   = "maindb.mocksocket"
	RP_COLL_NAME         = "rp"
	RP_SOCKET_FILE       = "rp.mocksocket"
	SESS_COLLECTION_NAME = "sessions"
	DEFAULT_PASSWORD     = "password"
	AUTH_COLLECTION_NAME = "auth"
	BANK_SERVICE_PORT    = 13998
	AUTH_SERVICE_PORT    = 13999
	FRONTEND_PORT        = 14000
)
// main boots the whole mock banking stack for manual browser testing:
// it drops the Mongo database, starts the main DB server, the recurring
// payments server, the bank HTTP service and the auth service (each as a
// goroutine, with sleeps standing in for readiness checks), then runs
// the frontend in the foreground.
func main() {
	// NOTE(review): the mgo.Dial error is discarded; a failed dial will
	// surface as a nil-session panic below — confirm this is acceptable
	// for a manual test harness.
	sess, _ := mgo.Dial("localhost")
	db := sess.DB(DB_NAME)
	err := db.DropDatabase()
	if err != nil {
		log.Print("could not reset database")
		return
	}
	// Remove stale unix sockets from a previous run.
	os.Remove(MAINDB_SOCKET_FILE)
	os.Remove(RP_SOCKET_FILE)
	mainDB := server.NewServer(DB_NAME, MAINDB_COLL_NAME)
	go mainDB.Start(MAINDB_SOCKET_FILE)
	defer mainDB.Close()
	// Crude startup synchronization: give each service time to bind.
	time.Sleep(time.Second * 2)
	rp := recurring.NewServer(DB_NAME, RP_COLL_NAME)
	go rp.Start(RP_SOCKET_FILE, time.Hour * 2, MAINDB_SOCKET_FILE)
	time.Sleep(time.Second * 2)
	// Only loopback clients are allowed.
	ips := map[string]struct{}{}
	ips["::1"] = struct{}{}
	ips["127.0.0.1"] = struct{}{}
	s := http.NewServer(DEFAULT_PASSWORD, ips, DB_NAME, SESS_COLLECTION_NAME)
	go s.Start(BANK_SERVICE_PORT, MAINDB_SOCKET_FILE, RP_SOCKET_FILE)
	time.Sleep(time.Second * 2)
	authServer := auth.NewServer(auth.Config{DEFAULT_PASSWORD, ips}, DB_NAME, AUTH_COLLECTION_NAME)
	go authServer.Start(AUTH_SERVICE_PORT)
	time.Sleep(time.Second * 2)
	// Point the frontend at the two local backend services.
	bankService := frontend.ServiceConfig{
		"http://localhost:" + strconv.Itoa(BANK_SERVICE_PORT),
		DEFAULT_PASSWORD,
	}
	authService := frontend.ServiceConfig{
		"http://localhost:" + strconv.Itoa(AUTH_SERVICE_PORT),
		DEFAULT_PASSWORD,
	}
	frontendServer := frontend.NewServer(bankService, authService)
	log.Print("ready")
	// Blocks; everything above keeps running until the process exits.
	frontendServer.Start(FRONTEND_PORT)
}
|
use std::any::Any;
use std::fmt;
use std::fmt::Debug;
/// A runtime-serializable object. The `Any` supertrait lets callers
/// identify/downcast the concrete type at runtime.
pub trait Object: Any {
    /// Encodes this object into an opaque byte buffer.
    fn serialize(&self) -> Vec<u8>;
}
/// An `Object` that can also reconstruct itself from serialized bytes.
pub trait ObjectLike: Object {
    /// Decodes an object previously produced by `Object::serialize`.
    /// (The parameter is now named: anonymous trait-method parameters are
    /// deprecated and rejected from Rust 2018 onward.)
    fn deserialize(bytes: Vec<u8>) -> Box<Object>;
}
/// A type-tagged, serialized form of an `Object`.
/// NOTE(review): the Serialize/Deserialize derives require serde's derive
/// macros to be in scope — confirm the crate imports them.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SerializedObject {
    /// Identifier of the concrete type (intended to come from type-id
    /// hashing; see the `From` impl below).
    id: u64,
    /// Raw bytes produced by `Object::serialize`.
    data: Vec<u8>,
}
impl<'a> From<&'a Object> for SerializedObject {
    /// Stub: intended to compute a type id for `o` and pair it with the
    /// serialized bytes. Currently always panics via `unimplemented!`,
    /// so the construction below is unreachable.
    fn from(o: &'a Object) -> Self {
        let mut h = 0; // dummy value
        unimplemented!(); // get_type_id()
        SerializedObject { id: h, data: o.serialize() }
    }
}
impl Into<Box<Object>> for SerializedObject {
    /// Stub: intended to look up the concrete type by `self.id` in a
    /// registry and delegate deserialization. Always panics for now.
    fn into(self) -> Box<Object> {
        unimplemented!() // TYPES[self.id].deserialize(s.data)
    }
}
/// Opaque Debug formatting for boxed objects: `Object` exposes no
/// formatting hooks, so every value prints as the placeholder "Object".
impl Debug for Box<Object> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Object")
    }
}
|
package pgsql
import (
"testing"
)
// TestIntervalArray round-trips a Postgres interval[] literal through the
// driver, once as a string and once as a []byte, expecting the output to
// match the input byte-for-byte.
func TestIntervalArray(t *testing.T) {
	testlist2{{
		data: []testdata{
			{
				input:  string(`{"1 day","-5 years -4 mons -00:34:00"}`),
				output: string(`{"1 day","-5 years -4 mons -00:34:00"}`)},
		},
	}, {
		data: []testdata{
			{
				input:  []byte(`{"1 day","-5 years -4 mons -00:34:00"}`),
				output: []byte(`{"1 day","-5 years -4 mons -00:34:00"}`)},
		},
	}}.execute(t, "intervalarr")
}
|
/*
There is a sale going on in Chefland. For every 2 items Chef pays for, he gets the third item for free (see sample explanations for more clarity).
It is given that the cost of 1 item is X rupees. Find the minimum money required by Chef to buy at least N items.
Input Format
First line will contain T, number of test cases. Then the test cases follow.
Each test case consists of a single line of input containing two integers N and X.
Output Format
For each test case, output the minimum money required by Chef to buy at least N items.
Constraints
1≤T≤1000
1≤N,X≤1000
*/
package main
// main exercises cost() against the sample cases from the problem
// statement; assert panics on the first mismatch.
func main() {
	cases := []struct{ n, x, want int }{
		{3, 4, 8},
		{4, 2, 6},
		{5, 3, 12},
		{6, 1, 4},
	}
	for _, c := range cases {
		assert(cost(c.n, c.x) == c.want)
	}
}
// assert panics when x is false; a minimal test harness for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// cost returns the minimum rupees needed to buy n items at x rupees each
// when every third item is free: each complete group of 3 costs 2*x and
// leftover items are paid in full.
func cost(n, x int) int {
	groups, rest := n/3, n%3
	return (2*groups + rest) * x
}
|
package main
import (
"fmt"
"sort"
)
// MaxNum returns the largest element of a (a must be non-empty; like the
// original, it panics on an empty slice and sorts a in place).
//
// FIX: the previous version re-sorted and recursed once per element,
// giving O(n^2 log n) work; a single descending sort already puts the
// maximum at index 0.
func MaxNum(a []int) int {
	if len(a) == 1 {
		return a[0]
	}
	sort.Sort(sort.Reverse(sort.IntSlice(a)))
	return a[0]
}
// main demonstrates MaxNum on a single-element slice; prints "2".
func main() {
	fmt.Println(MaxNum([]int{2}))
}
|
package _725_Split_Linked_List_in_Parts
import (
"fmt"
"testing"
)
// TestSplitListToParts splits a 3-node list (1->2->3) into k=5 parts and
// prints each part; with more parts than nodes, the trailing parts are
// expected to be nil. Output is inspected manually rather than asserted.
func TestSplitListToParts(t *testing.T) {
	l := &ListNode{
		Val: 1,
		Next: &ListNode{
			Val: 2,
			Next: &ListNode{
				Val:  3,
				Next: nil,
			},
		},
	}
	k := 5
	ret := splitListToParts(l, k)
	for _, tl := range ret {
		fmt.Println(tl)
	}
}
|
package swap
import (
"context"
"errors"
"fmt"
"time"
"github.com/gagliardetto/solana-go"
associatedtokenaccount "github.com/gagliardetto/solana-go/programs/associated-token-account"
"github.com/gagliardetto/solana-go/programs/token"
"github.com/gagliardetto/solana-go/rpc"
"github.com/gopartyparrot/goparrot-twap/config"
"github.com/gopartyparrot/goparrot-twap/price"
"github.com/gopartyparrot/goparrot-twap/store"
"go.uber.org/zap"
)
// Sentinel errors surfaced by the swapper's Start/Init cycle.
var (
	ErrSwapPoolNotFound     = errors.New("swap pool not found for given pair")
	ErrUpdateBalances       = errors.New("failed to update wallet balances")
	ErrFromBalanceNotEnough = errors.New("from balance not enough for swap")
	ErrStopAmountReached    = errors.New("stop amount reached, balance is full")
)

// SwapSide is the direction of a swap relative to the pool's token pair.
type SwapSide string

const (
	SwapSide_Buy  SwapSide = "buy"
	SwapSide_Sell SwapSide = "sell"
)

// SwapStatus is the per-attempt record persisted to the JSON store.
type SwapStatus struct {
	TxID    string   // transaction signature on success
	Pair    string   // pool pair identifier
	Date    string   // UTC timestamp (time.UnixDate format)
	Side    SwapSide // buy or sell
	Amount  uint64   // raw token units swapped
	ErrLogs string   `json:",omitempty"` // error text when the swap failed
}

// SwapTaskConfig captures one configured swap task, resolved by Init.
type SwapTaskConfig struct {
	pair                 string            // pool pair key
	side                 SwapSide          // swap direction
	amount               float64           // human-readable amount to swap each cycle
	stopAmount           float64           // stop when this much of the output token is held (0 = never)
	fromToken            string            // mint address being spent (depends on side)
	toToken              string            // mint address being acquired
	transferAddress      string            // optional owner to sweep output tokens to
	transferTokenAccount solana.PublicKey  // resolved token account of transferAddress
	transferThreshold    float64           // sweep once output balance exceeds this (0 = never)
	priceThreshold       float32           // skip swaps on the wrong side of this USD price (0 = disabled)
	pool                 config.PoolConfig // resolved pool configuration
}

// TokenSwapperConfig is the dependency bundle for NewTokenSwapper.
type TokenSwapperConfig struct {
	ClientRPC *rpc.Client
	RPCWs     string
	PrivateKey string
	StorePath  string
	Tokens     map[string]config.TokenInfo
	Pools      map[string]config.PoolConfig
	Logger     *zap.Logger
}

// TokenSwapper executes periodic TWAP-style swaps through Raydium.
type TokenSwapper struct {
	clientRPC     *rpc.Client
	RPCWs         string           // websocket RPC endpoint
	store         *store.JSONStore // persisted SwapStatus records
	account       solana.PrivateKey
	logger        *zap.Logger
	raydiumSwap   *RaydiumSwap
	tokens        map[string]config.TokenInfo
	pools         map[string]config.PoolConfig
	tokenBalances map[string]uint64           // token-account address -> raw balance
	tokenAccounts map[string]solana.PublicKey // mint address -> token account (set by Init)
	swapTask      SwapTaskConfig
}
// Init prepares a swap task: it resolves the pool config for pair,
// orients from/to tokens according to side, ensures the wallet has
// associated token accounts for both mints (creating any missing ones,
// except native SOL), and resolves the optional transfer destination
// account. Returns ErrSwapPoolNotFound for an unknown pair.
func (s *TokenSwapper) Init(
	ctx context.Context,
	pair string,
	side SwapSide,
	amount float64,
	stopAmount float64,
	transferAddress string,
	transferThreshold float64,
	priceThreshold float32,
) error {
	s.swapTask = SwapTaskConfig{
		pair:              pair,
		side:              side,
		amount:            amount,
		stopAmount:        stopAmount,
		transferAddress:   transferAddress,
		transferThreshold: transferThreshold,
		priceThreshold:    priceThreshold,
	}
	// FIX: direct map lookup instead of iterating over every pool.
	if pool, ok := s.pools[pair]; ok {
		s.swapTask.pool = pool
	}
	if s.swapTask.pool.FromToken == "" {
		return ErrSwapPoolNotFound
	}
	// A buy goes FromToken -> ToToken; a sell goes the other way.
	s.swapTask.fromToken = s.swapTask.pool.FromToken
	s.swapTask.toToken = s.swapTask.pool.ToToken
	if side == SwapSide_Sell {
		s.swapTask.fromToken = s.swapTask.pool.ToToken
		s.swapTask.toToken = s.swapTask.pool.FromToken
	}
	mints := []solana.PublicKey{
		solana.MustPublicKeyFromBase58(s.swapTask.pool.FromToken),
		solana.MustPublicKeyFromBase58(s.swapTask.pool.ToToken),
	}
	existingAccounts, missingAccounts, err := GetTokenAccountsFromMints(ctx, *s.clientRPC, s.account.PublicKey(), mints...)
	if err != nil {
		return err
	}
	if len(missingAccounts) != 0 {
		// Create associated token accounts for mints we do not hold yet;
		// native SOL needs no associated account.
		instrs := []solana.Instruction{}
		for mint := range missingAccounts {
			if mint == config.NativeSOL {
				continue
			}
			s.logger.Info("need to create token account", zap.String("mint", mint))
			inst, err := associatedtokenaccount.NewCreateInstruction(
				s.account.PublicKey(),
				s.account.PublicKey(),
				solana.MustPublicKeyFromBase58(mint),
			).ValidateAndBuild()
			if err != nil {
				return err
			}
			instrs = append(instrs, inst)
		}
		sig, err := ExecuteInstructionsAndWaitConfirm(ctx, s.clientRPC, s.RPCWs, []solana.PrivateKey{s.account}, instrs...)
		if err != nil {
			return err
		}
		s.logger.Info("missing token accounts created", zap.String("txID", sig.String()))
		for k, v := range missingAccounts {
			existingAccounts[k] = v
		}
	}
	s.tokenAccounts = existingAccounts
	return s.UpdateTransferTokenAccount(ctx, transferAddress)
}
// UpdateTransferTokenAccount resolves ownerAddress's token account for
// the task's output mint and caches it in swapTask.transferTokenAccount.
// An empty owner address is a no-op; an owner without the token account
// is logged and left unset (the account is not created on their behalf).
func (s *TokenSwapper) UpdateTransferTokenAccount(ctx context.Context, ownerAddress string) error {
	if ownerAddress == "" {
		return nil
	}
	toTokenPK := solana.MustPublicKeyFromBase58(s.swapTask.toToken)
	ownerPK := solana.MustPublicKeyFromBase58(ownerAddress)
	existingAccounts, missingAccounts, err := GetTokenAccountsFromMints(ctx, *s.clientRPC, ownerPK, toTokenPK)
	if err != nil {
		return err
	}
	if len(missingAccounts) > 0 {
		s.logger.Info("transfer address do not have a token account", zap.String("mint", s.swapTask.toToken))
		return nil
	}
	s.swapTask.transferTokenAccount = existingAccounts[s.swapTask.toToken]
	return nil
}
// UpdateBalances refreshes the cached raw balance of every tracked token
// account in s.tokenBalances.
func (s *TokenSwapper) UpdateBalances(ctx context.Context) error {
	accounts := []solana.PublicKey{}
	for _, acc := range s.tokenAccounts {
		accounts = append(accounts, acc)
	}
	balances, err := GetTokenAccountsBalance(ctx, *s.clientRPC, accounts...)
	if err != nil {
		return err
	}
	for addr, amt := range balances {
		s.tokenBalances[addr] = amt
	}
	return nil
}
// GetCurrentPrice fetches the pool token's current USD price from the
// CoinGecko simple-price API.
func (s *TokenSwapper) GetCurrentPrice(ctx context.Context) (float32, error) {
	client := price.NewClient(nil)
	res, err := client.SimplePrice([]string{s.swapTask.pool.CoinGeckoID}, []string{"usd"})
	if err != nil {
		return 0, err
	}
	// FIX: renamed from "price", which shadowed the imported price package.
	prices := *res
	return prices[s.swapTask.pool.CoinGeckoID]["usd"], nil
}
// TransferBalance sends amount raw token units from sourceAddress to
// destAddress (both SPL token accounts), signed by the swapper's wallet,
// and waits for confirmation. Failures are logged and returned so the
// caller can retry on the next interval.
func (s *TokenSwapper) TransferBalance(ctx context.Context, sourceAddress solana.PublicKey, amount uint64, destAddress solana.PublicKey) error {
	transferTx, err := token.NewTransferInstruction(
		amount,
		sourceAddress,
		destAddress,
		s.account.PublicKey(),
		[]solana.PublicKey{}, // no multisig signers
	).ValidateAndBuild()
	if err != nil {
		return err
	}
	sig, err := ExecuteInstructionsAndWaitConfirm(ctx, s.clientRPC, s.RPCWs, []solana.PrivateKey{s.account}, transferTx)
	if err != nil {
		s.logger.Warn("transfer amount failed, will try again in next interval", zap.Error(err))
		return err
	}
	s.logger.Info("transfer balance success", zap.String("txID", sig.String()))
	return nil
}
// Start runs one swap cycle:
//  1. refresh cached balances;
//  2. sweep accumulated output tokens to transferAddress once past the
//     transfer threshold;
//  3. stop permanently (ErrStopAmountReached) once stopAmount of the
//     output token is held;
//  4. skip the swap when the current price is on the unprofitable side
//     of priceThreshold;
//  5. otherwise execute the Raydium swap and persist a SwapStatus record.
func (s *TokenSwapper) Start() error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2)
	defer cancel()
	err := s.UpdateBalances(ctx)
	if err != nil {
		return ErrUpdateBalances
	}
	fromToken := s.swapTask.fromToken
	fromAddress := s.tokenAccounts[fromToken]
	fromBalance := s.tokenBalances[fromAddress.String()]
	fromTokenInfo := s.tokens[fromToken]
	toToken := s.swapTask.toToken
	toAddress := s.tokenAccounts[toToken]
	toBalance := s.tokenBalances[toAddress.String()]
	toTokenInfo := s.tokens[toToken]
	// Convert human-readable task amounts into raw token units.
	amount := fromTokenInfo.FromFloat(s.swapTask.amount)
	stopAmount := toTokenInfo.FromFloat(s.swapTask.stopAmount)
	transferThreshold := toTokenInfo.FromFloat(s.swapTask.transferThreshold)
	if transferThreshold > 0 && toBalance > transferThreshold && s.swapTask.transferAddress != "" {
		s.logger.Info("transfer threshold reached, transfering "+toTokenInfo.Symbol+" to transferAddress",
			zap.Float64("threshold", s.swapTask.transferThreshold),
			zap.Uint64("transferAmount", toBalance),
			zap.String("transferAddress", s.swapTask.transferAddress),
			zap.String("transferTokenAcccount", s.swapTask.transferTokenAccount.String()),
		)
		// NOTE(review): the transfer error is deliberately dropped (the
		// helper logs and the next cycle retries), and toBalance is not
		// refreshed afterwards — confirm a stale balance is acceptable
		// for the stop-amount check below.
		s.TransferBalance(ctx, toAddress, toBalance, s.swapTask.transferTokenAccount)
	}
	if stopAmount > 0 && toBalance > stopAmount {
		s.logger.Info("stop amount reached, stopping swap "+fromTokenInfo.Symbol+" to "+toTokenInfo.Symbol,
			zap.Uint64("stopAmount", stopAmount),
			zap.Uint64("currentBalance", toBalance),
		)
		return ErrStopAmountReached
	}
	if amount > fromBalance {
		s.logger.Warn("not enough balance to swap "+fromTokenInfo.Symbol+" to "+toTokenInfo.Symbol,
			zap.Uint64("swapAmount", amount),
			zap.Uint64("currentBalance", fromBalance),
		)
		return ErrFromBalanceNotEnough
	}
	// Price gate: sells wait for the price to rise above the threshold,
	// buys wait for it to fall below.
	if s.swapTask.priceThreshold > 0 {
		currentPrice, err := s.GetCurrentPrice(ctx)
		if err != nil {
			s.logger.Warn("fail to get current price", zap.Error(err))
			return err
		}
		if s.swapTask.side == SwapSide_Sell && currentPrice < s.swapTask.priceThreshold {
			s.logger.Info("price still low (below priceThreshold). no need to sell",
				zap.Float32("currentPrice", currentPrice),
				zap.Float32("priceThreshold", s.swapTask.priceThreshold),
			)
			return nil
		}
		if s.swapTask.side == SwapSide_Buy && currentPrice > s.swapTask.priceThreshold {
			s.logger.Info("price still high (above priceThreshold). no need to buy",
				zap.Float32("currentPrice", currentPrice),
				zap.Float32("priceThreshold", s.swapTask.priceThreshold),
			)
			return nil
		}
	}
	sig, err := s.raydiumSwap.Swap(
		ctx,
		&s.swapTask.pool.RaydiumPoolConfig,
		amount,
		fromToken,
		fromAddress,
		toToken,
		toAddress,
	)
	// Record the attempt (success or failure) in the JSON store.
	status := SwapStatus{
		Date:   time.Now().UTC().Format(time.UnixDate),
		Pair:   s.swapTask.pair,
		Side:   s.swapTask.side,
		Amount: amount,
	}
	if err != nil {
		s.logger.Warn("swap fail", zap.Error(err))
		status.ErrLogs = fmt.Sprintf("error: %v", err)
	} else {
		s.logger.Info("swap success", zap.String("txID", sig.String()))
		status.TxID = sig.String()
	}
	key := fmt.Sprintf("%s_%s", status.Pair, status.Date)
	s.store.Set(key, status)
	return nil
}
// NewTokenSwapper wires up a TokenSwapper from the given config: it opens
// the JSON-backed swap-status store, decodes the base58 wallet private key,
// and builds the RaydiumSwap helper used to perform the on-chain swaps.
func NewTokenSwapper(cfg TokenSwapperConfig) (*TokenSwapper, error) {
	// NOTE(review): the local `store` shadows the imported `store` package;
	// it works because the package is only referenced on this line.
	store, err := store.OpenJSONStore(cfg.StorePath)
	if err != nil {
		return nil, err
	}
	privateKey, err := solana.PrivateKeyFromBase58(cfg.PrivateKey)
	if err != nil {
		return nil, err
	}
	raydiumSwap := RaydiumSwap{
		clientRPC: cfg.ClientRPC,
		account:   privateKey,
	}
	l := TokenSwapper{
		clientRPC:     cfg.ClientRPC,
		RPCWs:         cfg.RPCWs,
		store:         store,
		logger:        cfg.Logger,
		pools:         cfg.Pools,
		tokens:        cfg.Tokens,
		account:       privateKey,
		raydiumSwap:   &raydiumSwap,
		tokenBalances: map[string]uint64{},
	}
	return &l, nil
}
|
package main
import(
"github.com/martini-contrib/render"
)
// Profile is the view model describing a person: a display name plus a
// list of skills. (Original comment: "モデル" — model.)
type Profile struct {
	Name  string
	Skill []string
}
// AboutViewModel is the view model rendered by the "about" template:
// a page title and the embedded Profile.
type AboutViewModel struct {
	Title   string
	Profile Profile
}
// AboutRender is the callback for Get("/about", ...): it builds the
// about-page view model and renders the "about" HTML template with
// HTTP status 200.
func AboutRender(r render.Render) {
	vm := AboutViewModel{
		Title: "About me",
		Profile: Profile{
			Name:  "perrier1034",
			Skill: []string{"python", "scala", "go"},
		},
	}
	r.HTML(200, "about", vm)
}
|
package main
import (
"io/ioutil"
"net/url"
"path"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
"github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh"
)
// setupAuth, if necessary, configures the git CLI for authentication using
// either SSH or the "store" (username/password-based) credential helper since
// the git-initializer component does fall back on the git CLI for certain
// operations. It additionally returns an appropriate implementation of
// transport.AuthMethod for operations that interact with remote repositories
// programmatically.
func setupAuth(evt event) (transport.AuthMethod, error) {
	homeDir, err := homedir.Dir()
	if err != nil {
		return nil, errors.Wrap(err, "error finding user's home directory")
	}
	// If an SSH key was provided, use that.
	if key, ok := evt.Project.Secrets["gitSSHKey"]; ok {
		// If a passphrase was supplied for the key, decrypt the key now.
		keyPass, ok := evt.Project.Secrets["gitSSHKeyPassword"]
		if ok {
			var err error
			if key, err = decryptKey(key, keyPass); err != nil {
				return nil, errors.Wrap(err, "error decrypting SSH key")
			}
		}
		// Write the (now decrypted) key where the git CLI expects it.
		rsaKeyPath := path.Join(homeDir, ".ssh", "id_rsa")
		if err := ioutil.WriteFile(rsaKeyPath, []byte(key), 0600); err != nil {
			return nil, errors.Wrapf(err, "error writing SSH key to %q", rsaKeyPath)
		}
		// This is the implementation of the transport.AuthMethod interface that can
		// be used for operations that interact with the remote repository
		// interactively.
		// NOTE(review): keyPass is passed again even though the key was already
		// decrypted above — presumably harmless for an unencrypted key, but
		// confirm against go-git's NewPublicKeys behavior.
		publicKeys, err := gitssh.NewPublicKeys("git", []byte(key), keyPass)
		if err != nil {
			return nil,
				errors.Wrap(err, "error getting transport.AuthMethod using SSH key")
		}
		// This prevents the CLI from interactively requesting the user to allow
		// connection to a new/unrecognized host.
		publicKeys.HostKeyCallback = ssh.InsecureIgnoreHostKey() // nolint: gosec
		return publicKeys, nil // We're done
	}
	// If a password was provided, use that.
	if password, ok := evt.Project.Secrets["gitPassword"]; ok {
		credentialURL, err := url.Parse(evt.Worker.Git.CloneURL)
		if err != nil {
			return nil,
				errors.Wrapf(err, "error parsing URL %q", evt.Worker.Git.CloneURL)
		}
		// If a username was provided, use it. One may not have been because some
		// git providers, like GitHub, for instance, will allow any non-empty
		// username to be used in conjunction with a personal access token.
		username, ok := evt.Project.Secrets["gitUsername"]
		// If a username wasn't provided, we can ALSO try to pick it out of the URL.
		if !ok && credentialURL.User != nil {
			username = credentialURL.User.Username()
		}
		// If the username is still the empty string, we assume we're working with a
		// git provider like GitHub that only requires the username to be non-empty.
		// We arbitrarily set it to "git".
		if username == "" {
			username = "git"
		}
		// Remove path and query string components from the URL
		credentialURL.Path = ""
		credentialURL.RawQuery = ""
		// Augment the URL with user/pass information.
		credentialURL.User = url.UserPassword(username, password)
		// Write the URL to the location used by the "stored" credential helper.
		credentialsPath := path.Join(homeDir, ".git-credentials")
		if err := ioutil.WriteFile(
			credentialsPath,
			[]byte(credentialURL.String()),
			0600,
		); err != nil {
			return nil,
				errors.Wrapf(err, "error writing credentials to %q", credentialsPath)
		}
		// This is the implementation of the transport.AuthMethod interface that can
		// be used for operations that interact with the remote repository
		// interactively.
		return &http.BasicAuth{
			Username: username,
			Password: password,
		}, nil // We're done
	}
	// No auth setup required if we get to here.
	return nil, nil
}
|
//go:build generate
package generated
import (
_ "github.com/calico-vpp/vpplink/pkg"
_ "go.fd.io/govpp/cmd/binapi-generator"
)
//go:generate go build -buildmode=plugin -o ./.bin/vpplink_plugin.so github.com/calico-vpp/vpplink/pkg
//go:generate go run go.fd.io/govpp/cmd/binapi-generator --no-version-info --no-source-path-info --gen rpc,./.bin/vpplink_plugin.so -o ./bindings --input $VPP_DIR ikev2 gso arp interface ip ipip ipsec ip_neighbor tapv2 nat44_ed cnat af_packet feature ip6_nd punt vxlan af_xdp vlib virtio avf wireguard capo memif acl abf crypto_sw_scheduler sr rdma vmxnet3 pbl memclnt session vpe urpf classify ip_session_redirect
|
package event
import (
"context"
"github.com/dwaynelavon/es-loyalty-program/internal/app/eventsource"
"github.com/dwaynelavon/es-loyalty-program/internal/app/user"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// userEventHandler projects user events from the event store onto the
// read model (readRepo).
type userEventHandler struct {
	readRepo   user.ReadRepo          // read-model repository the events are projected into
	logger     *zap.Logger            // structured logger
	sLogger    *zap.SugaredLogger     // sugared view of logger (appears unused in the visible code)
	eventStore eventsource.EventStore // source of aggregate event history
	// isReadModelSynced flips to true after the first successful Sync; until
	// then Handle replays the aggregate's history instead of single events.
	isReadModelSynced bool
}
// NewEventHandler creates an instance of EventHandler that keeps the user
// read model in sync with the event store.
// isReadModelSynced starts at its zero value (false), so the first Handle
// call triggers a full Sync.
// NOTE(review): the parameter is user.EventStore while the field is
// eventsource.EventStore — presumably an alias or compatible interface;
// confirm in the user package.
func NewEventHandler(
	logger *zap.Logger,
	readRepo user.ReadRepo,
	eventStore user.EventStore,
) eventsource.EventHandler {
	return &userEventHandler{
		readRepo:   readRepo,
		eventStore: eventStore,
		logger:     logger,
		sLogger:    logger.Sugar(),
	}
}
// EventTypesHandled implements the EventHandler interface; it enumerates
// every user event type this handler projects onto the read model.
func (h *userEventHandler) EventTypesHandled() []string {
	handled := []string{
		user.UserCreatedEventType,
		user.UserDeletedEventType,
		user.UserReferralCreatedEventType,
		user.UserReferralCompletedEventType,
		user.PointsEarnedEventType,
	}
	return handled
}
// Sync implements the EventHandler interface. It replays the aggregate's
// stored event history onto the read model and, on full success, marks
// the read model as synced.
func (h *userEventHandler) Sync(
	ctx context.Context,
	aggregateID string,
) error {
	h.logger.Info(
		"syncing read model",
		zap.String("aggregateId", aggregateID),
	)
	aggregate, errAggregate := h.loadAggregate(ctx, aggregateID)
	if errAggregate != nil {
		return errAggregate
	}
	// nil aggregate means the read model has no record (NotFound); there is
	// nothing to replay onto, so this is treated as success.
	if aggregate == nil {
		return nil
	}
	// Load history starting from the version the read model already has —
	// presumably Load(ctx, id, version) returns events after that version;
	// confirm against the EventStore contract.
	history, errHistory := h.eventStore.Load(
		ctx,
		aggregateID,
		aggregate.Version,
	)
	if errHistory != nil {
		return errors.Wrap(errHistory, "unable to load aggregate history")
	}
	// Apply events in order; stop at the first failure.
	for _, v := range history {
		errHandle := h.handleEvent(ctx, v)
		if errHandle != nil {
			return errHandle
		}
	}
	h.isReadModelSynced = true
	return nil
}
// Handle implements the EventHandler interface. Once the read model is
// synced it applies the single event; before that it performs a full Sync
// (which replays history, so the incoming event is not applied directly).
func (h *userEventHandler) Handle(
	ctx context.Context,
	event eventsource.Event,
) error {
	if h.isReadModelSynced {
		return h.handleEvent(ctx, event)
	}
	if err := h.Sync(ctx, event.AggregateID); err != nil {
		return errors.Wrap(err, "unable to sync read model with event store")
	}
	return nil
}
// loadAggregate fetches the user DTO from the read model. A gRPC NotFound
// is not treated as an error: it returns (nil, nil) so callers can handle
// a missing aggregate explicitly.
func (h *userEventHandler) loadAggregate(
	ctx context.Context,
	aggregateID string,
) (*user.DTO, error) {
	// TODO: cache this call per request
	aggregate, errAggregate := h.readRepo.User(ctx, aggregateID)
	if errAggregate != nil {
		if status.Code(errAggregate) == codes.NotFound {
			return nil, nil
		}
		return nil, errors.Wrap(
			errAggregate,
			"unable to load aggregate from read model",
		)
	}
	return aggregate, nil
}
/* ----- handlers ----- */
// handleEvent loads the aggregate from the read model and dispatches the
// event to the matching projection handler. Unknown event types are
// ignored (nil).
func (h *userEventHandler) handleEvent(
	ctx context.Context,
	event eventsource.Event,
) error {
	aggregate, err := h.loadAggregate(ctx, event.AggregateID)
	if err != nil {
		return err
	}
	switch event.EventType {
	case user.UserCreatedEventType:
		return handleUserCreated(ctx, event, h.readRepo)
	case user.UserDeletedEventType:
		return h.readRepo.DeleteUser(ctx, event.AggregateID)
	case user.UserReferralCreatedEventType:
		return handleUserReferralCreated(ctx, event, h.readRepo, aggregate)
	case user.UserReferralCompletedEventType:
		return handleUserReferralCompleted(ctx, event, h.readRepo, aggregate)
	case user.PointsEarnedEventType:
		return handlePointsEarned(ctx, event, h.readRepo, aggregate)
	default:
		return nil
	}
}
// handlePointsEarned projects a PointsEarned event: it deserializes the
// payload and adds the points to the read model, bumping the aggregate
// version by one. The user must already exist in the read model.
func handlePointsEarned(
	ctx context.Context,
	event eventsource.Event,
	readRepo user.ReadRepo,
	aggregate *user.DTO,
) error {
	var operation eventsource.Operation = "user.handlePointsEarned"
	if aggregate == nil {
		return eventsource.AggregateNotFoundErr(operation, event.AggregateID)
	}
	pointsEarnedEvent := user.PointsEarned{
		ApplierModel: *eventsource.NewApplierModel(event),
	}
	payload, errPayload := pointsEarnedEvent.GetDeserializedPayload()
	if errPayload != nil {
		return errPayload
	}
	return readRepo.EarnPoints(
		ctx,
		event.AggregateID,
		payload.PointsEarned,
		aggregate.Version+1,
	)
}
// handleUserCreated projects a Created event: it deserializes the payload
// and inserts a new user DTO into the read model, carrying over the
// event's version and timestamp.
func handleUserCreated(
	ctx context.Context,
	event eventsource.Event,
	readRepo user.ReadRepo,
) error {
	createdEvent := user.Created{
		ApplierModel: *eventsource.NewApplierModel(event),
	}
	payload, errPayload := createdEvent.GetDeserializedPayload()
	if errPayload != nil {
		return errPayload
	}
	userDTO := user.DTO{
		UserID:         createdEvent.AggregateID,
		Username:       payload.Username,
		Email:          payload.Email,
		CreatedAt:      createdEvent.EventAt,
		UpdatedAt:      createdEvent.EventAt,
		ReferralCode:   payload.ReferralCode,
		ReferredByCode: payload.ReferredByCode,
		AggregateBase: eventsource.AggregateBase{
			Version: event.Version,
		},
	}
	return readRepo.CreateUser(
		ctx,
		userDTO,
	)
}
// handleUserReferralCompleted marks an existing referral as completed in
// the read model, bumping the aggregate version by one. The user must
// already exist in the read model.
func handleUserReferralCompleted(
	ctx context.Context,
	event eventsource.Event,
	readRepo user.ReadRepo,
	aggregate *user.DTO,
) error {
	var operation eventsource.Operation = "user.handleUserReferralCompleted"
	if aggregate == nil {
		return eventsource.AggregateNotFoundErr(operation, event.AggregateID)
	}
	referralCompletedEvent := user.ReferralCompleted{
		ApplierModel: *eventsource.NewApplierModel(event),
	}
	payload, errPayload := referralCompletedEvent.GetDeserializedPayload()
	if errPayload != nil {
		return errPayload
	}
	return readRepo.UpdateReferralStatus(
		ctx,
		event.AggregateID,
		payload.ReferralID,
		user.ReferralStatusCompleted,
		aggregate.Version+1,
	)
}
// handleUserReferralCreated projects a ReferralCreated event: it validates
// the referral status from the payload and appends the new referral to the
// user in the read model, bumping the aggregate version by one.
// NOTE(review): the operation string reads "handlerUserReferralCreated"
// (extra "r"); it is a runtime value, so it is left untouched here.
func handleUserReferralCreated(
	ctx context.Context,
	event eventsource.Event,
	readRepo user.ReadRepo,
	aggregate *user.DTO,
) error {
	var operation eventsource.Operation = "user.handlerUserReferralCreated"
	if aggregate == nil {
		return eventsource.AggregateNotFoundErr(operation, event.AggregateID)
	}
	referralCreatedEvent := user.ReferralCreated{
		ApplierModel: *eventsource.NewApplierModel(event),
	}
	payload, errPayload := referralCreatedEvent.GetDeserializedPayload()
	if errPayload != nil {
		return errPayload
	}
	status, errStatus := user.GetReferralStatus(&payload.ReferralStatus)
	if errStatus != nil {
		return eventsource.InvalidPayloadErr(
			operation,
			errStatus,
			event.AggregateID,
			payload,
		)
	}
	return readRepo.CreateReferral(
		ctx,
		event.AggregateID,
		user.Referral{
			ID:                payload.ReferralID,
			ReferralCode:      payload.ReferralCode,
			ReferredUserEmail: payload.ReferredUserEmail,
			Status:            status,
			CreatedAt:         event.EventAt,
			UpdatedAt:         event.EventAt,
		},
		aggregate.Version+1,
	)
}
|
package invoice
import (
"time"
)
// ProcessedState represents an invoice that has been processed and is
// either waiting for payment or, once past its due date, failed.
type ProcessedState struct {
}
// State reports the effective state of a processed invoice: Failed once
// the due date has passed, otherwise WaitForPayment.
func (s *ProcessedState) State(i *Invoice) State {
	if time.Now().After(i.DueDate) {
		return Failed
	}
	return WaitForPayment
}
// Publish is not a valid transition from the processed state.
func (s *ProcessedState) Publish(i *Invoice) error {
	return InvoiceError{InvoiceErrorInvalidStateTransition}
}

// Process is a no-op: the invoice is already processed.
func (s *ProcessedState) Process(i *Invoice) error {
	return nil
}
// Pay records the payment's transaction ID (when a Payment is attached)
// and transitions the invoice to the paid state.
func (s *ProcessedState) Pay(i *Invoice, transactionID string) error {
	if p := i.Payment; p != nil {
		p.TransactionID = transactionID
	}
	return i.SetState(&PaidState{})
}
// Fail transitions the invoice to the failed state unconditionally.
func (s *ProcessedState) Fail(i *Invoice) error {
	return i.SetState(&FailedState{})
}
// Reset moves an overdue (effectively Failed) invoice back to draft; any
// other processed invoice may not be reset.
func (s *ProcessedState) Reset(i *Invoice) error {
	if s.State(i) != Failed {
		return InvoiceError{InvoiceErrorInvalidStateTransition}
	}
	return i.SetState(&DraftState{})
}
|
package tasks
// Sanitize returns a copy of the config sanitized for client side input.
// Only whitelisted task fields (Title, Description, Schema, UISchema) are
// carried over, for safety.
func (src *Config) Sanitize() *Config {
	sanitized := make(map[string]Task, len(src.Tasks))
	for name, t := range src.Tasks {
		sanitized[name] = Task{
			Title:       t.Title,
			Description: t.Description,
			Schema:      t.Schema,
			UISchema:    t.UISchema,
		}
	}
	return &Config{Tasks: sanitized}
}
|
package scheduler
import (
"context"
"fmt"
"sync/atomic"
"time"
"github.com/apex/log"
"github.com/gocraft/work"
"gopkg.in/tomb.v2"
"git.scc.kit.edu/sdm/lsdf-checksum/internal/lifecycle"
"git.scc.kit.edu/sdm/lsdf-checksum/workqueue"
)
// ControllerScheduler is the scheduler surface exposed to a Controller:
// it can tune the scheduling interval, query the queue, request job
// production, and read scheduling statistics.
type ControllerScheduler interface {
	SetInterval(interval time.Duration)
	GetQueue() workqueue.QueueQuerier
	RequestProduction(n uint)
	RequestProductionUntilThreshold(threshold uint)
	Stats() SchedulerStats
}
// Controller supplies the scheduling policy that drives a Scheduler.
type Controller interface {
	// Init is called once: When the Scheduler is starting.
	Init(scheduler ControllerScheduler)
	// Schedule is called by Scheduler with a frequency corresponding to
	// its interval value.
	// Schedule is never called concurrently, the next call to schedule takes
	// place `intv` time after the previous call completed.
	// RequestProduction() is the means of scheduling for the Controller.
	// During Schedule, the Controller should request the enqueueing of new
	// tasks by calling RequestProduction() on the Scheduler.
	Schedule()
}
// ProductionOrder is a claim on a batch of jobs to be produced; it tracks
// fulfilment against the scheduler's internal order book.
type ProductionOrder[T workqueue.JobPayload] struct {
	order     orderBookOrder
	scheduler *Scheduler[T]
}
// Total delegates to the underlying order's total job count.
func (p *ProductionOrder[T]) Total() int {
	return p.order.Total()
}

// Remaining delegates to the underlying order's count of jobs not yet
// fulfilled.
func (p *ProductionOrder[T]) Remaining() int {
	return p.order.Remaining()
}

// Fulfilled delegates to the underlying order's count of jobs already
// fulfilled.
func (p *ProductionOrder[T]) Fulfilled() int {
	return p.order.Fulfilled()
}

// Enqueue submits one job with the given payload via the scheduler and,
// on success, counts it against this order.
func (p *ProductionOrder[T]) Enqueue(payload T) (*work.Job, error) {
	job, err := p.scheduler.enqueue(payload)
	if err != nil {
		return job, fmt.Errorf("ProductionOrder.Enqueue: %w", err)
	}
	p.order.Fulfill(1)
	return job, nil
}
// Config carries the dependencies for a Scheduler.
type Config struct {
	Controller Controller    // scheduling policy callback
	Logger     log.Interface // base logger; Start derives fieldLogger from it
}

// Compile-time check that Scheduler satisfies ControllerScheduler.
var _ ControllerScheduler = &Scheduler[*workqueue.WorkPack]{}

// Scheduler periodically invokes its Controller, which requests job
// production; producers then acquire orders and enqueue payloads.
type Scheduler[T workqueue.JobPayload] struct {
	config Config

	tomb *tomb.Tomb // lifecycle of the run goroutine; set in Start

	queue *workqueue.QueueClient[T]

	fieldLogger log.Interface // logger with queue/component fields; set in Start

	orderBook orderBook // pending production orders

	queueObservedEmpty atomic.Uint64 // times the queue looked empty right after an enqueue

	interval time.Duration // delay between Schedule calls; set via SetInterval
}
// New creates a Scheduler for the given queue. The tomb and fieldLogger
// are not initialised here — Start must be called before use.
func New[T workqueue.JobPayload](queue *workqueue.QueueClient[T], config Config) *Scheduler[T] {
	return &Scheduler[T]{
		config:    config,
		queue:     queue,
		orderBook: *newOrderBook(),
	}
}
// Start initialises logging and the lifecycle tomb, lets the Controller
// configure the scheduler (Init), then launches the scheduling loop in a
// goroutine owned by the tomb.
func (s *Scheduler[T]) Start(ctx context.Context) {
	s.fieldLogger = s.config.Logger.WithFields(log.Fields{
		"queue":     s.queue.Name(),
		"component": "scheduler.QueueScheduler",
	})
	s.tomb, _ = tomb.WithContext(ctx)
	s.fieldLogger.Info("Performing scheduling controller initialisation")
	// Init runs synchronously, before the loop starts, so the Controller can
	// safely call SetInterval/RequestProduction here.
	s.config.Controller.Init(s)
	s.tomb.Go(s.run)
}
// SignalStop asks the scheduling loop to stop; use Wait to block until it
// has actually finished.
func (s *Scheduler[T]) SignalStop() {
	s.tomb.Kill(lifecycle.ErrStopSignalled)
}

// Wait blocks until the scheduling loop has terminated and returns its
// final error.
func (s *Scheduler[T]) Wait() error {
	return s.tomb.Wait()
}

// Dead returns a channel that is closed once the scheduling loop has
// terminated.
func (s *Scheduler[T]) Dead() <-chan struct{} {
	return s.tomb.Dead()
}

// Err returns the error the scheduling loop is terminating (or has
// terminated) with, if any.
func (s *Scheduler[T]) Err() error {
	return s.tomb.Err()
}
// run is the scheduling loop: it calls the Controller's Schedule every
// `interval`, until the tomb starts dying. The timer is re-armed after
// each Schedule call returns, so `interval` is the gap between calls,
// not between call starts.
func (s *Scheduler[T]) run() error {
	dying := s.tomb.Dying()
	timer := time.NewTimer(time.Duration(0))
	s.fieldLogger.Info("Starting scheduling loop")
	// Exhaust timer: drain the already-fired zero-duration timer so the
	// first Reset below starts from a clean state.
	if !timer.Stop() {
		<-timer.C
	}
L:
	for {
		timer.Reset(s.interval)
		select {
		case <-timer.C:
			s.fieldLogger.Debug("Calling schedule")
			s.config.Controller.Schedule()
		case <-dying:
			// Exhaust timer
			if !timer.Stop() {
				<-timer.C
			}
			break L
		}
	}
	s.fieldLogger.WithField("action", "stopping").Info("Finished scheduling loop")
	return nil
}
// SetInterval sets the delay between Schedule calls.
// This method is part of the lower facing API (towards Controller).
// NOTE(review): interval is written here and read in run without
// synchronisation; this is safe if SetInterval is only called from
// Init/Schedule — confirm no other callers exist.
func (s *Scheduler[T]) SetInterval(interval time.Duration) {
	s.fieldLogger.WithField("interval", interval).Debug("Setting interval")
	s.interval = interval
}

// GetQueue exposes the queue for querying.
// This method is part of the lower facing API (towards Controller).
func (s *Scheduler[T]) GetQueue() workqueue.QueueQuerier {
	return s.queue
}

// RequestProduction adds n jobs to the order book for producers to pick up.
// This method is part of the lower facing API (towards Controller).
func (s *Scheduler[T]) RequestProduction(n uint) {
	s.fieldLogger.WithField("n", n).Debug("Requesting production")
	s.orderBook.Add(n)
}

// RequestProductionUntilThreshold delegates to the order book's
// AddUntilThreshold — presumably topping up outstanding production to
// reach threshold; confirm against orderBook's semantics.
// This method is part of the lower facing API (towards Controller).
func (s *Scheduler[T]) RequestProductionUntilThreshold(threshold uint) {
	s.fieldLogger.WithField("threshold", threshold).Debug("Requesting production until threshold")
	s.orderBook.AddUntilThreshold(threshold)
}
// AcquireOrder acquires a production order from the internal order book of
// the Scheduler.
// The caller must fulfill the order by calling [ProductionOrder.Enqueue]
// [ProductionOrder.Total] times. The order will contain a maximum of max
// items.
//
// This method is part of the upper facing API (towards Producer).
func (s *Scheduler[T]) AcquireOrder(ctx context.Context, max uint) (ProductionOrder[T], error) {
	order, err := s.orderBook.AcquireOrder(ctx, max)
	if err != nil {
		return ProductionOrder[T]{}, fmt.Errorf("Scheduler.AcquireOrder: %w", err)
	}
	return ProductionOrder[T]{
		order:     order,
		scheduler: s,
	}, nil
}
// SchedulerStats is a point-in-time snapshot of scheduling counters.
type SchedulerStats struct {
	OrdersInQueue      uint64 // from orderBook stats.InQueue
	OrdersInProgress   uint64 // from orderBook stats.InProgress
	JobsRequested      uint64 // from orderBook stats.Requested
	JobsOrdered        uint64 // from orderBook stats.Ordered
	JobsEnqueued       uint64 // from orderBook stats.Fulfilled
	QueueObservedEmpty uint64 // times the queue looked empty right after an enqueue
}

// Stats implements ControllerScheduler by combining the order book's
// counters with the scheduler's own queue-empty observations.
func (s *Scheduler[T]) Stats() SchedulerStats {
	stats := s.orderBook.Stats()
	return SchedulerStats{
		OrdersInQueue:      stats.InQueue,
		OrdersInProgress:   stats.InProgress,
		JobsRequested:      stats.Requested,
		JobsOrdered:        stats.Ordered,
		JobsEnqueued:       stats.Fulfilled,
		QueueObservedEmpty: s.queueObservedEmpty.Load(),
	}
}
// enqueue submits one payload to the work queue and updates the
// queueObservedEmpty counter.
func (s *Scheduler[T]) enqueue(payload T) (*work.Job, error) {
	job, err := s.queue.Enqueuer().Enqueue(payload)
	if err != nil {
		return nil, fmt.Errorf("Scheduler.enqueue: %w", err)
	}
	info, err := s.queue.GetQueueInfo()
	if err != nil {
		return nil, fmt.Errorf("Scheduler.enqueue: %w", err)
	}
	// QueuedJobs is sampled after the enqueue, so <= 1 suggests the queue
	// was empty just before this job was added (consumers caught up with
	// production). NOTE(review): this is racy by nature — other producers
	// may enqueue between the two calls — presumably acceptable for a
	// heuristic counter.
	if info.QueuedJobs <= 1 {
		s.queueObservedEmpty.Add(1)
	}
	return job, nil
}
|
package main
import (
"fmt"
)
var (
	// coins is the total budget to hand out.
	coins = 50
	// users lists everyone who receives coins.
	users = []string{
		"Matthew",
		"Sarah",
		"Augustus",
		"Heidi",
		"Emilie",
		"Peter",
		"Giana",
		"Adriano",
		"Aaron",
		"Elizabeth",
	}
	// distribution records how many coins each user was given.
	distribution = make(map[string]int, len(users))
)

// myInt is a distinct named integer type.
type myInt int

// yourInt is a type alias for int.
type yourInt = int

var a rune

func main() {
	fmt.Println("剩下:", dispatchCoin())
	fmt.Println("分配情况:", distribution)
}

// dispatchCoin hands out coins to every user and returns what is left of
// the budget.
func dispatchCoin() int {
	for _, name := range users {
		coins -= coinsPerPerson(name)
	}
	return coins
}

// coinsPerPerson awards coins for each vowel (except 'a') in the name:
// e/E=1, i/I=2, o/O=3, u/U=4. The total is also recorded in distribution.
func coinsPerPerson(name string) (coins int) {
	for _, r := range name {
		switch r {
		case 'e', 'E':
			coins++
		case 'i', 'I':
			coins += 2
		case 'o', 'O':
			coins += 3
		case 'u', 'U':
			coins += 4
		}
	}
	distribution[name] = coins
	return
}
|
package socket
import (
"strconv"
"chatAppServer/models"
socketio "github.com/googollee/go-socket.io"
)
/*GroupData describes a socket group member (original: Socket组信息).
NOTE(review): the exported field Id would conventionally be ID; renaming
would break callers, so it is only noted here.*/
type GroupData struct {
	Id       int
	Phone    string
	NickName string
}
/*GroupMsgMount registers all group-chat listeners on the server
(original: 挂载监听 — mount listeners).
NOTE(review): socketio.Server is passed by value throughout this file —
confirm the library supports that rather than requiring *socketio.Server.*/
func GroupMsgMount(server socketio.Server) {
	JoinGroupRoom(server)
	SendGroupMsg(server)
}
/*JoinGroupRoom registers the "JoinGroupRoom" event: the connection joins
the room named "g_<GroupId>" (original: 加入群组房间 — join group room).*/
func JoinGroupRoom(server socketio.Server) {
	server.OnEvent("/socket.io/", "JoinGroupRoom", func(s socketio.Conn, msg models.SysGroupMsg) error {
		// Room name is derived from the group id.
		groupRoom := "g_" + strconv.Itoa(msg.GroupId)
		server.JoinRoom(groupRoom, s)
		return nil
	})
}
/*SendGroupMsg registers the "SendGroupMessage" event: the message is
broadcast to room "g_<GroupId>" as a "GroupRoomMsg" event and then
persisted via models.AddGroupMsg (original: 发送群组消息 — send group message).*/
func SendGroupMsg(server socketio.Server) {
	server.OnEvent("/socket.io/", "SendGroupMessage", func(s socketio.Conn, msg models.SysGroupMsg) error {
		// Derive the room name from the group id.
		groupRoom := "g_" + strconv.Itoa(msg.GroupId)
		server.BroadcastToRoom(groupRoom, "GroupRoomMsg", msg)
		models.AddGroupMsg(&msg)
		return nil
	})
}
|
package main
// celsiusToFahrenheit converts a temperature from degrees Celsius to
// degrees Fahrenheit using F = C*9/5 + 32.
//
// The previous implementation had two bugs: it applied the reverse
// (Fahrenheit-to-Celsius) formula, and the untyped constant expression
// (5 / 9) is integer constant division, which evaluates to 0 — so the
// function always returned 0.
func celsiusToFahrenheit(temp float64) float64 {
	return temp*9/5 + 32
}
/*
A lowercase (unexported) identifier is private to its package in Go.
Unexported functions still benefit from a short doc comment when their
purpose is not obvious from the name.
*/
|
package model_test
import (
"buildings/platform/model"
. "buildings/platform/model"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo spec for the in-memory Building repository: exercises Add, Get,
// GetAll, Update, and Delete against fixture data.
// NOTE(review): the Contexts share a single repo instance and rely on
// running in declaration order — later specs depend on mutations made by
// earlier ones.
var _ = Describe("Building", func() {
	var (
		dummyBuilding  Building // three-floor fixture, rebuilt before each spec
		dummy2Building Building // one-floor fixture
		updateBuilding Building // never assigned: the zero value is used in the "not found" update spec
		emptyBuilding  Building // zero value, used for the empty-update spec
	)
	BeforeEach(func() {
		dummyBuilding = Building{
			BuildingID: "1785cb0a-472c-444b-8862-9bc2c7ca1b18",
			Type:       "building",
			Address:    "Battery Road",
			Name:       "AxA Tower",
			Floors: []Floor{
				{
					Level:       1,
					Description: "Lobby",
				},
				{
					Level:       2,
					Description: "Canteen",
				},
				{
					Level:       3,
					Description: "Offices",
				},
			},
		}
		dummy2Building = Building{
			BuildingID: "1785cb0a-472c-444b-8862-9bc2c7ca1b19",
			Type:       "building",
			Address:    "Battery Road",
			Name:       "AxA Tower 2",
			Floors: []Floor{
				{
					Level:       1,
					Description: "Lobby",
				},
			},
		}
		emptyBuilding = Building{}
	})
	Describe("Init Repo", func() {
		// A single shared repository instance for all Contexts below.
		repo := model.New()
		Context("Add Building & Retrieve a Building Success", func() {
			It("should retrieve dummyBuilding", func() {
				repo.Add(dummyBuilding)
				building, err := repo.Get("1785cb0a-472c-444b-8862-9bc2c7ca1b18")
				if err != nil {
					Fail("Cannot find inserted building")
				}
				Expect(building.Address).To(Equal("Battery Road"))
				Expect(building.Name).To(Equal("AxA Tower"))
				// cannot verify int8
				//Expect(building.Floors[0].Level).To(Equal(1))
				// Expect(building.Floors[1].Level).To(Equal(2))
				Expect(building.Floors[0].Description).To(Equal("Lobby"))
				Expect(building.Floors[1].Description).To(Equal("Canteen"))
				Expect(building.Floors[2].Description).To(Equal("Offices"))
			})
		})
		Context("Retrieve Building Failed", func() {
			It("should not retrieve dummyBuilding", func() {
				// The ID is one character shorter than any stored ID.
				_, err := repo.Get("1785cb0a-472c-444b-8862-9bc2c7ca1b1")
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(Equal("Not found"))
			})
		})
		Context("Add new building & Retrieve All Success", func() {
			It("Should retrieve", func() {
				repo.Add(dummy2Building)
				items := repo.GetAll()
				Expect(items).To(HaveLen(2))
			})
		})
		Context("Update existing building success", func() {
			It("Should return code 1", func() {
				success := repo.Update("1785cb0a-472c-444b-8862-9bc2c7ca1b19", Building{Address: "Battery Road2"})
				building, _ := repo.Get("1785cb0a-472c-444b-8862-9bc2c7ca1b19")
				Expect(success).To(Equal(1))
				Expect(building.Address).To(Equal("Battery Road2"))
				Expect(building.Name).To(Equal("AxA Tower 2"))
				success = repo.Update("1785cb0a-472c-444b-8862-9bc2c7ca1b19", Building{Name: "AxA Tower 3"})
				building, _ = repo.Get("1785cb0a-472c-444b-8862-9bc2c7ca1b19")
				Expect(success).To(Equal(1))
				Expect(building.Address).To(Equal("Battery Road2"))
				Expect(building.Name).To(Equal("AxA Tower 3"))
			})
		})
		Context("Update failed as empty updateFields", func() {
			// NOTE(review): the Context says "failed" yet the spec expects
			// code 1 — confirm whether an empty update is meant to be a
			// no-op success.
			It("Should return code 1", func() {
				success := repo.Update("1785cb0a-472c-444b-8862-9bc2c7ca1b19", emptyBuilding)
				Expect(success).To(Equal(1))
			})
		})
		Context("Update failed as record not found", func() {
			It("Should return code -1", func() {
				// updateBuilding is the zero value here (never assigned).
				success := repo.Update("1785cb0a-472c-444b-8862-9bc2c7ca1b12", updateBuilding)
				Expect(success).To(Equal(-1))
			})
		})
		Context("Delete success as record found", func() {
			It("Should return code 1", func() {
				success := repo.Delete("1785cb0a-472c-444b-8862-9bc2c7ca1b19")
				items := repo.GetAll()
				Expect(success).To(Equal(1))
				Expect(items).To(HaveLen(1))
				Expect(items[0].BuildingID).To(Equal("1785cb0a-472c-444b-8862-9bc2c7ca1b18"))
			})
		})
		Context("Delete fail as record not found", func() {
			It("Should return code -1", func() {
				success := repo.Delete("1785cb0a-472c-444b-8862-9bc2c7ca1b15")
				items := repo.GetAll()
				Expect(success).To(Equal(-1))
				Expect(items).To(HaveLen(1))
				Expect(items[0].BuildingID).To(Equal("1785cb0a-472c-444b-8862-9bc2c7ca1b18"))
			})
		})
	})
})
|
package log
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"gorestfulapiforcms/pkg/setting"
"log"
"os"
"strconv"
"time"
)
// absolute_file_name is the full path of today's log file, computed in init.
var absolute_file_name = ""

// init computes today's log file path as
// <Log_path>/<year>/<month>/<day>.log, creating any missing directories
// and the file itself.
//
// Fixes over the previous version: the directory chain is created with a
// single os.MkdirAll instead of level-by-level Mkdir guarded by an
// ignored os.Stat error, and the *os.File returned when creating the log
// file is closed instead of being leaked.
func init() {
	now := time.Now()
	fileName := strconv.Itoa(now.Day()) + ".log"
	// Preserve the original layout: configured base path, then year, then
	// numeric month, each segment terminated by "/".
	dir := setting.Config().Log.Log_path + "/" +
		strconv.Itoa(now.Year()) + "/" +
		strconv.Itoa(int(now.Month())) + "/"
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		fmt.Println(err)
	}
	absolute_file_name = dir + fileName
	// Create the log file only if it does not exist yet; close the handle
	// immediately since writers reopen the file themselves.
	if _, err := os.Stat(absolute_file_name); err != nil {
		if f, errCreate := os.Create(absolute_file_name); errCreate == nil {
			f.Close()
		} else {
			fmt.Println(errCreate)
		}
	}
}
// LoggerToFile returns a gin middleware that logs every request (status,
// latency, client IP, method, URI) to today's log file via logrus, and
// appends the raw request headers via logIn.
//
// Fix over the previous version: logIn takes a string, but was handed the
// raw http.Header map directly, which does not compile; the header is now
// formatted with fmt.Sprint first.
func LoggerToFile() gin.HandlerFunc {
	// Open the log file for appending. The handle is deliberately kept open
	// for the lifetime of the middleware: logrus writes to it on every
	// request. NOTE(review): on open failure logger.Out ends up nil and
	// log lines are silently dropped — confirm that is acceptable.
	src, err := os.OpenFile(absolute_file_name, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
	if err != nil {
		fmt.Println("err", err)
	}
	logger := logrus.New()
	logger.Formatter = &logrus.TextFormatter{}
	logger.Out = src
	logger.SetLevel(logrus.DebugLevel)
	return func(c *gin.Context) {
		// Time the downstream handlers.
		startTime := time.Now()
		c.Next()
		latencyTime := time.Since(startTime)
		reqMethod := c.Request.Method
		reqUri := c.Request.RequestURI
		statusCode := c.Writer.Status()
		clientIP := c.ClientIP()
		// Record the raw request headers in the log file.
		logIn(fmt.Sprint(c.Request.Header))
		// Base access-log line.
		logger.Infof("%3d | %13v | %15s | %s | %s",
			statusCode,
			latencyTime,
			clientIP,
			reqMethod,
			reqUri,
		)
	}
}
// logIn appends one line of raw text to the current log file.
//
// Fixes over the previous version: the function now returns early when the
// file cannot be opened (instead of writing through a nil handle), and the
// file is closed when done — previously the handle was leaked on every call.
func logIn(param string) {
	src, err := os.OpenFile(absolute_file_name, os.O_APPEND|os.O_WRONLY, os.ModeAppend)
	if err != nil {
		fmt.Println("err", err)
		return
	}
	defer src.Close()
	log.New(src, "", 0).Println(param)
}
|
package main
import (
"database/sql"
"encoding/json"
"flag"
"fmt"
"github.com/garyburd/redigo/redis"
"net/http"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/gin-gonic/gin"
"strings"
)
// Mate holds one classmate row: a name plus per-subject scores (kept as
// strings, matching how they are stored in the database).
// NOTE(review): m_Name is unexported and non-idiomatic (MixedCaps would be
// mName); it is only ever scanned into within this package.
type Mate struct {
	m_Name  string
	Chinese string
	English string
	Math    string
}
// list is a scratch struct used to Scan the single column returned by
// "show tables".
type list struct {
	fileName string
}
// Connection and server configuration.
const (
	redis_addr      = "127.0.0.1:6379"
	database        = "mysql" // database/sql driver name
	database_user   = "root"
	database_passwd = "123456"
	mysql_addr      = "127.0.0.1:3306"
	databaseName    = "classmate"
	TableName       = "sort"
	server_listen_post  = ":8084"
	server_relativePath = "/events"
	// time_interval is the per-client-IP rate-limit window.
	time_interval = "1m"
)

// Valid time_interval units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
// Check prints err to stdout when it is non-nil; it is a best-effort
// logging helper and never aborts.
func Check(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
}
// Link_redis returns a redigo connection pool dialing the given address.
func Link_redis(addr string) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		// Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial.
		Dial: func() (redis.Conn, error) { return redis.Dial("tcp", addr) },
	}
}

var (
	pool        *redis.Pool // NOTE(review): never assigned in the visible code
	redisServer = flag.String("redisServer", redis_addr, "")
)
// Link_mysql opens the named MySQL database and verifies the connection
// with a Ping. It returns nil when the database cannot be reached.
//
// Fixes over the previous version: the sql.Open error is no longer
// discarded, and the connection limit is now set with SetMaxOpenConns(100).
// The old call, SetConnMaxLifetime(100), passed a bare integer — i.e. a
// connection lifetime of 100 nanoseconds — while the surrounding comment
// ("设置数据库最大连接数") states the intent was a maximum connection count.
func Link_mysql(databaseName string) (DB *sql.DB) {
	dataSourceName := database_user + ":" + database_passwd + "@tcp(" + mysql_addr + ")/" + databaseName
	DB, err := sql.Open(database, dataSourceName)
	if err != nil {
		fmt.Println("open database fail")
		return nil
	}
	// Maximum number of open connections to the database.
	DB.SetMaxOpenConns(100)
	// Maximum number of idle connections kept in the pool.
	DB.SetMaxIdleConns(10)
	// Verify the connection is actually usable.
	if err := DB.Ping(); err != nil {
		fmt.Println("open database fail")
		return nil
	}
	fmt.Println("connnect success")
	return DB
}
// Get_every_option decodes a JSON object string into a generic map.
// It returns nil (after printing a notice) when the input is not valid JSON.
func Get_every_option(jsonStr string) (m map[string]interface{}) {
	if err := json.Unmarshal([]byte(jsonStr), &m); err != nil {
		fmt.Println("json解析失败")
		return nil
	}
	return m
}
// Time_Parsing parses two timestamps in the "2006/01/02 15:04:05" layout.
// On any parse failure it returns two zero times and the error.
func Time_Parsing(time1 string, time2 string) (time.Time, time.Time, error) {
	const layout = "2006/01/02 15:04:05"
	first, err := time.Parse(layout, time1)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	second, err := time.Parse(layout, time2)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	return first, second, nil
}
func Time_check(time1 time.Time,time2 time.Time,time_interval string)bool{
//看看time2是否超过了 time1加一分钟
// ---1----2---
//获取一分钟之后的时间,和time2比较,
mm, _ := time.ParseDuration(time_interval)
mm1 := time1.Add(mm)
return mm1.Before(time2)
}
// Begin_server starts a gin HTTP server on listen_post with a single POST
// handler at relativePath. The handler first rate-limits clients by IP
// using a redis list of request timestamps, then interprets the posted
// JSON as a command against the classmate table:
//
//	action 1: insert a row ("Provide" = "name chinese english math")
//	action 2: delete by name ("Get_userID_data")
//	action 3: update scores by name
//	action 4: select one row, or all rows when "Get_userID_data" == "nil"
//	action 5: list the tables in the database
//
// Responses use {"error": "0"} for success and {"error": "1"} for failure.
//
// Fixes over the previous version: the success response for action 3
// returned "error": "1" (inconsistent with actions 1 and 2); action 5
// read only the first table because it used `if query.Next()` instead of
// a loop, and ignored the Query error; prepared statements are now closed
// via defer; and the local variable shadowing the builtin `len` was renamed.
func Begin_server(relativePath string, listen_post string, DB *sql.DB, Pool *redis.Pool) {
	router := gin.Default()
	router.POST(relativePath, func(c *gin.Context) {
		buf := make([]byte, 4096)
		n, _ := c.Request.Body.Read(buf)
		fmt.Println(string(buf[0:n]))
		conn := Pool.Get()
		defer conn.Close()
		// Rate limiting: keep up to 5 request timestamps per client IP.
		llen, err := conn.Do("LLEN", c.ClientIP())
		if err != nil {
			fmt.Println("redis LLEN error:", err)
		}
		fmt.Println(llen)
		if llen.(int64) == 5 {
			// The window is full: compare the oldest timestamp against now.
			index_zero, err := conn.Do("LINDEX", c.ClientIP(), 0)
			if err != nil {
				fmt.Println("redis LINDEX error:", err)
			}
			Now_time := time.Now().Format("2006/01/02 15:04:05")
			time1, time2, err := Time_Parsing(string(index_zero.([]uint8)), Now_time)
			if err != nil {
				fmt.Println(err)
				// The stored value is unparsable; treat the list as corrupted:
				// drop it and restart from the current timestamp.
				_, err = conn.Do("del", c.ClientIP())
				if err != nil {
					fmt.Println("redis del error:", err)
				}
				_, err = conn.Do("rpush", c.ClientIP(), Now_time)
				if err != nil {
					fmt.Println("redis rpush error:", err)
				}
			}
			if Time_check(time1, time2, time_interval) {
				// Oldest request is outside the window: rotate it out and
				// record this request.
				_, err = conn.Do("LPOP", c.ClientIP())
				if err != nil {
					fmt.Println("redis LPOP error:", err)
				}
				_, err = conn.Do("rpush", c.ClientIP(), Now_time)
				if err != nil {
					fmt.Println("redis rpush error:", err)
				}
			} else {
				// Too many requests within the window: reject.
				c.JSON(http.StatusOK, gin.H{
					"error": "1",
				})
				return
			}
		} else if llen.(int64) > 5 {
			// More than 5 entries should be impossible; treat the list as
			// corrupted: delete it and re-seed with the current timestamp.
			_, err = conn.Do("del", c.ClientIP())
			if err != nil {
				fmt.Println("redis del error:", err)
			}
			_, err = conn.Do("rpush", c.ClientIP(), time.Now().Format("2006/01/02 15:04:05"))
			if err != nil {
				fmt.Println("redis rpush error:", err)
			}
		} else {
			_, err = conn.Do("rpush", c.ClientIP(), time.Now().Format("2006/01/02 15:04:05"))
			if err != nil {
				fmt.Println("redis rpush error:", err)
			}
		}
		// Decode the received JSON body.
		m := Get_every_option(string(buf[0:n]))
		for index, val := range m {
			fmt.Println(index, val)
		}
		// Dispatch on the requested action.
		if m["action"] == "1" {
			// Insert a new row.
			if m["Provide"] != "" {
				mate := strings.Split(m["Provide"].(string), " ")
				for index, val := range mate {
					fmt.Println(index, val)
				}
				stmt, err := DB.Prepare("INSERT INTO " + TableName + " SET m_Name=?,Chinese=?,English=?,Math=?")
				if err != nil {
					fmt.Println(err)
					return
				}
				defer stmt.Close()
				_, err = stmt.Exec(mate[0], mate[1], mate[2], mate[3])
				if err != nil {
					fmt.Println(err)
					return
				}
				c.JSON(http.StatusOK, gin.H{
					"error": "0",
				})
				fmt.Println("已插入", mate[0], mate[1], mate[2], mate[3])
			} else {
				fmt.Println("插入元素不足,无法插入")
				c.JSON(http.StatusOK, gin.H{
					"error": "1",
				})
			}
		} else if m["action"] == "2" {
			// Delete by name.
			stmt, err := DB.Prepare("DELETE FROM " + TableName + " where m_Name=?")
			if err != nil {
				fmt.Println(err)
				return
			}
			defer stmt.Close()
			_, err = stmt.Exec(m["Get_userID_data"])
			if err != nil {
				fmt.Println(err)
				return
			}
			c.JSON(http.StatusOK, gin.H{
				"error": "0",
			})
		} else if m["action"] == "3" {
			// Update scores by name.
			if m["Provide"] != "" {
				mate := strings.Split(m["Provide"].(string), " ")
				for index, val := range mate {
					fmt.Println(index, val)
				}
				stmt, err := DB.Prepare("UPDATE " + TableName + " SET Chinese=?,English=?,Math=? where m_Name=?")
				if err != nil {
					fmt.Println(err)
					return
				}
				defer stmt.Close()
				_, err = stmt.Exec(mate[0], mate[1], mate[2], m["Get_userID_data"])
				if err != nil {
					fmt.Println(err)
					return
				}
				// Previously responded with "error": "1" on success;
				// "0" matches the success responses of actions 1 and 2.
				c.JSON(http.StatusOK, gin.H{
					"error": "0",
				})
				fmt.Println("已修改", m["Get_userID_data"], mate[0], mate[1], mate[2])
			} else {
				c.JSON(http.StatusOK, gin.H{
					"error": "1",
				})
			}
		}
		if m["action"] == "4" {
			var mate Mate
			if m["Get_userID_data"] == "nil" {
				// Return every row as "name chinese english math" strings.
				stmt, e := DB.Prepare("select * from " + TableName)
				Check(e)
				defer stmt.Close()
				query, e := stmt.Query()
				Check(e)
				every_mate := make([]string, 0, 10)
				for query.Next() {
					query.Scan(&mate.m_Name, &mate.Chinese, &mate.English, &mate.Math)
					every_mate = append(every_mate, mate.m_Name+" "+mate.Chinese+" "+mate.English+" "+mate.Math)
				}
				c.JSON(http.StatusOK, gin.H{
					"error": "0",
					"data":  every_mate,
				})
			} else if m["Get_userID_data"] != "" {
				// Return a single row as individual fields.
				stmt, e := DB.Prepare("select * from " + TableName + " where m_Name=?")
				Check(e)
				defer stmt.Close()
				query, e := stmt.Query(m["Get_userID_data"])
				Check(e)
				for query.Next() {
					e := query.Scan(&mate.m_Name, &mate.Chinese, &mate.English, &mate.Math)
					if e == nil {
						c.JSON(http.StatusOK, gin.H{
							"m_Name":  mate.m_Name,
							"Chinese": mate.Chinese,
							"English": mate.English,
							"Math":    mate.Math,
						})
					}
				}
			}
		} else if m["action"] == "5" {
			// List the tables in the current database.
			stmt, e := DB.Prepare("show tables")
			Check(e)
			defer stmt.Close()
			query, e := stmt.Query()
			Check(e) // previously this Query error was silently dropped
			m_list := make([]string, 0, 30)
			aa := new(list)
			// Previously `if query.Next()`, which listed only the first table.
			for query.Next() {
				e := query.Scan(&aa.fileName)
				m_list = append(m_list, aa.fileName)
				Check(e)
			}
			str, e := json.Marshal(m_list)
			if e != nil {
				fmt.Println("json化失败")
			}
			c.JSON(http.StatusOK, gin.H{
				"error": "0",
				"data":  string(str),
			})
		}
	})
	router.Run(listen_post)
}
// main wires the service together: it opens the MySQL and Redis
// connections, then starts the HTTP server that handles client JSON.
func main() {
	// Connect to the database.
	db := Link_mysql(databaseName)
	if db == nil {
		fmt.Println("连接mysql失败")
		return
	}
	defer db.Close()

	flag.Parse()

	redisConn := Link_redis(*redisServer)
	defer redisConn.Close()

	// Accept JSON from clients, validate the fields, and dispatch.
	Begin_server(server_relativePath, server_listen_post, db, redisConn)
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/gofiber/fiber"
"github.com/gofiber/logger"
"os"
)
// version is stamped at build time; the "$VERSION" placeholder is
// substituted by the build pipeline.
var version = "$VERSION"

// Hello is the JSON payload returned by the root endpoint.
type Hello struct {
	Hello   string `json:"hello"`
	Version string `json:"version"`
}
// main configures and starts the fiber HTTP server: a JSON-formatted
// request logger, a hello-world root route, a health check, and a kill
// switch that terminates the process.
func main() {
	settings := new(fiber.Settings)
	settings.CaseSensitive = true
	settings.Prefork = true
	app := fiber.New(settings)

	// Build a JSON log line template out of the variables fiber's logger
	// exposes, e.g. {"time":"${time}","ip":"${ip}",...}.
	fields := []string{
		"time", "referer", "protocol", "ip", "host",
		"method", "path", "url", "latency", "status",
	}
	pairs := make(map[string]string, len(fields))
	for _, f := range fields {
		pairs[f] = fmt.Sprintf("${%s}", f)
	}
	encoded, _ := json.Marshal(pairs)

	app.Use(logger.New(logger.Config{
		Filter:     nil,
		Format:     fmt.Sprintf("%s\n", encoded),
		TimeFormat: "",
		Output:     os.Stdout,
	}))

	app.All("/", func(c *fiber.Ctx) {
		c.JSON(&Hello{"world", version})
	})
	app.Get("/healthz", func(c *fiber.Ctx) {
		c.SendStatus(200)
	})
	// Deliberately terminates the process; the deferred Exit fires after
	// the handler returns.
	app.Get("/kill", func(_ *fiber.Ctx) {
		defer os.Exit(0)
	})

	app.Listen(os.Getenv("PORT"))
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
goctx "context"
"fmt"
"math"
"os"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/pingcap/log"
"github.com/pingcap/tidb/store/gcworker"
"github.com/pingcap/tidb/table"
"github.com/stretchr/testify/require"
)
// getIndex returns the index on t whose original (case-preserved) name
// equals name, or nil when the table has no such index.
func getIndex(t table.Table, name string) table.Index {
	for _, index := range t.Indices() {
		if index.Meta().Name.O != name {
			continue
		}
		return index
	}
	return nil
}
// checkDropIndex forces a GC pass over the delete ranges left behind by a
// dropped index, then verifies table consistency with admin check.
func (s *ddlSuite) checkDropIndex(t *testing.T, tableName string) {
	worker, err := gcworker.NewMockGCWorker(s.store)
	require.NoError(t, err)
	require.NoError(t, worker.DeleteRanges(goctx.Background(), uint64(math.MaxInt32)))
	s.mustExec(fmt.Sprintf("admin check table %s", tableName))
}
// TestIndex operations on table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c)).
// It runs each add/drop-index DDL while concurrently mutating the table,
// then verifies the index exists (or is fully gone) afterwards.
func TestIndex(t *testing.T) {
	// The schema-owner TTL must outlive the DDL lease.
	err := os.Setenv("tidb_manager_ttl", fmt.Sprintf("%d", *lease+5))
	if err != nil {
		log.Fatal("set tidb_manager_ttl failed")
	}
	s := createDDLSuite(t)
	defer s.teardown(t)

	// First fill the table with *dataNum rows using workerNum parallel
	// inserters; worker i owns the key range [base*i, base*i+base).
	workerNum := 10
	base := *dataNum / workerNum
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for i := 0; i < workerNum; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < base; j++ {
				k := base*i + j
				s.execInsert(
					fmt.Sprintf("insert into test_index values (%d, %d, %f, '%s')",
						k, randomInt(), randomFloat(), randomString(10)))
			}
		}(i)
	}
	wg.Wait()

	tbl := []struct {
		Query     string
		IndexName string
		Add       bool
	}{
		{"create index c1_index on test_index (c1)", "c1_index", true},
		{"drop index c1_index on test_index", "c1_index", false},
		{"create index c2_index on test_index (c2)", "c2_index", true},
		{"drop index c2_index on test_index", "c2_index", false},
		{"create index c3_index on test_index (c3)", "c3_index", true},
		{"drop index c3_index on test_index", "c3_index", false},
	}

	insertID := int64(*dataNum)
	for _, col := range tbl {
		done := s.runDDL(col.Query)
		// While the DDL runs, keep mutating the table every half-lease.
		ticker := time.NewTicker(time.Duration(*lease) * time.Second / 2)
	LOOP:
		for {
			select {
			case err := <-done:
				require.NoError(t, err)
				break LOOP
			case <-ticker.C:
				// add count new data
				// delete count old data randomly
				// update count old data randomly
				count := 10
				s.execIndexOperations(t, workerNum, count, &insertID)
			}
		}
		// Fix: stop the ticker as soon as this round's DDL completes. The
		// previous `defer ticker.Stop()` inside the loop kept every
		// iteration's ticker alive (and firing) until the whole test
		// returned.
		ticker.Stop()

		tbl := s.getTable(t, "test_index")
		index := getIndex(tbl, col.IndexName)
		if col.Add {
			require.NotNil(t, index)
			s.mustExec("admin check table test_index")
		} else {
			require.Nil(t, index)
			s.checkDropIndex(t, "test_index")
		}
	}
}
// execIndexOperations spawns workerNum goroutines, each performing count
// rounds of insert/delete/update against test_index, and waits for all of
// them. insertID is advanced atomically so inserted rows get unique
// primary keys.
func (s *ddlSuite) execIndexOperations(t *testing.T, workerNum, count int, insertID *int64) {
	var wg sync.WaitGroup
	wg.Add(workerNum)
	for w := 0; w < workerNum; w++ {
		go func() {
			defer wg.Done()
			for round := 0; round < count; round++ {
				id := atomic.AddInt64(insertID, 1)

				insert := fmt.Sprintf("insert into test_index values (%d, %d, %f, '%s')", id, randomInt(), randomFloat(), randomString(10))
				s.execInsert(insert)
				t.Logf("sql %s", insert)

				del := fmt.Sprintf("delete from test_index where c = %d", randomIntn(int(id)))
				s.mustExec(del)
				t.Logf("sql %s", del)

				update := fmt.Sprintf("update test_index set c1 = %d, c2 = %f, c3 = '%s' where c = %d", randomInt(), randomFloat(), randomString(10), randomIntn(int(id)))
				s.mustExec(update)
				t.Logf("sql %s", update)
			}
		}()
	}
	wg.Wait()
}
|
package dht
import (
"context"
"sync"
"sync/atomic"
"testing"
"time"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
queue "gx/ipfs/QmQFFp4ntkd4C14sP3FaH9WJyBuetuGUVo6dShNHvnoEvC/go-libp2p-peerstore/queue"
)
// TestDialQueueGrowsOnSlowDials checks that the dial queue grows its worker
// pool when dials are slow: dialFn blocks forever, so consumer demand should
// push the pool past DefaultDialQueueMinParallelism.
func TestDialQueueGrowsOnSlowDials(t *testing.T) {
	in := queue.NewChanQueue(context.Background(), queue.NewXORDistancePQ("test"))
	hang := make(chan struct{})

	// cnt counts dial attempts; dialFn never returns because hang is never
	// released in this test.
	var cnt int32
	dialFn := func(ctx context.Context, p peer.ID) error {
		atomic.AddInt32(&cnt, 1)
		<-hang
		return nil
	}

	// Enqueue 20 jobs.
	// NOTE(review): peer.ID(i) converts an int to a one-rune string; the IDs
	// only need to be distinct here.
	for i := 0; i < 20; i++ {
		in.EnqChan <- peer.ID(i)
	}

	// remove the mute period to grow faster.
	config := dqDefaultConfig()
	config.maxIdle = 10 * time.Minute
	config.mutePeriod = 0

	dq, err := newDialQueue(&dqParams{
		ctx:    context.Background(),
		target: "test",
		in:     in,
		dialFn: dialFn,
		config: config,
	})
	if err != nil {
		t.Error("unexpected error when constructing the dial queue", err)
	}

	// Acquire consumers; with every dial hung, each acquisition should
	// trigger pool growth.
	for i := 0; i < 4; i++ {
		_ = dq.Consume()
		time.Sleep(100 * time.Millisecond)
	}

	// Poll up to ~2s for the dial count to exceed the minimum parallelism.
	for i := 0; i < 20; i++ {
		if atomic.LoadInt32(&cnt) > int32(DefaultDialQueueMinParallelism) {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Errorf("expected 19 concurrent dials, got %d", atomic.LoadInt32(&cnt))
}
// TestDialQueueShrinksWithNoConsumers checks that the worker pool scales
// back down when completed dials find no consumers waiting for results.
func TestDialQueueShrinksWithNoConsumers(t *testing.T) {
	// reduce interference from the other shrink path.
	in := queue.NewChanQueue(context.Background(), queue.NewXORDistancePQ("test"))
	hang := make(chan struct{})

	// wg counts dialFn invocations; each dial then blocks until released
	// via hang.
	wg := new(sync.WaitGroup)
	wg.Add(13)
	dialFn := func(ctx context.Context, p peer.ID) error {
		wg.Done()
		<-hang
		return nil
	}

	config := dqDefaultConfig()
	config.maxIdle = 10 * time.Minute
	config.mutePeriod = 0
	dq, err := newDialQueue(&dqParams{
		ctx:    context.Background(),
		target: "test",
		in:     in,
		dialFn: dialFn,
		config: config,
	})
	if err != nil {
		t.Error("unexpected error when constructing the dial queue", err)
	}

	// acquire 3 consumers, everytime we acquire a consumer, we will grow the pool because no dial job is completed
	// and immediately returnable.
	for i := 0; i < 3; i++ {
		_ = dq.Consume()
	}

	// Enqueue 13 jobs, one per worker we'll grow to.
	for i := 0; i < 13; i++ {
		in.EnqChan <- peer.ID(i)
	}

	waitForWg(t, wg, 2*time.Second)

	// Release a few dialFn, but not all of them because downscaling happens when workers detect there are no
	// consumers to consume their values. So the other three will be these witnesses.
	for i := 0; i < 3; i++ {
		hang <- struct{}{}
	}

	// allow enough time for signalling and dispatching values to outstanding consumers.
	time.Sleep(1 * time.Second)

	// unblock the rest.
	for i := 0; i < 10; i++ {
		hang <- struct{}{}
	}

	// NOTE(review): dialFn closed over the wg *variable*, so reassigning it
	// here redirects subsequent Done calls to the fresh WaitGroup.
	wg = new(sync.WaitGroup)
	// we should now only have 6 workers, because all the shrink events will have been honoured.
	wg.Add(6)

	// enqueue more jobs.
	for i := 0; i < 6; i++ {
		in.EnqChan <- peer.ID(i)
	}

	// let's check we have 6 workers hanging.
	waitForWg(t, wg, 2*time.Second)
}
// Inactivity = workers are idle because the DHT query is progressing slow and is producing too few peers to dial.
// TestDialQueueShrinksWithWhenIdle checks that workers also scale down after
// being idle for maxIdle, independent of the no-consumer shrink path.
func TestDialQueueShrinksWithWhenIdle(t *testing.T) {
	in := queue.NewChanQueue(context.Background(), queue.NewXORDistancePQ("test"))
	hang := make(chan struct{})

	// wg counts dialFn invocations; dials block until released via hang.
	var wg sync.WaitGroup
	wg.Add(13)
	dialFn := func(ctx context.Context, p peer.ID) error {
		wg.Done()
		<-hang
		return nil
	}

	// Enqueue 13 jobs.
	for i := 0; i < 13; i++ {
		in.EnqChan <- peer.ID(i)
	}

	// Short idle period so the idle-shrink path triggers quickly.
	config := dqDefaultConfig()
	config.maxIdle = 1 * time.Second
	config.mutePeriod = 0
	dq, err := newDialQueue(&dqParams{
		ctx:    context.Background(),
		target: "test",
		in:     in,
		dialFn: dialFn,
		config: config,
	})
	if err != nil {
		t.Error("unexpected error when constructing the dial queue", err)
	}

	// keep up to speed with backlog by releasing the dial function every time we acquire a channel.
	for i := 0; i < 13; i++ {
		ch := dq.Consume()
		hang <- struct{}{}
		<-ch
		time.Sleep(100 * time.Millisecond)
	}

	// wait for MaxIdlePeriod.
	time.Sleep(1500 * time.Millisecond)

	// we should now only have 6 workers, because all the shrink events will have been honoured.
	// NOTE(review): 10 jobs are enqueued below but only 6 Dones are
	// expected; this relies on just 6 workers remaining to pick jobs up.
	wg.Add(6)

	// enqueue more jobs
	for i := 0; i < 10; i++ {
		in.EnqChan <- peer.ID(i)
	}

	// let's check we have 6 workers hanging.
	waitForWg(t, &wg, 2*time.Second)
}
// TestDialQueueMutePeriodHonored checks that repeated grow signals within
// the mute period are ignored, so the pool stays at its initial size.
func TestDialQueueMutePeriodHonored(t *testing.T) {
	in := queue.NewChanQueue(context.Background(), queue.NewXORDistancePQ("test"))
	hang := make(chan struct{})

	// wg counts dialFn invocations; dials block until released via hang.
	var wg sync.WaitGroup
	wg.Add(6)
	dialFn := func(ctx context.Context, p peer.ID) error {
		wg.Done()
		<-hang
		return nil
	}

	// Enqueue a bunch of jobs.
	for i := 0; i < 20; i++ {
		in.EnqChan <- peer.ID(i)
	}

	// Long mute period: grow signals within 2s of each other are dropped.
	config := dqDefaultConfig()
	config.mutePeriod = 2 * time.Second
	dq, err := newDialQueue(&dqParams{
		ctx:    context.Background(),
		target: "test",
		in:     in,
		dialFn: dialFn,
		config: config,
	})
	if err != nil {
		t.Error("unexpected error when constructing the dial queue", err)
	}

	// pick up three consumers.
	for i := 0; i < 3; i++ {
		_ = dq.Consume()
		time.Sleep(100 * time.Millisecond)
	}

	time.Sleep(500 * time.Millisecond)

	// we'll only have 6 workers because the grow signals have been ignored.
	waitForWg(t, &wg, 2*time.Second)
}
func waitForWg(t *testing.T, wg *sync.WaitGroup, wait time.Duration) {
t.Helper()
done := make(chan struct{})
go func() {
defer close(done)
wg.Wait()
}()
select {
case <-time.After(wait):
t.Error("timeout while waiting for WaitGroup")
case <-done:
}
}
|
package model
import (
"fmt"
)
// Response models the top-level envelope of a ProPublica API reply.
type Response struct {
	Status    string `json:"status"`    // API call status string
	Copyright string `json:"copyright"` // copyright notice returned by the API
}
// PropublicaModel prints a marker identifying this model package; it is a
// placeholder with no other behavior.
func PropublicaModel() {
	fmt.Print("PropublicaModel\n")
}
|
package main
import (
"bytes"
"fmt"
"github.com/coreos/go-etcd/etcd"
"github.com/miekg/dns"
"github.com/rcrowley/go-metrics"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Resolver answers DNS questions from records stored in etcd.
type Resolver struct {
	etcd       *etcd.Client // client used for all etcd lookups
	etcdPrefix string       // key prefix prepended to every etcd query
	defaultTtl uint32       // TTL applied when a record has no .ttl entry
}
// EtcdRecord pairs an etcd value node with the TTL resolved for it.
type EtcdRecord struct {
	node *etcd.Node
	ttl  uint32
}
// GetFromStorage looks up a key in etcd and returns a slice of nodes. It supports two storage structures;
// - File: /foo/bar/.A -> "value"
// - Directory: /foo/bar/.A/0 -> "value-0"
// /foo/bar/.A/1 -> "value-1"
// A sibling key ending in ".ttl" overrides the default TTL for the value
// node listed immediately before it.
func (r *Resolver) GetFromStorage(key string) (nodes []*EtcdRecord, err error) {
	counter := metrics.GetOrRegisterCounter("resolver.etcd.query_count", metrics.DefaultRegistry)
	error_counter := metrics.GetOrRegisterCounter("resolver.etcd.query_error_count", metrics.DefaultRegistry)
	counter.Inc(1)

	debugMsg("Querying etcd for " + key)
	// Recursive+sorted get so directory entries come back in key order,
	// which the lastValNode/.ttl pairing below depends on.
	response, err := r.etcd.Get(r.etcdPrefix+key, true, true)
	if err != nil {
		error_counter.Inc(1)
		return
	}

	// findKeys walks the etcd node tree, collecting value nodes into nodes.
	// ttl is the TTL to attach; tryTtl says whether to look up a sibling
	// ".ttl" key for a plain file node.
	var findKeys func(node *etcd.Node, ttl uint32, tryTtl bool)
	nodes = make([]*EtcdRecord, 0)

	findKeys = func(node *etcd.Node, ttl uint32, tryTtl bool) {
		if node.Dir == true {
			// lastValNode holds the most recent value node seen, so a
			// following ".ttl" entry can be applied to it.
			var lastValNode *etcd.Node
			for _, node := range node.Nodes {
				if strings.HasSuffix(node.Key, ".ttl") {
					ttlValue, err := strconv.ParseUint(node.Value, 10, 32)
					if err != nil {
						debugMsg("Unable to convert ttl value to int: ", node.Value)
					} else if lastValNode == nil {
						debugMsg(".ttl node with no matching value node: ", node.Key)
					} else {
						// Emit the pending value node with its custom TTL.
						findKeys(lastValNode, uint32(ttlValue), false)
						lastValNode = nil
						continue
					}
				} else {
					// A new value node: flush any pending one with the
					// default TTL first.
					if lastValNode != nil {
						findKeys(lastValNode, r.defaultTtl, false)
					}
					lastValNode = node
				}
			}
			// Flush a trailing value node that had no .ttl sibling.
			if lastValNode != nil {
				findKeys(lastValNode, r.defaultTtl, false)
			}
		} else {
			// If for some reason this is passed a ttl node unexpectedly, bail
			if strings.HasSuffix(node.Key, ".ttl") {
				debugMsg("Unexpected .ttl node", node.Key)
				return
			}

			// If we don't have a TTL, try and find one via a sibling .ttl key.
			if tryTtl {
				ttlKey := node.Key + ".ttl"
				debugMsg("Querying etcd for " + ttlKey)
				response, err := r.etcd.Get(ttlKey, false, false)
				if err == nil {
					ttlValue, err := strconv.ParseUint(response.Node.Value, 10, 32)
					if err != nil {
						debugMsg("Unable to convert ttl value to int: ", response.Node.Value)
					} else {
						ttl = uint32(ttlValue)
					}
				}
			}

			nodes = append(nodes, &EtcdRecord{node, ttl})
		}
	}

	findKeys(response.Node, r.defaultTtl, true)
	return
}
// Authority returns a dns.RR describing the known authority for the given
// domain. It recurses up the domain hierarchy (a.b.c -> b.c -> c) until it
// finds a matching SOA record.
//
// The SOA serial is rewritten to the current Unix time truncated to the
// hour. Returns nil when no SOA is found; a lookup error is silently
// treated as "no authority".
func (r *Resolver) Authority(domain string) (soa *dns.SOA) {
	tree := strings.Split(domain, ".")
	for i := range tree { // idiom: `for i := range` replaces `for i, _ := range`
		subdomain := strings.Join(tree[i:], ".")

		// Check for an SOA entry at this level.
		answers, err := r.LookupAnswersForType(subdomain, dns.TypeSOA)
		if err != nil {
			return
		}

		if len(answers) == 1 {
			soa = answers[0].(*dns.SOA)
			soa.Serial = uint32(time.Now().Truncate(time.Hour).Unix())
			return
		}
	}

	// Maintain a counter for when we don't have an authority for a domain.
	missing_counter := metrics.GetOrRegisterCounter("resolver.authority.missing_soa", metrics.DefaultRegistry)
	missing_counter.Inc(1)
	return
}
// Lookup responds to DNS messages of type Query, with a dns message containing Answers.
// In the event that the query's value+type yields no known records, this falls back to
// querying the given nameservers instead.
func (r *Resolver) Lookup(req *dns.Msg) (msg *dns.Msg) {
	q := req.Question[0]

	msg = new(dns.Msg)
	msg.SetReply(req)
	msg.Authoritative = true
	msg.RecursionAvailable = false // We're a nameserver, no recursion for you!

	answers := []dns.RR{}
	errors := []error{}
	errored := false

	var aChan chan dns.RR
	var eChan chan error

	// Only INET-class questions are answered from storage.
	if q.Qclass == dns.ClassINET {
		aChan, eChan = r.AnswerQuestion(q)
		answers, errors = gatherFromChannels(aChan, eChan)
	}

	errored = errored || len(errors) > 0

	if len(answers) == 0 {
		// If we failed to find any answers, let's keep looking up the tree for
		// any wildcard domain entries.
		parts := strings.Split(q.Name, ".")
		for level := 1; level < len(parts); level++ {
			domain := strings.Join(parts[level:], ".")
			if len(domain) > 1 {
				question := dns.Question{
					Name:   "*." + dns.Fqdn(domain),
					Qtype:  q.Qtype,
					Qclass: q.Qclass}

				aChan, eChan = r.AnswerQuestion(question)
				answers, errors = gatherFromChannels(aChan, eChan)

				errored = errored || len(errors) > 0

				if len(answers) > 0 {
					break
				}
			}
		}
	}

	miss_counter := metrics.GetOrRegisterCounter("resolver.answers.miss", metrics.DefaultRegistry)
	hit_counter := metrics.GetOrRegisterCounter("resolver.answers.hit", metrics.DefaultRegistry)
	error_counter := metrics.GetOrRegisterCounter("resolver.answers.error", metrics.DefaultRegistry)

	if errored {
		// TODO(tarnfeld): Send special TXT records with a server error response code
		error_counter.Inc(1)
		msg.SetRcode(req, dns.RcodeServerFailure)
	} else if len(answers) == 0 {
		// NXDOMAIN: attach the SOA (if any) so resolvers can cache the miss.
		soa := r.Authority(q.Name)
		miss_counter.Inc(1)
		msg.SetRcode(req, dns.RcodeNameError)
		if soa != nil {
			msg.Ns = []dns.RR{soa}
		} else {
			msg.Authoritative = false // No SOA? We're not authoritative
		}
	} else {
		hit_counter.Inc(1)
		// Rewrite answer names to the original query name (covers the
		// wildcard-match case above).
		for _, rr := range answers {
			rr.Header().Name = q.Name
			msg.Answer = append(msg.Answer, rr)
		}
	}

	return
}
// gatherFromChannels drains the answer and error channels into slices,
// returning once both channels have been closed.
func gatherFromChannels(rrsIn chan dns.RR, errsIn chan error) (rrs []dns.RR, errs []error) {
	rrs = []dns.RR{}
	errs = []error{}

	closed := 0
	for closed < 2 {
		select {
		case rr, open := <-rrsIn:
			if !open {
				closed++
				continue
			}
			rrs = append(rrs, rr)
		case err, open := <-errsIn:
			if !open {
				closed++
				continue
			}
			debugMsg("Caught error", err)
			errs = append(errs, err)
		}
	}
	return rrs, errs
}
// AnswerQuestion takes two channels, one for answers and one for errors. It will answer the
// given question writing the answers as dns.RR structures, and any errors it encounters along
// the way. The function will return immediately, and spawn off a bunch of goroutines
// to do the work, when using this function one should use a WaitGroup to know when all work
// has been completed.
//
// Both returned channels are closed once all lookups finish.
func (r *Resolver) AnswerQuestion(q dns.Question) (answers chan dns.RR, errors chan error) {
	answers = make(chan dns.RR)
	errors = make(chan error)

	typeStr := strings.ToLower(dns.TypeToString[q.Qtype])
	type_counter := metrics.GetOrRegisterCounter("resolver.answers.type."+typeStr, metrics.DefaultRegistry)
	type_counter.Inc(1)

	debugMsg("Answering question ", q)

	if q.Qtype == dns.TypeANY {
		// ANY: fan out one lookup per supported record type; the channels
		// close when all of them have finished.
		wg := sync.WaitGroup{}
		wg.Add(len(converters))

		go func() {
			wg.Wait()
			close(answers)
			close(errors)
		}()

		for rrType, _ := range converters {
			go func(rrType uint16) {
				// recover() swallows a send-on-closed-channel panic if this
				// goroutine outlives the channels.
				defer func() { recover() }()
				defer wg.Done()

				results, err := r.LookupAnswersForType(q.Name, rrType)
				if err != nil {
					errors <- err
				} else {
					for _, answer := range results {
						answers <- answer
					}
				}
			}(rrType)
		}
	} else if _, ok := converters[q.Qtype]; ok {
		// Single supported type: look it up, falling back to a CNAME when
		// no direct records exist.
		go func() {
			defer func() {
				close(answers)
				close(errors)
			}()

			records, err := r.LookupAnswersForType(q.Name, q.Qtype)
			if err != nil {
				errors <- err
			} else {
				if len(records) > 0 {
					for _, rr := range records {
						answers <- rr
					}
				} else {
					cnames, err := r.LookupAnswersForType(q.Name, dns.TypeCNAME)
					if err != nil {
						errors <- err
					} else {
						if len(cnames) > 1 {
							errors <- &RecordValueError{
								Message:       "Multiple CNAME records is invalid",
								AttemptedType: dns.TypeCNAME}
						} else if len(cnames) > 0 {
							answers <- cnames[0]
						}
					}
				}
			}
		}()
	} else {
		// nothing we can do
		close(answers)
		close(errors)
	}

	return answers, errors
}
// LookupAnswersForType fetches all etcd records for name/rrType and
// converts them into dns.RR answers. An etcd "key not found" error
// (code 100) is treated as an empty result rather than a failure.
func (r *Resolver) LookupAnswersForType(name string, rrType uint16) (answers []dns.RR, err error) {
	name = strings.ToLower(name)
	typeStr := dns.TypeToString[rrType]

	nodes, err := r.GetFromStorage(nameToKey(name, "/."+typeStr))
	if err != nil {
		etcdErr, isEtcdErr := err.(*etcd.EtcdError)
		if isEtcdErr && etcdErr.ErrorCode == 100 {
			return answers, nil
		}
		return
	}

	answers = make([]dns.RR, len(nodes))
	for i, node := range nodes {
		header := dns.RR_Header{Name: name, Class: dns.ClassINET, Rrtype: rrType, Ttl: node.ttl}
		rr, convErr := converters[rrType](node.node, header)
		if convErr != nil {
			debugMsg("Error converting type: ", convErr)
			return nil, convErr
		}
		answers[i] = rr
	}
	return
}
// nameToKey returns a string representing the etcd version of a domain,
// replacing dots with slashes and reversing it (foo.net. -> /net/foo),
// then appending suffix. Empty labels (from leading/trailing/double dots)
// are skipped.
func nameToKey(name string, suffix string) string {
	var key bytes.Buffer
	labels := strings.Split(name, ".")
	for i := len(labels); i > 0; i-- {
		label := labels[i-1]
		if label == "" {
			continue
		}
		key.WriteByte('/')
		key.WriteString(label)
	}
	key.WriteString(suffix)
	return key.String()
}
// Map of conversion functions that turn individual etcd nodes into dns.RR answers.
// Each converter receives the value node and a pre-built header and returns
// either an RR or a NodeConversionError describing why the value is invalid.
var converters = map[uint16]func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error){

	// A: value must be an IPv4 address.
	dns.TypeA: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		ip := net.ParseIP(node.Value)
		if ip == nil {
			err = &NodeConversionError{
				Node:          node,
				Message:       fmt.Sprintf("Failed to parse %s as IP Address", node.Value),
				AttemptedType: dns.TypeA,
			}
		} else if ip.To4() == nil {
			err = &NodeConversionError{
				Node:          node,
				Message:       fmt.Sprintf("Value %s isn't an IPv4 address", node.Value),
				AttemptedType: dns.TypeA,
			}
		} else {
			rr = &dns.A{header, ip}
		}
		return
	},

	// AAAA: value must be an IPv6 address.
	dns.TypeAAAA: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		ip := net.ParseIP(node.Value)
		if ip == nil {
			err = &NodeConversionError{
				Node:          node,
				Message:       fmt.Sprintf("Failed to parse IP Address %s", node.Value),
				AttemptedType: dns.TypeAAAA}
		} else if ip.To16() == nil {
			err = &NodeConversionError{
				Node:    node,
				Message: fmt.Sprintf("Value %s isn't an IPv6 address", node.Value),
				// Fix: this error previously reported AttemptedType
				// dns.TypeA (copy-paste from the A converter).
				AttemptedType: dns.TypeAAAA}
		} else {
			rr = &dns.AAAA{header, ip}
		}
		return
	},

	// TXT: value is used verbatim as a single text chunk.
	dns.TypeTXT: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		rr = &dns.TXT{header, []string{node.Value}}
		return
	},

	// CNAME: value is the target name, fully qualified if not already.
	dns.TypeCNAME: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		rr = &dns.CNAME{header, dns.Fqdn(node.Value)}
		return
	},

	// NS: value is the nameserver name, fully qualified if not already.
	dns.TypeNS: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		rr = &dns.NS{header, dns.Fqdn(node.Value)}
		return
	},

	// PTR: value must be a valid domain name.
	dns.TypePTR: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		labels, ok := dns.IsDomainName(node.Value)
		if ok && labels > 0 {
			rr = &dns.PTR{header, dns.Fqdn(node.Value)}
		} else {
			err = &NodeConversionError{
				Node:          node,
				Message:       fmt.Sprintf("Value '%s' isn't a valid domain name", node.Value),
				AttemptedType: dns.TypePTR}
		}
		return
	},

	// SRV: value is tab-separated "priority\tweight\tport\ttarget".
	dns.TypeSRV: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		parts := strings.SplitN(node.Value, "\t", 4)

		if len(parts) != 4 {
			err = &NodeConversionError{
				Node:          node,
				Message:       fmt.Sprintf("Value %s isn't valid for SRV", node.Value),
				AttemptedType: dns.TypeSRV}
		} else {
			priority, err := strconv.ParseUint(parts[0], 10, 16)
			if err != nil {
				return nil, err
			}
			weight, err := strconv.ParseUint(parts[1], 10, 16)
			if err != nil {
				return nil, err
			}
			port, err := strconv.ParseUint(parts[2], 10, 16)
			if err != nil {
				return nil, err
			}

			target := dns.Fqdn(parts[3])
			rr = &dns.SRV{
				header,
				uint16(priority),
				uint16(weight),
				uint16(port),
				target}
		}

		return
	},

	// SOA: value is tab-separated "ns\tmbox\trefresh\tretry\texpire\tminttl".
	// Serial is left zero here; Authority() overwrites it on use.
	dns.TypeSOA: func(node *etcd.Node, header dns.RR_Header) (rr dns.RR, err error) {
		parts := strings.SplitN(node.Value, "\t", 6)

		if len(parts) < 6 {
			err = &NodeConversionError{
				Node:          node,
				Message:       fmt.Sprintf("Value %s isn't valid for SOA", node.Value),
				AttemptedType: dns.TypeSOA}
		} else {
			refresh, err := strconv.ParseUint(parts[2], 10, 32)
			if err != nil {
				return nil, err
			}
			retry, err := strconv.ParseUint(parts[3], 10, 32)
			if err != nil {
				return nil, err
			}
			expire, err := strconv.ParseUint(parts[4], 10, 32)
			if err != nil {
				return nil, err
			}
			minttl, err := strconv.ParseUint(parts[5], 10, 32)
			if err != nil {
				return nil, err
			}

			rr = &dns.SOA{
				Hdr:     header,
				Ns:      dns.Fqdn(parts[0]),
				Mbox:    dns.Fqdn(parts[1]),
				Refresh: uint32(refresh),
				Retry:   uint32(retry),
				Expire:  uint32(expire),
				Minttl:  uint32(minttl)}
		}

		return
	},
}
|
package foundation
import (
"html/template"
"testing"
"github.com/stretchr/testify/assert"
)
// TestDelimiters exercises Delimiters validation and the Get accessor.
func TestDelimiters(t *testing.T) {
	assert := assert.New(t)

	var d Delimiters
	assert.False(d.isValid())

	d.Left = "[%"
	d.Right = "%]"
	assert.True(d.isValid())

	left, right := d.Get()
	assert.Equal("[%", left)
	assert.Equal("%]", right)
}
// TestTemplate verifies templateName strips the relative-path prefix from a
// full file path, with and without a trailing slash on the prefix.
func TestTemplate(t *testing.T) {
	assert := assert.New(t)

	type candidate struct {
		relativePath string
		filename     string
		expected     string
	}
	candidates := []candidate{
		{
			"/gophergala2016/source/docs/api",
			"/gophergala2016/source/docs/api/schemata/app.json",
			"schemata/app.json",
		},
		{
			"/gophergala2016/source/docs/",
			"/gophergala2016/source/docs/api/schemata/app.json",
			"api/schemata/app.json",
		},
		{
			"/gophergala2016/source/docs/api/",
			"/gophergala2016/source/docs/api/schemata/app.json",
			"schemata/app.json",
		},
	}
	for _, c := range candidates {
		name := templateName(c.relativePath, c.filename)
		// Fix: testify's Equal signature is (expected, actual); the
		// arguments were previously swapped, which garbles failure output.
		assert.Equal(c.expected, name)
	}
}
// TestApplyFilters checks that applyFilters panics on a nil template and
// tolerates filters whose functions are nil or ordinary funcs.
func TestApplyFilters(t *testing.T) {
	assert := assert.New(t)

	var filters []Filter
	assert.Panics(func() {
		applyFilters(nil, filters...)
	})

	filters = []Filter{
		{"nil", nil},
		{"str", func() string { return "" }},
	}
	tpl := template.New("")
	assert.NotPanics(func() {
		applyFilters(tpl, filters...)
	})
}
|
// This package helps in counting number of segment when we send text using twilio service.
// Twilio services charge on number of segments present in text.
package smsSegment
import (
"math"
)
// Define constants for the two SMS encodings and their per-segment
// character limits. Single-segment messages use the full limit; messages
// split into multiple segments lose a few characters per part to the
// concatenation (UDH) header.
const (
	EncodingGSM                    = "GSM"
	EncodingUCS2                   = "UCS-2"
	GSMChractersLimit              = 160 // GSM single-segment limit
	GSMChractersMultiMessageLimit  = 153 // GSM per-segment limit when concatenated
	UCS2ChractersLimit             = 70  // UCS-2 single-segment limit
	UCS2ChractersMultiMessageLimit = 67  // UCS-2 per-segment limit when concatenated
)
// SMS defines entity to store twilio SMS properties.
type SMS struct {
	text     string // the raw message body
	encoding string // detected encoding: EncodingGSM or EncodingUCS2
	chracters int   // total character cost (GSM extension chars count as 2)
	segments int    // number of SMS segments the text occupies
}
// GSMCharacterSet returns the GSM 03.38 character table mapped to the
// number of characters each one costs: basic-table characters cost 1,
// extension-table characters cost 2 (they are sent as an escape pair).
func GSMCharacterSet() map[string]int {
	// Basic table (cost 1), in GSM 03.38 order.
	const single = "@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞÆæßÉ !\"#¤%&'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑÜ§¿abcdefghijklmnopqrstuvwxyzäöñüà"
	// Extension table (cost 2).
	const double = "^{}\\[~]|€"

	characters := make(map[string]int, 136)
	for _, r := range single {
		characters[string(r)] = 1
	}
	for _, r := range double {
		characters[string(r)] = 2
	}
	return characters
}
// NewSMS constructs an SMS from text, computing its encoding, character
// count, and segment count up front.
func NewSMS(text string) *SMS {
	sms := &SMS{text: text}
	sms.getEncodingAndCount()
	switch sms.encoding {
	case EncodingGSM:
		sms.getGSMEncodingSegment()
	default:
		sms.getUCSEncodingSegment()
	}
	return sms
}
// GetSegments returns the number of SMS segments the text occupies.
func (sms *SMS) GetSegments() int {
	return sms.segments
}
// GetEncoding returns the detected encoding (EncodingGSM or EncodingUCS2).
func (sms *SMS) GetEncoding() string {
	return sms.encoding
}
// GetCharacters returns the total character cost of the text.
func (sms *SMS) GetCharacters() int {
	return sms.chracters
}
// getGSMEncodingSegment computes the segment count for GSM-encoded text:
// up to 160 characters fit in one segment; longer messages are split into
// concatenated parts of 153 characters each (the concatenation header
// consumes the remainder).
func (sms *SMS) getGSMEncodingSegment() {
	if sms.chracters <= GSMChractersLimit {
		sms.segments = 1
		return
	}
	ratio := float64(sms.chracters) / float64(GSMChractersMultiMessageLimit)
	sms.segments = int(math.Ceil(ratio))
}
// getUCSEncodingSegment computes the segment count for UCS-2-encoded text:
// up to 70 characters fit in one segment; longer messages are split into
// concatenated parts of 67 characters each (the concatenation header
// consumes the remainder).
func (sms *SMS) getUCSEncodingSegment() {
	if sms.chracters <= UCS2ChractersLimit {
		sms.segments = 1
		return
	}
	ratio := float64(sms.chracters) / float64(UCS2ChractersMultiMessageLimit)
	sms.segments = int(math.Ceil(ratio))
}
// getEncodingAndCount scans the text once, deciding between GSM and UCS-2
// and totalling the character cost. Any character outside the GSM table
// forces UCS-2 encoding; GSM extension characters count as two.
func (sms *SMS) getEncodingAndCount() {
	gsmSet := GSMCharacterSet()
	encoding := EncodingGSM
	total := 0
	for _, r := range sms.text {
		if cost, ok := gsmSet[string(r)]; ok {
			total += cost
		} else {
			encoding = EncodingUCS2
			total++
		}
	}
	sms.chracters = total
	sms.encoding = encoding
}
|
package lengthsafe
import (
"os"
"strings"
"sync/atomic"
"time"
"golang.org/x/sys/unix"
"git.scc.kit.edu/sdm/lsdf-checksum/internal/osutils"
)
const (
	// PathMax is the assumed maximum path length.
	PathMax uint = 4096
	// POSIXSymlinkMax is the POSIX-guaranteed minimum for SYMLINK_MAX.
	POSIXSymlinkMax uint = 255
	// findBoundsBaseLength is the first symlink length probed by findBounds.
	findBoundsBaseLength uint = 256
	// sampleStr is repeated to build symlink targets of arbitrary length.
	sampleStr string = "0123456789"
	// symlinkProgressWait is the polling interval while another goroutine
	// is probing SYMLINK_MAX.
	symlinkProgressWait time.Duration = 500 * time.Microsecond
)

// symlinkMax caches the discovered SYMLINK_MAX value.
var symlinkMax uint

// symlinkMaxIsSet is 1 once symlinkMax holds a valid value (atomic).
var symlinkMaxIsSet uint32

// symlinkMaxInProgress is 1 while a goroutine is probing (atomic).
var symlinkMaxInProgress uint32
// ensureSymlinkMaxSet lazily discovers the system's SYMLINK_MAX. The first
// caller runs the probe; concurrent callers poll until it finishes; later
// callers return immediately once the value is cached.
func ensureSymlinkMaxSet() error {
	if atomic.LoadUint32(&symlinkMaxIsSet) == 1 {
		return nil
	}
	inProgress := atomic.SwapUint32(&symlinkMaxInProgress, 1)
	if inProgress == 0 {
		err := findSymlinkMax()
		if err == nil {
			// Fix: symlinkMaxIsSet was read but never stored anywhere, so
			// every call re-ran the full probe. Mark the cache as valid.
			atomic.StoreUint32(&symlinkMaxIsSet, 1)
		}
		atomic.StoreUint32(&symlinkMaxInProgress, 0)
		return err
	}
	// Another goroutine is probing; poll until it is done.
	timer := time.NewTimer(symlinkProgressWait)
	defer timer.Stop()
	for {
		<-timer.C
		if atomic.LoadUint32(&symlinkMaxInProgress) == 0 {
			return nil
		}
		timer.Reset(symlinkProgressWait)
	}
}
// findSymlinkMax discovers the greatest accepted symlink target length and
// stores it in the package-level symlinkMax. It first brackets the limit
// with exponential probing, then checks the values adjacent to each bound,
// and finally falls back to a binary search between the bounds.
func findSymlinkMax() error {
	var ok bool
	var tryLength uint

	// Start by finding lower and upper bound through exponentially increasing steps
	lower, upper, err := findBounds()
	if err != nil {
		return err
	}

	// Check the two values next to each of the bounds...
	// E.g. often the limit is 4095, 1 less than the discovered upper bound of 4096
	tryLength, ok, err = searchDirectional(upper, false, 2)
	if err != nil {
		return err
	}
	if ok {
		symlinkMax = tryLength
		return nil
	}

	tryLength, ok, err = searchDirectional(lower, true, 2)
	if err != nil {
		return err
	}
	if ok {
		symlinkMax = tryLength
		return nil
	}

	// Finally do a binary search between the bounds
	tryLength, ok, err = searchBinary(lower, upper)
	if err != nil {
		return err
	}
	if ok {
		symlinkMax = tryLength
		return nil
	}

	return nil
}
// findBounds returns a lower and upper bound for SYMLINK_MAX by probing
// exponentially increasing symlink lengths. lower is the greatest length
// still accepted, upper the smallest rejected:
// lower <= SYMLINK_MAX < upper.
func findBounds() (lower uint, upper uint, err error) {
	lower, upper = 0, findBoundsBaseLength
	for {
		var accepted bool
		accepted, err = trySymlinkLength(upper)
		if err != nil {
			return 0, 0, err
		}
		if !accepted {
			return lower, upper, nil
		}
		lower, upper = upper, upper*2
	}
}
// searchDirectional probes up to n lengths adjacent to length, stepping
// upward (forward) or downward. As soon as a probe's outcome differs from
// the travel direction the accept/reject boundary is found, and the
// greatest accepted length is returned with ok=true. ok=false means no
// boundary was crossed within n steps.
func searchDirectional(length uint, forward bool, n uint) (uint, bool, error) {
	probe := length
	for i := uint(0); i < n; i++ {
		if forward {
			probe++
		} else {
			probe--
		}
		accepted, err := trySymlinkLength(probe)
		if err != nil {
			return 0, false, err
		}
		if accepted == forward {
			continue
		}
		// Boundary crossed. Stepping forward means the previous length was
		// the last accepted one.
		if forward {
			probe--
		}
		return probe, true, nil
	}
	return 0, false, nil
}
// searchBinary binary-searches between start (assumed accepted) and end
// (assumed rejected) for the greatest accepted symlink length.
func searchBinary(start, end uint) (uint, bool, error) {
	for end-start > 1 {
		mid := start + (end-start)/2
		accepted, err := trySymlinkLength(mid)
		if err != nil {
			return 0, false, err
		}
		if accepted {
			start = mid
		} else {
			end = mid
		}
	}
	return start, true, nil
}
// trySymlinkLength attempts to create a symlink whose target is exactly
// length bytes long. It returns (true, nil) when the OS accepts it,
// (false, nil) when the target is too long, and a non-nil error for any
// other failure. The probe symlink is removed on success.
func trySymlinkLength(length uint) (bool, error) {
	// The length calculation is: ceil(length / |sampleStr|)
	oldname := strings.Repeat(sampleStr, (int(length-1)/len(sampleStr))+1)[:length]

	name, err := osutils.CreateTempSymlink(oldname, lengthsafePrefix, "", "")
	if err != nil {
		if isNameTooLong(err) {
			return false, nil
		}
		return false, err
	}
	// Best-effort cleanup; a leftover probe link is harmless.
	_ = os.Remove(name)
	return true, nil
}
// isNameTooLong reports whether err is an *os.LinkError wrapping
// ENAMETOOLONG, i.e. the symlink target exceeded the system limit.
func isNameTooLong(err error) bool {
	linkErr, ok := err.(*os.LinkError)
	return ok && linkErr.Err == unix.ENAMETOOLONG
}
|
/*
Create a struct type, Virtmach, to track information about virtual machines.
Your record can track whatever you'd like, but values like ip, hostname, diskgb, and ram could all be possible values.
Create at least two (2) methods that allow you to interact with your struct. If you come up with your own working solution, push it to your SCM (GitHub),
then share a link to it with the class (if you're in an online class, post it in the chat).
*/
package main
import (
"fmt"
)
// vm tracks information about a single virtual machine.
type vm struct {
	ip       string // IP address (sample data; not validated)
	hostname string // human-readable host name
	diskgb   int    // disk size in gigabytes
	ram      int    // RAM in gigabytes (vmreport prints it as "gb")
}
// newram replaces the VM's RAM value with newRam.
func (v *vm) newram(newRam int) {
	v.ram = newRam
}
// vmreport prints a one-line RAM report for the given VM.
// Note the message hard-codes the word "laptop" regardless of machine type.
func vmreport(v *vm) {
	fmt.Printf("laptop %v has %v gb of ram\n", v.hostname, v.ram)
}
// main builds a sample VM record, reports its RAM, upgrades the RAM, and
// reports it again to demonstrate both vm methods.
func main() {
	virtmachine := vm{
		ip:       "10.0.0.999", // NOTE(review): 999 is not a valid IPv4 octet — sample data only
		hostname: "bills laptop",
		diskgb:   500,
		ram:      256,
	}
	vmreport(&virtmachine)
	virtmachine.newram(512)
	vmreport(&virtmachine)
}
|
package protoform
import "regexp"
// mapTypeExtract extracts the type-parameter text of a `Map<...>` occurrence
// in a string, using a lazily-compiled regular expression and caching the
// most recent match results.
type mapTypeExtract struct {
	expr    *regexp.Regexp // compiled pattern; nil until compile() runs
	matches [][]string     // result of the last FindAllStringSubmatch
}
// compile initializes the extraction pattern, which captures the text
// between the angle brackets of a `Map<...>` occurrence.
func (m *mapTypeExtract) compile() {
	m.expr = regexp.MustCompile(`.*Map\<(.+)\>.*`)
}
// matchIndex returns the index of the match used by defaultFind (always the
// first match).
func (m mapTypeExtract) matchIndex() int {
	return 0
}
// groupIndex returns the index of the capture group holding the Map's inner
// type (group 1 of the pattern).
func (m mapTypeExtract) groupIndex() int {
	return 1
}
// defaultFind runs the Map<...> pattern against s and returns the captured
// inner type, or "" when there is no match. Match results are cached on the
// receiver for later use by fnFind.
func (m *mapTypeExtract) defaultFind(s string) string {
	if m.expr == nil {
		m.compile()
	}
	m.matches = m.expr.FindAllStringSubmatch(s, -1)
	if !m.safe() {
		return ""
	}
	return m.matches[m.matchIndex()][m.groupIndex()]
}
// fnFind delegates match selection to the supplied findFunc, passing it the
// submatch results cached by the last defaultFind call.
func (m *mapTypeExtract) fnFind(fn findFunc) string {
	return fn(m.matches)
}
// safe reports whether the cached matches contain the match/group indices
// that defaultFind is about to read, guarding against out-of-range access.
func (m mapTypeExtract) safe() bool {
	if len(m.matches) <= m.matchIndex() {
		return false
	}
	return len(m.matches[m.matchIndex()]) > m.groupIndex()
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/julienschmidt/httprouter"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"io/ioutil"
"log"
"net/http"
)
type (
	// ComponentController represents the controller for operating on the
	// Component resource. It holds a shared mgo session used directly by
	// every handler.
	ComponentController struct {
		session *mgo.Session
	}
)
// NewComponentController provides a reference to a ComponentController with provided mongo session
// NewComponentController provides a reference to a ComponentController with
// the provided mongo session.
func NewComponentController(s *mgo.Session) *ComponentController {
	return &ComponentController{s}
}
// GetComponent retrieves an specific component
func (cc ComponentController) GetComponent(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
log.Printf("Will get component %s", p.ByName("id"))
// Grab id
id := p.ByName("id")
// Verify id is ObjectId, otherwise bail
if !bson.IsObjectIdHex(id) {
w.WriteHeader(404)
return
}
// Grab id
oid := bson.ObjectIdHex(id)
// Stub component
c := Component{}
// Fetch component
if err := cc.session.DB("heart").C("components").FindId(oid).One(&c); err != nil {
log.Printf("No such component: %v", err)
w.WriteHeader(500)
return
}
// Marshal provided interface into JSON structure
componentJSON, _ := json.Marshal(c)
// Write content-type, statuscode, payload
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", componentJSON)
}
// CreateComponent creates a new component resource
func (cc ComponentController) CreateComponent(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
log.Printf("Create component")
// Stub an component to be populated from the body
c := Component{}
param, _ := ioutil.ReadAll(r.Body)
json.Unmarshal(param, &c)
// Add an Id
c.ID = bson.NewObjectId()
// Write the component to mongo
cc.session.DB("heart").C("components").Insert(c)
// Marshal provided interface into JSON structure
componentJSON, _ := json.Marshal(c)
// Write content-type, statuscode, payload
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(201)
fmt.Fprintf(w, "%s", componentJSON)
}
// RemoveComponent removes an existing component resource
func (cc ComponentController) RemoveComponent(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
log.Printf("Remove component")
// Grab id
id := p.ByName("id")
// Verify id is ObjectId, otherwise bail
if !bson.IsObjectIdHex(id) {
w.WriteHeader(404)
return
}
// Grab id
oid := bson.ObjectIdHex(id)
// Remove component
if err := cc.session.DB("heart").C("components").RemoveId(oid); err != nil {
w.WriteHeader(500)
return
}
// Write status
w.WriteHeader(200)
}
// ListComponents lists all components.
func (cc ComponentController) ListComponents(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
log.Printf("Listing all components...")
// Stub component
var compList []Component
// Fetch component
if err := cc.session.DB("heart").C("components").Find(bson.M{}).All(&compList); err != nil {
log.Printf("Failed to get components: %v", err)
w.WriteHeader(500)
return
}
// Marshal provided interface into JSON structure
componentJSON, _ := json.Marshal(compList)
// Write content-type, statuscode, payload
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", componentJSON)
}
// ComputeRisk starts a risk computation for later retrieval.
func (cc ComponentController) ComputeRisk(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
// TODO This is a race condition.
if computingInProgress {
w.WriteHeader(409)
fmt.Fprintf(w, "Computation already in progress.")
return
}
queryValues := r.URL.Query()
ifParams := queryValues.Get("ignore_freetext")
log.Printf("Ignoring freetext: %v", ifParams)
go ComputeRisk(cc, ifParams)
// Write content-type, statuscode, payload
w.WriteHeader(200)
fmt.Fprintf(w, "Computation started.")
}
// GetVulnerabilities gets a list of all vulnerability names found during the compute step.
func (cc ComponentController) GetVulnerabilities(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
var vulnNames []string
coll := cc.session.DB("heart").C("risk")
cnt, _ := coll.Find(nil).Limit(1).Count()
if cnt == 0 {
w.WriteHeader(404)
fmt.Fprintf(w, "No vulnerabilities. Please run compute.")
return
}
if err := coll.Find(nil).Distinct("name", &vulnNames); err != nil {
log.Printf("Failed to fetch vulnerability names: %v", err)
w.WriteHeader(500)
fmt.Fprintf(w, "Failed to get vulnerabilities: %v", err)
return
}
// Marshal provided interface into JSON structure
componentJSON, _ := json.Marshal(vulnNames)
// Write content-type, statuscode, payload
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", componentJSON)
}
// GetVulnerability fetches a single vulnerability.
func (cc ComponentController) GetVulnerability(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
name := p.ByName("name")
var vulnSumm VulnerabilitySummary
coll := cc.session.DB("heart").C("risk")
cnt, _ := coll.Find(bson.M{"name": name}).Limit(1).Count()
if cnt == 0 {
w.WriteHeader(404)
fmt.Fprintf(w, "No such vulnerability.")
return
}
if err := coll.Find(bson.M{"name": name}).One(&vulnSumm); err != nil {
log.Printf("Failed to fetch vulnerability: %v", err)
w.WriteHeader(500)
fmt.Fprintf(w, "Failed to get vulnerability: %v", name)
return
}
// Marshal provided interface into JSON structure
componentJSON, _ := json.Marshal(vulnSumm)
// Write content-type, statuscode, payload
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
fmt.Fprintf(w, "%s", componentJSON)
}
// CheckComputation checks if a computation is running.
func (cc ComponentController) CheckComputation(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
if computingInProgress {
w.WriteHeader(200)
fmt.Fprintf(w, "Yes")
} else {
w.WriteHeader(404)
fmt.Fprintf(w, "No")
}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//208. Implement Trie (Prefix Tree)
//Implement a trie with insert, search, and startsWith methods.
//Note:
//You may assume that all inputs consist of lowercase letters a-z.
//type Trie struct {
//}
///** Initialize your data structure here. */
//func Constructor() Trie {
//}
///** Inserts a word into the trie. */
//func (this *Trie) Insert(word string) {
//}
///** Returns if the word is in the trie. */
//func (this *Trie) Search(word string) bool {
//}
///** Returns if there is any word in the trie that starts with the given prefix. */
//func (this *Trie) StartsWith(prefix string) bool {
//}
///**
// * Your Trie object will be instantiated and called as such:
// * obj := Constructor();
// * obj.Insert(word);
// * param_2 := obj.Search(word);
// * param_3 := obj.StartsWith(prefix);
// */
// Time Is Money
|
// Copyright 2021 Akamai Technologies, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors
import (
client "github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1"
gtm "github.com/akamai/AkamaiOPEN-edgegrid-golang/reportsgtm-v1" // Note: imports ./configgtm-v1_3
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"strconv"
"time"
)
var (
	// gtmPropertyTrafficExporter is the package-level exporter singleton;
	// createPropertyMaps reads its prefix and lookback configuration.
	gtmPropertyTrafficExporter GTMPropertyTrafficExporter
)
// GTMPropertyTrafficExporter collects Akamai GTM property traffic reports
// and exposes them as Prometheus metrics.
type GTMPropertyTrafficExporter struct {
	GTMConfig                GTMMetricsConfig                // domains/properties to scrape plus label options
	PropertyMetricPrefix     string                          // prefix for all emitted metric names
	PropertyLookbackDuration time.Duration                   // MaxAge for the request Summaries
	LastTimestamp            map[string]map[string]time.Time // index by domain, property
	PropertyRegistry         *prometheus.Registry            // registry the Summaries are registered with
}
func NewPropertyTrafficCollector(r *prometheus.Registry, gtmMetricsConfig GTMMetricsConfig, gtmMetricPrefix string, tstart time.Time, lookbackDuration time.Duration) *GTMPropertyTrafficExporter {
gtmPropertyTrafficExporter = GTMPropertyTrafficExporter{GTMConfig: gtmMetricsConfig, PropertyLookbackDuration: lookbackDuration}
gtmPropertyTrafficExporter.PropertyMetricPrefix = gtmMetricPrefix + "property_traffic"
gtmPropertyTrafficExporter.PropertyLookbackDuration = lookbackDuration
gtmPropertyTrafficExporter.PropertyRegistry = r
// Populate LastTimestamp per domain, property. Start time applies to all.
domainMap := make(map[string]map[string]time.Time)
for _, domain := range gtmMetricsConfig.Domains {
propertyReqSummaryMap[domain.Name] = make(map[string]prometheus.Summary)
tStampMap := make(map[string]time.Time) // index by zone name
for _, prop := range domain.Properties {
tStampMap[prop.Name] = tstart
// Create and register Summaries by domain, property. TODO: finer granualarity?
propertySumMap := createPropertyMaps(domain.Name, prop.Name)
r.MustRegister(propertySumMap)
}
domainMap[domain.Name] = tStampMap
}
gtmPropertyTrafficExporter.LastTimestamp = domainMap
return >mPropertyTrafficExporter
}
// propertyReqSummaryMap holds the request Summaries created by
// createPropertyMaps, keyed by domain name then property name.
var propertyReqSummaryMap = make(map[string]map[string]prometheus.Summary)
// createPropertyMaps builds the requests-per-interval Summary for one
// domain/property pair, caches it in propertyReqSummaryMap, and returns it
// for registration. It assumes propertyReqSummaryMap[domain] was already
// initialized by NewPropertyTrafficCollector and reads prefix/lookback
// settings from the package-level gtmPropertyTrafficExporter.
func createPropertyMaps(domain, prop string) prometheus.Summary {
	labels := prometheus.Labels{"domain": domain, "property": prop}
	propertyReqSummaryMap[domain][prop] = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Namespace:   gtmPropertyTrafficExporter.PropertyMetricPrefix,
			Name:        "requests_per_interval_summary",
			Help:        "Number of aggregate property requests per 5 minute interval (per domain)",
			MaxAge:      gtmPropertyTrafficExporter.PropertyLookbackDuration,
			BufCap:      prometheus.DefBufCap * 2,
			ConstLabels: labels,
		})
	return propertyReqSummaryMap[domain][prop]
}
// Describe implements prometheus.Collector by emitting a single descriptor
// for the exporter's metric family.
func (p *GTMPropertyTrafficExporter) Describe(ch chan<- *prometheus.Desc) {
	ch <- prometheus.NewDesc(p.PropertyMetricPrefix, "Akamai GTM Property Traffic", nil, nil)
}
// Collect implements prometheus.Collector. For every configured
// domain/property it fetches the traffic report since the last processed
// timestamp, emits per-datacenter metrics (when DC/nickname/target filters
// are configured) or a single aggregate metric, feeds the per-property
// Summary, and advances the last-processed timestamp.
// NOTE(review): only the first new report row is processed per scrape (see
// the `break` at the bottom of the DataRows loop) — confirm this pacing is
// intentional.
func (p *GTMPropertyTrafficExporter) Collect(ch chan<- prometheus.Metric) {
	log.Debugf("Entering GTM Property Traffic Collect")
	endtime := time.Now().UTC() // Use same current time for all zones
	// Collect metrics for each domain and property
	for _, domain := range p.GTMConfig.Domains {
		log.Debugf("Processing domain %s", domain.Name)
		for _, prop := range domain.Properties {
			// get last timestamp recorded. make sure diff > 5 mins.
			// NOTE(review): when endtime is within 5 minutes of lasttime this
			// pushes lasttime *forward* another 5 minutes, past endtime —
			// presumably to skip incomplete intervals; confirm intent.
			lasttime := p.LastTimestamp[domain.Name][prop.Name].Add(time.Minute)
			if endtime.Before(lasttime.Add(time.Minute * 5)) {
				lasttime = lasttime.Add(time.Minute * 5)
			}
			log.Debugf("Fetching property Report for property %s in domain %s.", prop.Name, domain.Name)
			propertyTrafficReport, err := retrievePropertyTraffic(domain.Name, prop.Name, lasttime, endtime)
			if err != nil {
				// Downgrade known API errors (500/400) to warnings and move on.
				apierr, ok := err.(client.APIError)
				if ok && apierr.Status == 500 {
					log.Warnf("Unable to get traffic report for property %s. Internal error ... Skipping.", prop.Name)
					continue
				}
				if ok && apierr.Status == 400 {
					log.Warnf("Unable to get traffic report for property %s. Internal ... Skipping.", prop.Name)
					log.Errorf("%s", err.Error())
					continue
				}
				log.Errorf("Unable to get traffic report for property %s ... Skipping. Error: %s", prop.Name, err.Error())
				continue
			}
			log.Debugf("Traffic Metadata: [%v]", propertyTrafficReport.Metadata)
			for _, reportInstance := range propertyTrafficReport.DataRows {
				instanceTimestamp, err := parseTimeString(reportInstance.Timestamp, GTMTrafficLongTimeFormat)
				if err != nil {
					log.Errorf("Instance timestamp invalid ... Skipping. Error: %s", err.Error())
					continue
				}
				// Skip rows at or before the last processed timestamp.
				if !instanceTimestamp.After(p.LastTimestamp[domain.Name][prop.Name]) {
					log.Debugf("Instance timestamp: [%v]. Last timestamp: [%v]", instanceTimestamp, p.LastTimestamp[domain.Name][prop.Name])
					log.Warnf("Attempting to re process report instance: [%v]. Skipping.", reportInstance)
					continue
				}
				// See if we missed an interval. Log warning for low
				log.Debugf("Instance timestamp: [%v]. Last timestamp: [%v]", instanceTimestamp, p.LastTimestamp[domain.Name][prop.Name])
				if instanceTimestamp.After(p.LastTimestamp[domain.Name][prop.Name].Add(time.Minute * (trafficReportInterval + 1))) {
					log.Warnf("Missing report interval. Current: %v, Last: %v", instanceTimestamp, p.LastTimestamp[domain.Name][prop.Name])
				}
				var aggReqs int64
				var baseLabels = []string{"domain", "property"}
				for _, instanceDC := range reportInstance.Datacenters {
					aggReqs += instanceDC.Requests // aggregate properties in scope
					if len(prop.DatacenterIDs) > 0 || len(prop.DCNicknames) > 0 || len(prop.Targets) > 0 {
						// create metric instance for properties in scope
						var tsLabels []string
						var filterVal string
						var filterLabel string
						// First matching filter (DC id, then nickname, then
						// target) determines the extra label.
						if intSliceContains(prop.DatacenterIDs, instanceDC.DatacenterId) {
							filterVal = strconv.Itoa(instanceDC.DatacenterId)
							filterLabel = "datacenterid"
							tsLabels = append(baseLabels, filterLabel)
						} else if stringSliceContains(prop.DCNicknames, instanceDC.Nickname) {
							filterVal = instanceDC.Nickname
							filterLabel = "nickname"
							tsLabels = append(baseLabels, filterLabel)
						} else if stringSliceContains(prop.Targets, instanceDC.TrafficTargetName) {
							filterVal = instanceDC.TrafficTargetName
							filterLabel = "target"
							tsLabels = append(baseLabels, filterLabel)
						}
						if filterVal != "" {
							// Match!
							if p.GTMConfig.TSLabel {
								tsLabels = append(tsLabels, "interval_timestamp")
							}
							ts := instanceTimestamp.Format(time.RFC3339)
							desc := prometheus.NewDesc(prometheus.BuildFQName(p.PropertyMetricPrefix, "", "requests_per_interval"), "Number of property requests per 5 minute interval (per domain)", tsLabels, nil)
							log.Debugf("Creating Requests metric. Domain: %s, Property: %s, %s: %s, Requests: %v, Timestamp: %v", domain.Name, prop.Name, filterLabel, filterVal, float64(instanceDC.Requests), ts)
							var reqsmetric prometheus.Metric
							if p.GTMConfig.TSLabel {
								reqsmetric = prometheus.MustNewConstMetric(
									desc, prometheus.GaugeValue, float64(instanceDC.Requests), domain.Name, prop.Name, filterVal, ts)
							} else {
								reqsmetric = prometheus.MustNewConstMetric(
									desc, prometheus.GaugeValue, float64(instanceDC.Requests), domain.Name, prop.Name, filterVal)
							}
							// Attach the report timestamp unless explicitly disabled.
							if p.GTMConfig.UseTimestamp != nil && !*p.GTMConfig.UseTimestamp {
								ch <- reqsmetric
							} else {
								ch <- prometheus.NewMetricWithTimestamp(instanceTimestamp, reqsmetric)
							}
						}
					}
				} // properties in time interval end
				if len(prop.DatacenterIDs) < 1 && len(prop.DCNicknames) < 1 && len(prop.Targets) < 1 {
					// No filters. Create agg instance
					tsLabels := baseLabels
					if p.GTMConfig.TSLabel {
						tsLabels = append(tsLabels, "interval_timestamp")
					}
					ts := instanceTimestamp.Format(time.RFC3339)
					desc := prometheus.NewDesc(prometheus.BuildFQName(p.PropertyMetricPrefix, "", "requests_per_interval"), "Number of property requests per 5 minute interval (per domain)", tsLabels, nil)
					log.Debugf("Creating Requests metric. Domain: %s, Property: %s, Requests: %v, Timestamp: %v", domain.Name, prop.Name, float64(aggReqs), ts)
					var reqsmetric prometheus.Metric
					if p.GTMConfig.TSLabel {
						reqsmetric = prometheus.MustNewConstMetric(
							desc, prometheus.GaugeValue, float64(aggReqs), domain.Name, prop.Name, ts)
					} else {
						reqsmetric = prometheus.MustNewConstMetric(
							desc, prometheus.GaugeValue, float64(aggReqs), domain.Name, prop.Name)
					}
					if p.GTMConfig.UseTimestamp != nil && !*p.GTMConfig.UseTimestamp {
						ch <- reqsmetric
					} else {
						ch <- prometheus.NewMetricWithTimestamp(instanceTimestamp, reqsmetric)
					}
				}
				// Update summary
				propertyReqSummaryMap[domain.Name][prop.Name].Observe(float64(aggReqs))
				// Update last timestamp processed
				if instanceTimestamp.After(p.LastTimestamp[domain.Name][prop.Name]) {
					log.Debugf("Updating Last Timestamp from %v TO %v", p.LastTimestamp[domain.Name][prop.Name], instanceTimestamp)
					p.LastTimestamp[domain.Name][prop.Name] = instanceTimestamp
				}
				// only process one each interval!
				break
			} // interval end
		} // property end
	} // domain end
}
func retrievePropertyTraffic(domain, prop string, start, end time.Time) (*gtm.PropertyTrafficResponse, error) {
qargs := make(map[string]string)
// Get valid Traffic Window
var err error
propertyTrafficWindow, err := gtm.GetPropertiesTrafficWindow()
if err != nil {
return nil, err
}
// Make sure provided start and end are in range
if propertyTrafficWindow.StartTime.Before(start) {
if propertyTrafficWindow.EndTime.After(start) {
qargs["start"], err = convertTimeFormat(start, time.RFC3339)
} else {
qargs["start"], err = convertTimeFormat(propertyTrafficWindow.EndTime, time.RFC3339)
}
} else {
qargs["start"], err = convertTimeFormat(propertyTrafficWindow.StartTime, time.RFC3339)
}
if err != nil {
return nil, err
}
if propertyTrafficWindow.EndTime.Before(end) {
qargs["end"], err = convertTimeFormat(propertyTrafficWindow.EndTime, time.RFC3339)
} else {
qargs["end"], err = convertTimeFormat(end, time.RFC3339)
}
if err != nil {
return nil, err
}
if qargs["start"] >= qargs["end"] {
resp := >m.PropertyTrafficResponse{}
resp.DataRows = make([]*gtm.PropertyTData, 0)
log.Warnf("Start or End time outside valid report window")
return resp, nil
}
resp, err := gtm.GetTrafficPerProperty(domain, prop, qargs)
if err != nil {
return >m.PropertyTrafficResponse{}, err
}
//DataRows is list of pointers
sortPropertyDataRowsByTimestamp(resp.DataRows)
return resp, nil
}
|
package main
import (
"fmt"
"html/template"
"net/url"
"strconv"
"time"
"github.com/go-macaron/gzip"
"github.com/looyun/feedall/controllers"
"github.com/looyun/feedall/middleware"
"github.com/looyun/feedall/models"
"github.com/looyun/feedall/parse"
macaron "gopkg.in/macaron.v1"
)
// Time spans expressed in seconds, used by the TimeSince template helper.
// Month and Year are calendar approximations (30-day months, 12 such months
// per year).
const (
	Minute = 60
	Hour   = 60 * Minute
	Day    = 24 * Hour
	Week   = 7 * Day
	Month  = 30 * Day
	Year   = 12 * Month
)
// main initializes the data store, starts the background feed parser, and
// serves the feedall HTTP API with Macaron.
func main() {
	models.Init()
	m := macaron.Classic()
	m.Use(gzip.Gziper())
	// Feed fetching/parsing runs concurrently with the web server.
	go parse.Parse()
	m.Use(macaron.Renderer(macaron.RenderOptions{
		Funcs: []template.FuncMap{map[string]interface{}{
			// str2html marks raw feed content as safe template HTML.
			// NOTE(review): no sanitization — this trusts feed content.
			"str2html": func(raw string) template.HTML {
				return template.HTML(raw)
			},
			// UrlParse percent-encodes a string for use in a query value.
			"UrlParse": func(raw string) string {
				return url.QueryEscape(raw)
			},
			// TimeSince renders a unix-seconds string as a coarse
			// human-readable age ("now", "5m", "3h", "Jan 2006", ...).
			// A future timestamp is treated as an age of the same magnitude.
			"TimeSince": func(s string) string {
				now := time.Now()
				i, _ := strconv.ParseInt(s, 10, 64)
				then := time.Unix(i, 0)
				diff := now.Unix() - then.Unix()
				if then.After(now) {
					diff = then.Unix() - now.Unix()
				}
				switch {
				case diff <= 0:
					return "now"
				case diff <= 2:
					return "1s"
				case diff < 1*Minute:
					return strconv.FormatInt(diff, 10) + "s"
				case diff < 2*Minute:
					return "1m"
				case diff < 1*Hour:
					return strconv.FormatInt(diff/Minute, 10) + "m"
				case diff < 2*Hour:
					return "1h"
				case diff < 1*Day:
					return strconv.FormatInt(diff/Hour, 10) + "h"
				case diff < 2*Day:
					return "1d"
				case diff < 1*Week:
					return strconv.FormatInt(diff/Day, 10) + "d"
				case diff < 2*Week:
					return "1w"
				default:
					// Older than two weeks: "Jan 2006" style.
					return then.Month().String()[:3] + " " + strconv.Itoa(then.Year())
				}
			},
		}},
		IndentJSON: true,
	}))
	// NOTE(review): hard-coded cookie secret — should come from configuration.
	m.SetDefaultCookieSecret("feedall")
	m.Post("/login", func(ctx *macaron.Context) {
		token, err := controllers.Login(ctx)
		if err != nil {
			fmt.Println(err)
			ctx.Error(400, "error")
			return
		}
		ctx.JSON(200, map[string]string{"token": token})
	})
	m.Post("/signup", func(ctx *macaron.Context) {
		err := controllers.Signup(ctx)
		if err != nil {
			fmt.Println(err)
			ctx.Error(400, "error")
		} else {
			ctx.Status(200)
		}
	})
	// NOTE(review): empty handler — this endpoint accepts posts and does nothing.
	m.Post("/item/:feedlink", func(ctx *macaron.Context) {
	})
	m.Group("/api", func() {
		// Authenticated, per-user routes (guarded by the JWT middleware below).
		m.Group("/my", func() {
			m.Get("/feeds", func(ctx *macaron.Context) {
				feeds, err := controllers.GetUserFeeds(ctx)
				if err != nil {
					fmt.Println(err)
					ctx.Error(400, "error")
				} else {
					ctx.JSON(200, &feeds)
				}
			})
			m.Get("/items", func(ctx *macaron.Context) {
				items, err := controllers.GetUserItems(ctx)
				if err != nil {
					fmt.Println(err)
					ctx.Error(400, "error")
				} else {
					ctx.JSON(200, &items)
				}
			})
			m.Get("/stars", func(ctx *macaron.Context) {
				items, err := controllers.GetStarItems(ctx)
				if err != nil {
					fmt.Println(err)
					ctx.Error(400, "error")
				} else {
					ctx.JSON(200, &items)
				}
			})
			m.Post("/subscribe", func(ctx *macaron.Context) {
				err := controllers.Subscribe(ctx)
				if err != nil {
					fmt.Println(err)
					ctx.Error(400, "error")
				} else {
					ctx.Status(200)
				}
			})
			// m.Post("/del", func(ctx *macaron.Context) {
			// 	if controllers.DelFeed(ctx) {
			// 		fmt.Println("Delete feed succeed!")
			// 		ctx.Redirect("/manage")
			// 	} else {
			// 		fmt.Println("Delete feed false!")
			// 		ctx.Redirect("/manage")
			// 	}
			// })
		}, middleware.ValidateJWTToken())
		// Public, read-only routes.
		m.Get("/feeds/recommand/:n:int", func(ctx *macaron.Context) {
			feeds := controllers.GetFeeds(ctx)
			ctx.JSON(200, &feeds)
		})
		m.Get("/feed/:id/", func(ctx *macaron.Context) {
			feed := controllers.GetFeed(ctx)
			ctx.JSON(200, &feed)
		})
		m.Get("/feed/:id/items", func(ctx *macaron.Context) {
			items := controllers.GetFeedItems(ctx)
			ctx.JSON(200, &items)
		})
		m.Get("/item/:id", func(ctx *macaron.Context) {
			item := controllers.GetItem(ctx)
			ctx.JSON(200, &item)
		})
		m.Get("/items/random/:n:int", func(ctx *macaron.Context) {
			numbers := ctx.ParamsInt(":n")
			items := controllers.GetRandomItem(ctx, numbers)
			ctx.JSON(200, &items)
		})
		// NOTE(review): the :n parameter is ignored here; the count is fixed at 5.
		m.Get("/items/recommand/:n:int", func(ctx *macaron.Context) {
			items := controllers.GetItems(ctx, 5)
			ctx.JSON(200, &items)
		})
	})
	m.Run()
}
|
package models
import (
"fmt"
"cloud.google.com/go/storage"
"context"
"api-gaming/internal/config"
)
// GetVideo returns the configured Google Cloud Storage bucket handle used
// for video objects.
func GetVideo() *storage.BucketHandle {
	return config.StorageConn()
}
// ReadFile opens a reader for the named object in the configured Google
// Cloud Storage bucket, requesting the raw (compressed) bytes. It returns
// nil when the object cannot be opened.
//
// The caller owns the returned reader and must Close it. The previous
// implementation deferred Close here, which closed the reader before the
// caller could use it (and would also have called Close on a nil reader on
// the error path).
func ReadFile(fileName string) *storage.Reader {
	bucket := config.StorageConn()
	obj := bucket.Object(fileName).ReadCompressed(true)
	rc, err := obj.NewReader(context.Background())
	if err != nil {
		fmt.Println("readFile: unable to open file", err)
		return nil
	}
	return rc
}
|
/*
* @lc app=leetcode.cn id=14 lang=golang
*
* [14] 最长公共前缀
*/
package solution
import "strings"
// @lc code=start
// longestCommonPrefix returns the longest string that is a prefix of every
// element of strs. An empty slice yields "".
func longestCommonPrefix(strs []string) string {
	if len(strs) == 0 {
		return ""
	}
	if len(strs) == 1 {
		return strs[0]
	}
	var b strings.Builder
	first := strs[0]
	for i := 0; i < len(first); i++ {
		c := first[i]
		// Stop at the first position where any string ends or disagrees.
		for _, s := range strs[1:] {
			if i >= len(s) || s[i] != c {
				return b.String()
			}
		}
		b.WriteByte(c)
	}
	// The whole first string is a common prefix.
	return b.String()
}
// @lc code=end
|
package string
import (
"strings"
"regexp"
)
/**
算法的实现逻辑可以参考: https://segmentfault.com/a/1190000004881457
设计思想:
将中文数学转换成阿拉伯数字。
将中文权位转换成10的位数。
对每个权位依次转换成位数并求和。
零直接忽略即可。
解决的问题:
1.一旦字符串中 含有非数字 非数学单位的文字 解析会出问题 已解决
2.字符串为十,解析出问题。直接判断第0个位置如果是权重单位,则特殊处理 已解决
*/
// chNumChar maps Chinese digit characters (plain and financial forms, plus
// the colloquial 两 for 2) to their integer values.
var chNumChar = map[string]int{
	"零": 0, "一": 1, "二": 2, "三": 3, "四": 4, "五": 5, "六": 6, "七": 7, "八": 8, "九": 9,
	"壹": 1, "贰": 2, "叁": 3, "肆": 4, "伍": 5, "陆": 6, "柒": 7, "捌": 8, "玖": 9,
	"两": 2,
}
// valueObj describes a Chinese place-value character: its multiplier and
// whether it is a section unit (万/亿) that closes a whole numeral group.
type valueObj struct {
	value   int  // multiplier, e.g. 10 for 十
	secUnit bool // true for the section units 万 and 亿
}
// chNumMapValue maps place-value characters to their multipliers; 万 and 亿
// are section units that multiply the whole accumulated section.
var chNumMapValue = map[string]valueObj{
	"十": valueObj{10, false},
	"百": valueObj{100, false},
	"千": valueObj{1000, false},
	"万": valueObj{10000, true},
	"亿": valueObj{100000000, true},
}

// ChineseNumPattern matches (and captures) a contiguous run of Chinese
// numeral and place-value characters.
const ChineseNumPattern = `([零一二三四五六七八九壹贰叁肆伍陆柒捌玖两十百千万亿]+)`
// Chinese2Int converts the first run of Chinese numerals found in originStr
// to an int, e.g. "第二百三十六" -> 236. It returns 0 when the string contains
// no Chinese numeral characters (the previous version panicked in that case).
//
// Algorithm: digits set the pending `number`; a plain place value (十/百/千)
// multiplies it into the current section; a section unit (万/亿) multiplies
// the whole section and folds it into the result. A leading place value
// (e.g. "十" alone) is treated as 1×unit, and 零 is simply skipped.
func Chinese2Int(originStr string) int {
	// Extract the numeral run, dropping any unrelated characters.
	re := regexp.MustCompile(ChineseNumPattern)
	result := re.FindAllStringSubmatch(originStr, 1)
	if len(result) == 0 {
		return 0
	}
	chars := strings.Split(result[0][1], "")
	var number, sectionNum, rtn int
	for index, val := range chars {
		if digit, ok := chNumChar[val]; ok {
			number = digit
			// A trailing digit is the ones place; add it directly.
			if index == len(chars)-1 {
				sectionNum += digit
			}
			continue
		}
		unit, ok := chNumMapValue[val]
		if !ok {
			continue // defensive; the regex should exclude other characters
		}
		if unit.secUnit {
			// 万/亿 scale the whole accumulated section.
			sectionNum = (sectionNum + number) * unit.value
			rtn += sectionNum
			sectionNum = 0
		} else if index == 0 {
			// A leading unit implies a coefficient of 1 (e.g. "十" == 10).
			sectionNum += unit.value
		} else {
			sectionNum += number * unit.value
		}
		number = 0
	}
	return rtn + sectionNum
}
|
package main
import "fmt"
// 236. 二叉树的最近公共祖先
// 给定一个二叉树, 找到该树中两个指定节点的最近公共祖先。
// 百度百科中最近公共祖先的定义为:“对于有根树 T 的两个结点 p、q,最近公共祖先表示为一个结点 x,满足 x 是 p、q 的祖先且 x 的深度尽可能大(一个节点也可以是它自己的祖先)。”
// 说明:
// 所有节点的值都是唯一的。
// p、q 为不同节点且均存在于给定的二叉树中。
// https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-tree/
// main builds the sample tree
//
//	  3
//	 / \
//	5   1
//	   /
//	  0
//
// and prints the lowest common ancestor of nodes 5 and 0 as computed by both
// implementations (expected: the root, value 3).
func main() {
	p := &TreeNode{Val: 5}
	q := &TreeNode{Val: 0}
	tree := &TreeNode{
		Val:  3,
		Left: p,
		Right: &TreeNode{
			Val:  1,
			Left: q,
		},
	}
	fmt.Println(lowestCommonAncestor(tree, p, q))
	fmt.Println(lowestCommonAncestor2(tree, p, q))
}
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// lowestCommonAncestor returns the lowest common ancestor of p and q in the
// tree rooted at root, by post-order recursion: a node is the LCA when p and
// q are found in different subtrees, or when the node itself is p or q.
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
	// Base cases: empty subtree returns nil; hitting p or q returns it.
	if root == nil || root == p || root == q {
		return root
	}
	left := lowestCommonAncestor(root.Left, p, q)
	right := lowestCommonAncestor(root.Right, p, q)
	switch {
	case left == nil:
		return right
	case right == nil:
		return left
	default:
		// p and q sit in different subtrees, so root is their LCA.
		return root
	}
}
// lowestCommonAncestor2 finds the LCA by recording every node's parent in a
// value-keyed map, marking the ancestor chain of p (including p itself), and
// then walking up from q until it reaches a marked node. Relies on node
// values being unique.
func lowestCommonAncestor2(root, p, q *TreeNode) *TreeNode {
	if root == p || root == q {
		return root
	}
	parent := make(map[int]*TreeNode)
	getParentNode(root, &parent)
	// Mark p's chain of ancestors.
	onPath := make(map[int]bool)
	for node := p; node != nil; node = parent[node.Val] {
		onPath[node.Val] = true
	}
	// The first marked node on q's ancestor chain is the LCA.
	for node := q; node != nil; node = parent[node.Val] {
		if onPath[node.Val] {
			return node
		}
	}
	return nil
}
// getParentNode fills parent with child-value -> parent-node links for every
// node below root. The map is keyed by node value, so values must be unique.
func getParentNode(root *TreeNode, parent *map[int]*TreeNode) {
	if root == nil {
		return
	}
	for _, child := range []*TreeNode{root.Left, root.Right} {
		if child != nil {
			(*parent)[child.Val] = root
			getParentNode(child, parent)
		}
	}
}
|
package service
import (
"gin-vue-admin/global"
"gin-vue-admin/model"
"gin-vue-admin/model/request"
"gin-vue-admin/model/response"
)
// @title CreateTitUser
// @description insert a new TitUser row
// @param user model.TitUser
// @auth (2020/04/05 20:22)
// @return err error
func CreateTitUser(user model.TitUser) (err error) {
	err = global.GVA_DB.Create(&user).Error
	return err
}
// @title DeleteTitUser
// @description delete a TitUser (gorm soft-deletes when the model has
// DeletedAt — TODO confirm model definition)
// @auth (2020/04/05 20:22)
// @param user model.TitUser
// @return error
func DeleteTitUser(user model.TitUser) (err error) {
	err = global.GVA_DB.Delete(user).Error
	return err
}
// @title UpdateTitUser
// @description save the full TitUser record (Save writes all fields)
// @param user *model.TitUser
// @auth (2020/04/05 20:22)
// @return error
func UpdateTitUser(user *model.TitUser) (err error) {
	err = global.GVA_DB.Save(user).Error
	return err
}
// @title GetTitUser
// @description fetch a single TitUser by primary key
// @auth (2020/04/05 20:22)
// @param id uint
// @return err error
// @return user model.TitUser
func GetTitUser(id uint) (err error, user model.TitUser) {
	err = global.GVA_DB.Where("id = ?", id).First(&user).Error
	return
}
// @title GetTitUserInfoList
// @description get a paginated TitUser list for the backend, joined with
// each user's "change job" topic answer (tit_topic_id = 12) for the current
// job-info batch
// @auth (2020/04/05 20:22)
// @param info PageInfo
// @return err error
// @return list interface{} ([]response.TitUserInfoForBackend)
// @return total int
func GetTitUserInfoList(info request.PageInfo) (err error, list interface{}, total int) {
	limit := info.PageSize
	offset := info.PageSize * (info.Page - 1)
	db := global.GVA_DB
	var users []response.TitUserInfoForBackend
	var titUser []model.TitUser
	// Count first and bail on failure; previously this error was silently
	// overwritten by the page query below.
	if err = db.Find(&titUser).Count(&total).Error; err != nil {
		return err, users, total
	}
	//err = db.Order("created_at desc").Limit(limit).Offset(offset).Find(&users).Error
	err = db.Order("u.created_at desc").Limit(limit).Offset(offset).Table("tit_users u").Select("distinct u.id, u.created_at, u.username, u.gender, u.birthday, u.telphone, uta.topic_option_ids change_job_option").Joins("left join tit_user_topic_answers uta on u.id = uta.tit_user_id and u.job_info_batch_num = uta.batch_num and uta.tit_topic_id = 12").Where("u.deleted_at is null").Scan(&users).Error
	return err, users, total
}
// FindTitUserByPhone fetches the first TitUser with the given telephone
// number.
func FindTitUserByPhone(telphone string) (err error, u model.TitUser) {
	db := global.GVA_DB
	err = db.First(&u, " telphone = ?", telphone).Error
	return
}
// ModifyTitUser applies user as a partial update.
// NOTE(review): gorm's Update with a struct skips zero-value fields — confirm
// that is the intended semantics here (vs. UpdateTitUser's full Save).
func ModifyTitUser(user *model.TitUser) (err error) {
	db := global.GVA_DB
	err = db.Model(&user).Update(user).Error
	return
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package client
import (
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/CS-SI/SafeScale/broker/utils"
)
// Session unites the different resources proposed by brokerd as the broker
// client. Each exported field is a service accessor bound back to this
// session; connection state (host, port, gRPC connection, tenant) is kept
// unexported.
type Session struct {
	Bucket   *bucket
	Host     *host
	Share    *share
	Network  *network
	Ssh      *ssh
	Tenant   *tenant
	Volume   *volume
	Template *template
	Image    *image

	brokerdHost string           // brokerd endpoint host
	brokerdPort int              // brokerd endpoint port
	connection  *grpc.ClientConn // lazily opened by Connect
	tenantName  string
}
// Client is an instance of Session, used temporarily until the session logic
// in brokerd is implemented.
// NOTE(review): a named pointer type (`type Client *Session`) cannot carry
// methods of its own; kept for caller compatibility.
type Client *Session

// Default timeouts applied depending on context.
const (
	DefaultConnectionTimeout = 30 * time.Second // connecting to brokerd
	DefaultExecutionTimeout  = 5 * time.Minute  // executing a command
)
// New returns an instance of broker Client targeting brokerd on
// localhost:50051, with every service accessor bound to the new session.
func New() Client {
	s := &Session{
		brokerdHost: "localhost",
		brokerdPort: 50051,
	}
	s.Bucket = &bucket{session: s}
	s.Host = &host{session: s}
	s.Share = &share{session: s}
	s.Network = &network{session: s}
	s.Ssh = &ssh{session: s}
	s.Tenant = &tenant{session: s}
	s.Volume = &volume{session: s}
	s.Template = &template{session: s}
	s.Image = &image{session: s}
	return s
}
// Connect establishes the connection with brokerd, reusing an already open
// gRPC connection when one exists.
// NOTE(review): no locking around s.connection — not safe for concurrent use.
func (s *Session) Connect() {
	if s.connection == nil {
		s.connection = utils.GetConnection(s.brokerdHost, s.brokerdPort)
	}
}
// Disconnect cuts the connection with brokerd, if any, and clears it so a
// later Connect dials again.
func (s *Session) Disconnect() {
	if s.connection != nil {
		s.connection.Close()
		s.connection = nil
	}
}
// DecorateError changes the error to something more comprehensible when a
// timeout occurred; otherwise it strips the gRPC "desc = " prefix (and a
// stray " :" marker) so callers see only the human-readable message.
// Non-gRPC errors are returned unchanged.
func DecorateError(err error, action string, maySucceed bool) error {
	if IsTimeout(err) {
		msg := "%s took too long (> %v) to respond"
		if maySucceed {
			msg += " (may eventually succeed)"
		}
		return fmt.Errorf(msg, action, DefaultExecutionTimeout)
	}
	msg := err.Error()
	// gRPC status errors embed the useful text after "desc = ". The original
	// computed strings.Index twice and used the magic offset 7; compute the
	// position once and derive the offset from the marker itself.
	const marker = "desc = "
	if pos := strings.Index(msg, marker); pos != -1 {
		msg = msg[pos+len(marker):]
		// Some messages carry a leading " :"; TrimPrefix replaces the
		// original Index(...) == 0 check.
		msg = strings.TrimPrefix(msg, " :")
		return errors.New(msg)
	}
	return err
}
// IsTimeout tells if err is a timeout kind of error, i.e. a gRPC status
// carrying codes.DeadlineExceeded.
func IsTimeout(err error) bool {
	return status.Code(err) == codes.DeadlineExceeded
}
|
package authorize
import (
"context"
"strings"
envoy_service_auth_v3 "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/rs/zerolog"
"github.com/pomerium/pomerium/authorize/evaluator"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/telemetry/requestid"
"github.com/pomerium/pomerium/internal/telemetry/trace"
"github.com/pomerium/pomerium/pkg/grpc/audit"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/grpc/session"
"github.com/pomerium/pomerium/pkg/grpc/user"
"github.com/pomerium/pomerium/pkg/grpcutil"
"github.com/pomerium/pomerium/pkg/storage"
)
// logAuthorizeCheck emits a structured "authorize check" log line for the
// given check request/response pair, including the configured log fields and
// the allow/deny evaluation result. When an audit encryptor is configured it
// additionally emits an encrypted audit record of the full request/response.
func (a *Authorize) logAuthorizeCheck(
	ctx context.Context,
	in *envoy_service_auth_v3.CheckRequest, out *envoy_service_auth_v3.CheckResponse,
	res *evaluator.Result, s sessionOrServiceAccount, u *user.User,
) {
	ctx, span := trace.StartSpan(ctx, "authorize.grpc.LogAuthorizeCheck")
	defer span.End()
	hdrs := getCheckRequestHeaders(in)
	impersonateDetails := a.getImpersonateDetails(ctx, s)
	evt := log.Info(ctx).Str("service", "authorize")
	// Only the fields enabled in the current options are populated.
	fields := a.currentOptions.Load().GetAuthorizeLogFields()
	for _, field := range fields {
		evt = populateLogEvent(ctx, field, evt, in, s, u, hdrs, impersonateDetails)
	}
	evt = log.HTTPHeaders(evt, fields, hdrs)
	// result
	if res != nil {
		evt = evt.Bool("allow", res.Allow.Value)
		if res.Allow.Value {
			evt = evt.Strs("allow-why-true", res.Allow.Reasons.Strings())
		} else {
			evt = evt.Strs("allow-why-false", res.Allow.Reasons.Strings())
		}
		evt = evt.Bool("deny", res.Deny.Value)
		if res.Deny.Value {
			evt = evt.Strs("deny-why-true", res.Deny.Reasons.Strings())
		} else {
			evt = evt.Strs("deny-why-false", res.Deny.Reasons.Strings())
		}
	}
	evt.Msg("authorize check")
	// Optional audit trail: encrypt the full request/response and log the
	// sealed blob. The deferred span.End runs at function return, which is
	// fine because the function ends right after this block.
	if enc := a.state.Load().auditEncryptor; enc != nil {
		ctx, span := trace.StartSpan(ctx, "authorize.grpc.AuditAuthorizeCheck")
		defer span.End()
		record := &audit.Record{
			Request:  in,
			Response: out,
		}
		sealed, err := enc.Encrypt(record)
		if err != nil {
			log.Warn(ctx).Err(err).Msg("authorize: error encrypting audit record")
			return
		}
		log.Info(ctx).
			Str("request-id", requestid.FromContext(ctx)).
			EmbedObject(sealed).
			Msg("audit log")
	}
}
// impersonateDetails identifies the session and user being impersonated,
// resolved from the databroker by getImpersonateDetails.
type impersonateDetails struct {
	email     string
	sessionID string
	userID    string
}
// getImpersonateDetails resolves the impersonated session and user referenced
// by s, if any. It returns nil when s is not a session, carries no
// impersonation session id, or when any databroker lookup fails — callers
// treat nil as "no impersonation".
func (a *Authorize) getImpersonateDetails(
	ctx context.Context,
	s sessionOrServiceAccount,
) *impersonateDetails {
	var sessionID string
	if s, ok := s.(*session.Session); ok {
		sessionID = s.GetImpersonateSessionId()
	}
	if sessionID == "" {
		return nil
	}
	querier := storage.GetQuerier(ctx)
	// First lookup: the impersonated session record by id.
	req := &databroker.QueryRequest{
		Type:  grpcutil.GetTypeURL(new(session.Session)),
		Limit: 1,
	}
	req.SetFilterByID(sessionID)
	res, err := querier.Query(ctx, req)
	if err != nil || len(res.GetRecords()) == 0 {
		return nil
	}
	impersonatedSessionMsg, err := res.GetRecords()[0].GetData().UnmarshalNew()
	if err != nil {
		return nil
	}
	impersonatedSession, ok := impersonatedSessionMsg.(*session.Session)
	if !ok {
		return nil
	}
	userID := impersonatedSession.GetUserId()
	// Second lookup: the user that session belongs to.
	req = &databroker.QueryRequest{
		Type:  grpcutil.GetTypeURL(new(user.User)),
		Limit: 1,
	}
	req.SetFilterByID(userID)
	res, err = querier.Query(ctx, req)
	if err != nil || len(res.GetRecords()) == 0 {
		return nil
	}
	impersonatedUserMsg, err := res.GetRecords()[0].GetData().UnmarshalNew()
	if err != nil {
		return nil
	}
	impersonatedUser, ok := impersonatedUserMsg.(*user.User)
	if !ok {
		return nil
	}
	email := impersonatedUser.GetEmail()
	return &impersonateDetails{
		sessionID: sessionID,
		userID:    userID,
		email:     email,
	}
}
// populateLogEvent adds the single configured log field to evt, pulling its
// value from the check request, session/service account, user, request
// headers, or impersonation details as appropriate. Unknown fields leave evt
// unchanged.
func populateLogEvent(
	ctx context.Context,
	field log.AuthorizeLogField,
	evt *zerolog.Event,
	in *envoy_service_auth_v3.CheckRequest,
	s sessionOrServiceAccount,
	u *user.User,
	hdrs map[string]string,
	impersonateDetails *impersonateDetails,
) *zerolog.Event {
	// Split the request path once; both the path and query fields use it.
	path, query, _ := strings.Cut(in.GetAttributes().GetRequest().GetHttp().GetPath(), "?")
	switch field {
	case log.AuthorizeLogFieldCheckRequestID:
		return evt.Str(string(field), hdrs["X-Request-Id"])
	case log.AuthorizeLogFieldEmail:
		return evt.Str(string(field), u.GetEmail())
	case log.AuthorizeLogFieldHost:
		return evt.Str(string(field), in.GetAttributes().GetRequest().GetHttp().GetHost())
	case log.AuthorizeLogFieldIDToken:
		if s, ok := s.(*session.Session); ok {
			evt = evt.Str(string(field), s.GetIdToken().GetRaw())
		}
		return evt
	case log.AuthorizeLogFieldIDTokenClaims:
		if s, ok := s.(*session.Session); ok {
			// Claims are logged without signature verification: the token was
			// already validated elsewhere — presumably; confirm upstream.
			if t, err := jwt.ParseSigned(s.GetIdToken().GetRaw()); err == nil {
				var m map[string]any
				_ = t.UnsafeClaimsWithoutVerification(&m)
				evt = evt.Interface(string(field), m)
			}
		}
		return evt
	case log.AuthorizeLogFieldImpersonateEmail:
		if impersonateDetails != nil {
			evt = evt.Str(string(field), impersonateDetails.email)
		}
		return evt
	case log.AuthorizeLogFieldImpersonateSessionID:
		if impersonateDetails != nil {
			evt = evt.Str(string(field), impersonateDetails.sessionID)
		}
		return evt
	case log.AuthorizeLogFieldImpersonateUserID:
		if impersonateDetails != nil {
			evt = evt.Str(string(field), impersonateDetails.userID)
		}
		return evt
	case log.AuthorizeLogFieldIP:
		return evt.Str(string(field), in.GetAttributes().GetSource().GetAddress().GetSocketAddress().GetAddress())
	case log.AuthorizeLogFieldMethod:
		return evt.Str(string(field), in.GetAttributes().GetRequest().GetHttp().GetMethod())
	case log.AuthorizeLogFieldPath:
		return evt.Str(string(field), path)
	case log.AuthorizeLogFieldQuery:
		return evt.Str(string(field), query)
	case log.AuthorizeLogFieldRequestID:
		return evt.Str(string(field), requestid.FromContext(ctx))
	case log.AuthorizeLogFieldServiceAccountID:
		if sa, ok := s.(*user.ServiceAccount); ok {
			evt = evt.Str(string(field), sa.GetId())
		}
		return evt
	case log.AuthorizeLogFieldSessionID:
		if s, ok := s.(*session.Session); ok {
			evt = evt.Str(string(field), s.GetId())
		}
		return evt
	case log.AuthorizeLogFieldUser:
		return evt.Str(string(field), u.GetId())
	default:
		return evt
	}
}
|
package mysql
import (
"database/sql"
"encoding/json"
"github.com/bearname/videohost/internal/common/db"
"github.com/bearname/videohost/internal/videoserver/domain"
dto2 "github.com/bearname/videohost/internal/videoserver/domain/dto"
"github.com/bearname/videohost/internal/videoserver/domain/model"
log "github.com/sirupsen/logrus"
)
// VideoRepository is the MySQL-backed implementation of the video storage,
// issuing all its queries through the injected db.Connector.
type VideoRepository struct {
	connector db.Connector
}
// NewMysqlVideoRepository builds a VideoRepository backed by the given
// database connector.
func NewMysqlVideoRepository(connector db.Connector) *VideoRepository {
	return &VideoRepository{connector: connector}
}
// Create stores a new video row, plus its chapter rows when any chapters are
// supplied.
func (r *VideoRepository) Create(userId string, videoId string, title string, description string, url string, chapters []dto2.ChapterDto) error {
	// len() of a nil slice is 0, so one length check covers both the nil and
	// the empty-slice cases the original spelled out separately.
	if len(chapters) == 0 {
		return r.insertWithoutChapter(userId, videoId, title, description, url)
	}
	return r.insertWithChapter(userId, videoId, title, description, url, chapters)
}
// insertWithChapter inserts the video row and all its chapter rows inside a
// single transaction, so a chapter failure rolls back the video insert too.
func (r *VideoRepository) insertWithChapter(userId string, videoId string, title string, description string, url string, chapters []dto2.ChapterDto) error {
	var values []interface{}
	// Build one multi-row VALUES clause: a "(?, ?, ?, ?)" group per chapter,
	// with the arguments collected in matching order.
	createChapterQuery := "INSERT INTO video_chapter (id_video, title, start, end) VALUES "
	for _, chapter := range chapters {
		createChapterQuery += "(?, ?, ?, ?), "
		values = append(values, videoId, chapter.Title, chapter.Start, chapter.End)
	}
	// Drop the trailing ", " left by the loop and terminate the statement.
	createChapterQuery = createChapterQuery[0 : len(createChapterQuery)-2]
	createChapterQuery += ";"
	err := db.WithTransaction(r.connector.GetDb(), func(tx db.Transaction) error {
		createVideoQuery := "INSERT INTO video (id_video, title, description, url, owner_id) VALUE (?, ?, ?, ?, ?); "
		_, err := tx.Exec(createVideoQuery, videoId, title, description, url, userId)
		if err != nil {
			return err
		}
		_, err = tx.Exec(createChapterQuery, values...)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		log.Error(err.Error())
		return err
	}
	return nil
}
// insertWithoutChapter inserts just the video row (no chapters).
func (r *VideoRepository) insertWithoutChapter(userId string, videoId string, title string, description string, url string) error {
	query := "INSERT INTO video (id_video, title, description, url, owner_id) VALUE (?, ?, ?, ?, ?);"
	// Exec, not Query: the original ran Query for an INSERT and never closed
	// the returned Rows, pinning a pool connection per call. (Assumes GetDb()
	// returns an *sql.DB-compatible handle, as its Query/QueryRow usage
	// elsewhere indicates.)
	_, err := r.connector.GetDb().Exec(query, videoId,
		title,
		description,
		url,
		userId)
	if err != nil {
		log.Info(err.Error())
		return err
	}
	return nil
}
// Save persists a fully-populated video model as a new row.
func (r *VideoRepository) Save(video *model.Video) error {
	query := `INSERT INTO video (id_video, title, description, duration, status, thumbnail_url, url, uploaded, quality, owner_id)
 VALUE (?, ?, ?, ?, ?, ?, ?, ?, ?, ?);`
	// Exec, not Query: the original ran Query for an INSERT and leaked the
	// unclosed Rows handle (and its pooled connection).
	_, err := r.connector.GetDb().Exec(query, video.Id,
		video.Name,
		video.Description,
		video.Duration,
		video.Status,
		video.Thumbnail,
		video.Url,
		video.Uploaded,
		video.Quality,
		video.OwnerId)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	return nil
}
// Find loads a single video with its chapters and its like/dislike counters.
// The error from QueryRow.Scan (e.g. sql.ErrNoRows) is returned as-is.
func (r *VideoRepository) Find(videoId string) (*model.Video, error) {
	var video model.Video
	// NOTE(review): the join condition uses vc.video_id while the chapter
	// INSERT above uses column id_video — verify against the actual schema.
	q := `SELECT id_video,
 video.title AS video_title,
 description AS video_description,
 duration AS video_duration,
 thumbnail_url AS video_thumbnail_url,
 url AS video_url,
 uploaded AS video_uploaded,
 quality AS video_quality,
 views AS video_views,
 owner_id AS video_owner_id,
 status AS video_status,
 GROUP_CONCAT(CONCAT('{"title":"', vc.title, '","start":', vc.start, ',"end":', vc.end, '}') SEPARATOR ',') AS video_chapters
 FROM video
 LEFT JOIN video_chapter vc ON id_video = vc.video_id
 WHERE id_video = ?
 GROUP BY id_video;`
	row := r.connector.GetDb().QueryRow(q, videoId)
	var chapterString sql.NullString
	err := row.Scan(
		&video.Id,
		&video.Name,
		&video.Description,
		&video.Duration,
		&video.Thumbnail,
		&video.Url,
		&video.Uploaded,
		&video.Quality,
		&video.Views,
		&video.OwnerId,
		&video.Status,
		&chapterString,
	)
	if err != nil {
		log.Error(err)
		return nil, err
	}
	var chapters []model.Chapter
	if chapterString.Valid {
		chapters, err = r.parseChapter(chapterString.String)
		if err != nil {
			return nil, err
		}
	}
	video.Chapters = chapters
	q = `SELECT SUM(IF(isLike = 0, 1, 0)) AS video_dislikes,
 SUM(IF(isLike = 1, 1, 0)) AS video_likes
 FROM video_like
 WHERE id_video = ?`
	rows, err := r.connector.GetDb().Query(q, videoId)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var countLikes sql.NullInt64
	var countDisLikes sql.NullInt64
	// BUG FIX: the original scanned from `row` (the already-consumed video
	// row) instead of advancing and scanning `rows`, so the like/dislike
	// counters were never actually read from the aggregate query.
	if rows.Next() {
		if err := rows.Scan(
			&countDisLikes,
			&countLikes); err != nil {
			return nil, err
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	video.CountLikes = int(countLikes.Int64)
	video.CountDisLikes = int(countDisLikes.Int64)
	return &video, nil
}
// setChapter parses chapterString (when non-empty) and attaches the chapters
// to the given video, returning the updated copy.
// NOTE(review): the incoming `err` parameter is never read — it is always
// overwritten before use; this helper also appears unused in this file.
// Confirm whether it can be removed or its signature simplified.
func (r *VideoRepository) setChapter(chapterString string, err error, video model.Video) (model.Video, error) {
	var chapters []model.Chapter
	if len(chapterString) > 0 {
		chapters, err = r.parseChapter(chapterString)
		if err != nil {
			return video, err
		}
	}
	video.Chapters = chapters
	return video, nil
}
// parseChapter decodes the comma-joined JSON objects produced by the
// GROUP_CONCAT in Find into a slice of chapters.
func (r *VideoRepository) parseChapter(chapterString string) ([]model.Chapter, error) {
	// Wrap the joined objects in brackets to form a valid JSON array.
	wrapped := "[" + chapterString + "]"
	var parsed []model.Chapter
	if err := json.Unmarshal([]byte(wrapped), &parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// Update changes the title and description of the given video.
func (r *VideoRepository) Update(videoId string, title string, description string) error {
	// Exec, not Query: an UPDATE returns no result set, so Query only
	// allocated a Rows handle that had to be closed for nothing.
	_, err := r.connector.GetDb().Exec("UPDATE video SET title=?, description=? WHERE id_video=?;", title, description, videoId)
	if err != nil {
		log.Error(err.Error())
		return err
	}
	return nil
}
// Delete removes the video row with the given id.
func (r *VideoRepository) Delete(videoId string) error {
	// Exec, not Query: a DELETE returns no result set.
	_, err := r.connector.GetDb().Exec("DELETE FROM video WHERE id_video=?;", videoId)
	return err
}
// FindVideosByPage returns one page of ready (status=3) videos.
// NOTE(review): offset here is page*count, while SearchVideo uses
// (page-1)*count — presumably this endpoint is 0-based and search is
// 1-based; confirm against the callers.
func (r *VideoRepository) FindVideosByPage(page int, count int) ([]model.VideoListItem, error) {
	offset := (page) * count
	query := "SELECT id_video, title, duration, thumbnail_url, uploaded, views, status, quality FROM video WHERE status=3 LIMIT ?, ?;"
	rows, err := r.connector.GetDb().Query(query, offset, count)
	return r.getVideoListItem(rows, err)
}
// GetPageCount returns how many pages are needed to list every ready
// (status=3) video at countVideoOnPage items per page; the bool is false on
// any failure.
func (r *VideoRepository) GetPageCount(countVideoOnPage int) (int, bool) {
	// Guard: the original divided by countVideoOnPage unconditionally and
	// would panic on zero.
	if countVideoOnPage <= 0 {
		return 0, false
	}
	query := "SELECT COUNT(id_video) AS countReadyVideo FROM video WHERE status=3;"
	rows, err := r.connector.GetDb().Query(query)
	if err != nil {
		return 0, false
	}
	defer rows.Close()
	var countVideo int
	for rows.Next() {
		if err = rows.Scan(&countVideo); err != nil {
			return 0, false
		}
	}
	// Surface cursor errors the original silently ignored.
	if err = rows.Err(); err != nil {
		return 0, false
	}
	countPage := countVideo / countVideoOnPage
	if countVideo%countVideoOnPage > 0 {
		countPage++
	}
	return countPage, true
}
// AddVideoQuality appends ",<quality>" to the video's quality list column.
func (r *VideoRepository) AddVideoQuality(videoId string, quality string) error {
	query := "UPDATE video SET `quality` = concat(quality, concat(',', ?)) WHERE id_video = ?;"
	// Exec, not Query: an UPDATE returns no result set.
	_, err := r.connector.GetDb().Exec(query, quality, videoId)
	return err
}
// SearchVideo runs a full-text title search over ready (status=3) videos and
// returns one page of results (1-based page index).
func (r *VideoRepository) SearchVideo(searchString string, page int, count int) ([]model.VideoListItem, error) {
	firstRow := (page - 1) * count
	query := `SELECT id_video, title, duration, thumbnail_url, uploaded, views, status, quality FROM video 
 WHERE MATCH(video.title) AGAINST (? IN NATURAL LANGUAGE MODE) AND status=3 LIMIT ?, ?;`
	resultRows, queryErr := r.connector.GetDb().Query(query, searchString, firstRow, count)
	return r.getVideoListItem(resultRows, queryErr)
}
// IncrementViews bumps the view counter of the given video; reports success.
func (r *VideoRepository) IncrementViews(id string) bool {
	query := "UPDATE video SET video.views = video.views + 1 WHERE id_video=?"
	// Exec, not Query: an UPDATE returns no result set.
	_, err := r.connector.GetDb().Exec(query, id)
	if err != nil {
		log.Info(err.Error())
		return false
	}
	return true
}
// FindUserVideos returns one page of the videos owned by userId, using the
// page/count carried in the search DTO.
func (r *VideoRepository) FindUserVideos(userId string, dto dto2.SearchDto) ([]model.VideoListItem, error) {
	firstRow := (dto.Page) * dto.Count
	query := "SELECT video.id_video, title, duration, thumbnail_url, uploaded, views, status, quality FROM video WHERE owner_id=? LIMIT ?, ?;"
	resultRows, queryErr := r.connector.GetDb().Query(query, userId, firstRow, dto.Count)
	return r.getVideoListItem(resultRows, queryErr)
}
// Like records a like/dislike for a video. If the same reaction already
// exists it is removed (toggle); if the opposite reaction exists an
// "already ..." error is returned; otherwise the reaction is upserted.
func (r *VideoRepository) Like(like model.Like) (model.Action, error) {
	query := `SELECT isLike FROM video_like WHERE id_video = ? AND owner_id= ?;`
	rows, err := r.connector.GetDb().Query(query, like.IdVideo, like.OwnerId)
	if err == nil {
		defer rows.Close()
		var isLike bool
		if rows.Next() {
			err = rows.Scan(&isLike)
			if err != nil {
				return 0, domain.ErrInternal
			}
			if isLike == like.IsLike {
				// Same reaction twice: treat it as an undo.
				err = r.DeleteLike(like)
				if err != nil {
					return model.DeleteLike, domain.ErrFailedDeleteLike
				}
				if isLike {
					return model.DeleteLike, nil
				}
				return model.DeleteDisLike, nil
			}
			// Opposite reaction already stored.
			if isLike {
				return 0, domain.ErrAlreadyLike
			}
			return 0, domain.ErrAlreadyDisLike
		}
	}
	query = `INSERT INTO video_like (id_video, owner_id, isLike)
 VALUES (?, ?, ?)
 ON DUPLICATE KEY UPDATE isLike=?;`
	// Exec, not Query: the original ran Query for this INSERT and only closed
	// the useless Rows handle via defer.
	if _, err := r.connector.GetDb().Exec(query, like.IdVideo, like.OwnerId, like.IsLike, like.IsLike); err != nil {
		return 0, domain.ErrFailedAddLike
	}
	if like.IsLike {
		return model.AddLike, nil
	}
	return model.AddDislike, nil
}
// FindLikedByUser returns one page of the videos the user has reacted to
// (both likes and dislikes — the query does not filter on isLike), joining
// the paged like rows back to the video table.
func (r *VideoRepository) FindLikedByUser(userId string, page db.Page) ([]model.VideoListItem, error) {
	offset := (page.Number) * page.Size
	query := `SELECT ids.id_video,
 title,
 duration,
 thumbnail_url,
 uploaded,
 views,
 status,
 quality FROM (SELECT id_video FROM video_like WHERE owner_id = ? LIMIT ?, ?) as ids LEFT JOIN video v ON ids.id_video = v.id_video`
	rows, err := r.connector.GetDb().Query(query, userId, offset, page.Size)
	return r.getVideoListItem(rows, err)
}
// DeleteLike removes the like/dislike row for the (video, owner) pair.
func (r *VideoRepository) DeleteLike(like model.Like) error {
	query := `DELETE FROM video_like WHERE id_video = ? AND owner_id= ?;`
	// Exec, not Query: the original returned a Rows handle from a DELETE and
	// never closed it, pinning a pool connection.
	_, err := r.connector.GetDb().Exec(query, like.IdVideo, like.OwnerId)
	return err
}
// getVideoListItem drains rows into a slice of list items, forwarding a
// query-time error when one was passed in. It always closes rows.
func (r *VideoRepository) getVideoListItem(rows *sql.Rows, err error) ([]model.VideoListItem, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	videos := make([]model.VideoListItem, 0)
	for rows.Next() {
		var videoListItem model.VideoListItem
		err = rows.Scan(
			&videoListItem.Id,
			&videoListItem.Name,
			&videoListItem.Duration,
			&videoListItem.Thumbnail,
			&videoListItem.Uploaded,
			&videoListItem.Views,
			&videoListItem.Status,
			&videoListItem.Quality,
		)
		if err != nil {
			return nil, err
		}
		videos = append(videos, videoListItem)
	}
	// The original ignored iteration errors, so a failed cursor would
	// silently truncate the result set.
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return videos, nil
}
|
package router
import (
"fmt"
"github.com/ijidan/jgo/controller"
"github.com/ijidan/jgo/jgo/jlogger"
"github.com/ijidan/jgo/jgo/jrouter"
"net/http"
)
// HttpHost and HttpPort form the listen address used by StartHttpServer.
const HttpHost = "127.0.0.1"
const HttpPort = int64(8080)
// Registry registers every route group (front, backend, api, service) with
// the global jrouter instance and attaches the group middlewares.
func Registry() {
	// Controllers.
	index := controller.IndexController{}
	user := controller.UserController{}
	chat := controller.ChatController{}
	admin := controller.AdminIndexController{}
	client := controller.ClientController{}
	server := controller.ServerController{}
	service := controller.ServiceController{}
	// Router instance.
	ins := jrouter.NewJRouter()
	// User-facing front routes.
	ins.Group("front", func() {
		ins.Any("/", index.Index)
		ins.Any("/call", index.Call)
		ins.Any("/user/reg", user.Reg)
		ins.Any("/user/login", user.Login)
		ins.Any("/user/info", user.Info)
		ins.Any("/user/logout", user.Logout)
		ins.Any("/chat/index", chat.Index)
	}).Add(LogMiddleware, BridgeAccountCookieSyncMiddleware)
	// Admin backend routes.
	ins.Group("backend", func() {
		ins.Any("/admin", admin.Index)
		ins.Any("/admin/defaultIdx", admin.DefaultIdx)
		ins.Any("/admin/serverList", admin.ServerList)
		ins.Any("/admin/clientList", admin.ClientList)
	}).Add(LogMiddleware)
	// API routes (token-protected).
	ins.Group("api", func() {
		ins.Any("/api/client/getAll", client.GetAll)
		ins.Any("/api/client/sendMessage", client.SendMessage)
		ins.Any("/api/client/kickOff", client.KickOff)
		ins.Any("/api/server/getAll", server.GetAll)
		ins.Any("/api/server/batchSendMessage", server.BatchSendMessage)
		ins.Any("/api/server/closeServer", server.CloseServer)
	}).Add(verifyTokenMiddleware)
	// Service routes (e.g. consul health check).
	ins.Group("service", func() {
		ins.Any("/service/consulCheck", service.ConsulCheck)
	})
}
// Static asset locations: on-disk directory and its URL prefix.
const staticPath = "static/"
const staticUrlPrefix = "/static/"
// newMux builds the root http.Handler: all registered routes plus a static
// file server mounted under /static/.
func newMux() http.Handler {
	mux := http.NewServeMux()
	router := jrouter.NewJRouter()
	// Mount every registered route.
	for pattern, handler := range router.GetAll() {
		mux.Handle(pattern, handler)
	}
	// Serve static assets.
	static := http.FileServer(http.Dir(staticPath))
	mux.Handle(staticUrlPrefix, http.StripPrefix(staticUrlPrefix, static))
	return mux
}
// StartHttpServer registers all routes and serves HTTP on HttpHost:HttpPort,
// logging any listen/serve failure.
func StartHttpServer() {
	Registry()
	handler := newMux()
	addr := fmt.Sprintf("%s:%d", HttpHost, HttpPort)
	if err := http.ListenAndServe(addr, handler); err != nil {
		jlogger.Error(err.Error())
	}
}
|
package types
import (
"testing"
"github.com/stretchr/testify/require"
sdk "github.com/irisnet/irishub/types"
)
// TestValidateParams checks that the default parameters validate cleanly and
// that each invalid fee value is rejected by ValidateParams.
func TestValidateParams(t *testing.T) {
	// check that valid case work
	defaultParams := DefaultParams()
	err := ValidateParams(defaultParams)
	require.Nil(t, err)
	// all cases should return an error (result == false means an error is expected)
	// NOTE(review): the "fee < 1" case uses NewRat(1000, 100) == 10, which
	// contradicts its name — confirm whether the name or the value is wrong.
	invalidTests := []struct {
		name   string
		params Params
		result bool
	}{
		{"fee == 0 ", NewParams(sdk.ZeroRat()), false},
		{"fee < 1", NewParams(sdk.NewRat(1000, 100)), false},
		{"fee numerator < 0", NewParams(sdk.NewRat(-1, 10)), false},
		{"fee denominator < 0", NewParams(sdk.NewRat(1, -10)), false},
	}
	for _, tc := range invalidTests {
		t.Run(tc.name, func(t *testing.T) {
			err := ValidateParams(tc.params)
			if err != nil {
				require.False(t, tc.result)
			} else {
				require.True(t, tc.result)
			}
		})
	}
}
|
package gbinterface
// IMessage describes a message carrying an id, a length, and a raw payload.
type IMessage interface {
	GetMsgId() uint32
	// GetMessgLen returns the message length.
	// NOTE(review): the name is misspelled ("Messg") but kept as-is —
	// renaming an interface method would break every implementer.
	GetMessgLen() uint32
	GetData() []byte
	SetMsgId(uint32)
	SetData([]byte)
	SetDataLen(uint32)
}
|
package ama
import (
"log"
"net/http"
"io/ioutil"
"github.com/itsabot/abot/shared/datatypes"
"github.com/itsabot/abot/shared/nlp"
"github.com/itsabot/abot/shared/plugin"
)
// p is the package-wide plugin handle, built once in init.
var p *dt.Plugin

func init() {
	// Abot should route messages to this plugin that contain any combination
	// of the below words. The stems of the words below are used, so you don't
	// need to include duplicates (e.g. there's no need to include both "stock"
	// and "stocks"). Everything will be lowercased as well, so there's no
	// difference between "ETF" and "etf".
	trigger := &nlp.StructuredInput{
		Commands: []string{"yo", "hey"},
		Objects:  []string{"wiki"},
	}
	// Tell Abot how this plugin will respond to new conversations and follow-up
	// requests.
	fns := &dt.PluginFns{Run: Run, FollowUp: FollowUp}
	// Create the plugin.
	var err error
	pluginPath := "github.com/BBBBlarry/plugin_ama"
	p, err = plugin.New(pluginPath, trigger, fns)
	if err != nil {
		log.Fatalln("building", err)
	}
	// Register the keyword handler; same trigger words as above.
	p.Vocab = dt.NewVocab(
		dt.VocabHandler{
			Fn: kwQueryAlpha,
			Trigger: &nlp.StructuredInput{
				Commands: []string{"yo", "hey"},
				Objects:  []string{"wiki"},
			},
		},
	)
}
// Abot calls Run the first time a user interacts with a plugin. This plugin
// keeps no per-conversation state, so Run simply delegates to FollowUp.
func Run(in *dt.Msg) (string, error) {
	return FollowUp(in)
}
// Abot calls FollowUp every subsequent time a user interacts with the plugin
// as long as the messages hit this plugin consecutively. As soon as Abot sends
// a message for this user to a different plugin, this plugin's Run function
// will be called the next it's triggered. This Run/FollowUp design allows us
// to reset a plugin's state when a user changes conversations.
func FollowUp(in *dt.Msg) (string, error) {
	//return QueryAlpha(in), nil
	return p.Vocab.HandleKeywords(in), nil
}
// kwQueryAlpha handles the "yo/hey wiki" keyword trigger. It currently
// fetches google.com/robots.txt as placeholder content and returns the body
// as the reply text.
func kwQueryAlpha(in *dt.Msg) (resp string) {
	res, err := http.Get("http://www.google.com/robots.txt")
	if err != nil {
		// BUG FIX: the original called log.Fatal here, which terminated the
		// whole bot process on a transient network error. Log and return an
		// empty reply instead.
		log.Println("ama: fetching robots.txt:", err)
		return ""
	}
	defer res.Body.Close()
	robots, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Println("ama: reading response body:", err)
		return ""
	}
	return string(robots)
}
|
// Variadic functions: functions that accept a variable number of parameters.
package main
import "fmt"
// printAprovados prints each approved student's name on its own line.
func printAprovados(aprovados ...string) {
	for i := 0; i < len(aprovados); i++ {
		fmt.Println(aprovados[i])
	}
}
// main builds a slice of approved students and expands it into the variadic
// printAprovados with the "..." operator.
func main() {
	// slice literal: no fixed size is declared
	aprovados := []string{"ytallo", "gabriel", "pessoa", "leda"}
	printAprovados(aprovados...)
}
|
package main
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"opentsp.org/internal/config"
"opentsp.org/internal/flag"
"opentsp.org/internal/relay"
"opentsp.org/internal/restart"
"opentsp.org/internal/tsdb/filter"
"opentsp.org/internal/validate"
)
// Config is the poller's configuration. Fields tagged `config:"dynamic"` are
// presumably reloadable by the config package — confirm against package
// config's tag handling.
type Config struct {
	Filter      []filter.Rule            `config:"dynamic"`
	Relay       map[string]*relay.Config `config:"dynamic"`
	CollectPath string
	LogPath     string
}
// load reads the configuration from path and arranges for a restart signal
// when the configuration next changes. In test mode it dumps the loaded
// config to stdout and exits immediately.
func load(path string) *Config {
	if flag.DebugMode {
		config.Debug = log.New(os.Stderr, "debug: config: ", 0)
	}
	cfg := new(Config)
	config.Load(cfg, path, "tsp-poller?host={{.Hostname}}")
	if flag.TestMode {
		cfg.Dump(os.Stdout)
		os.Exit(0)
	}
	// Watcher: block until the next config version arrives, then trigger the
	// restart path (see Restart below).
	go func() {
		dummy := new(Config)
		config.Next(dummy)
		restartCause <- "config updated"
	}()
	return cfg
}
func (c *Config) Reset() { *c = *defaultConfig }
// Dump writes c to w as tab-indented JSON. The marshal error is deliberately
// ignored: Dump is a best-effort debugging/test aid.
func (c *Config) Dump(w io.Writer) {
	buf, _ := json.MarshalIndent(c, "", "\t")
	fmt.Fprintln(w, string(buf))
}
// Validate checks the dynamic sections of the configuration, storing the
// normalized filter rules back on c.
func (c *Config) Validate() error {
	normalized, err := validate.Filter(c.Filter)
	if err != nil {
		return err
	}
	c.Filter = normalized
	return validate.Relay(c.Relay)
}
// restartCause receives a human-readable reason for restarting the process
// (sent by load's config watcher).
var restartCause = make(chan string)

// Restart blocks until a restart is requested, then runs the provided
// shutdown hook and re-executes the process via restart.Do.
func Restart(shutdown func()) {
	cause := <-restartCause
	log.Printf("restarting... (%s)", cause)
	shutdown()
	restart.Do()
}
|
// Package teststruct - myteststruct.go
package teststruct
// MyTestStruct holds a pair of integers with simple arithmetic helpers.
type MyTestStruct struct {
	X int
	Y int
}

// SetValues stores x and y on the receiver.
func (m *MyTestStruct) SetValues(x, y int) {
	m.X, m.Y = x, y
}

// Add returns the sum of X and Y.
func (m MyTestStruct) Add() int {
	return m.X + m.Y
}

// Multiply returns the product of X and Y.
func (m MyTestStruct) Multiply() int {
	return m.X * m.Y
}

// MyOutput returns a fixed identification string, adhering to the MyTest
// interface.
func (m MyTestStruct) MyOutput() string {
	return "output from MyTestStruct"
}

// MyTestStruct2 is an empty placeholder type (test 2).
type MyTestStruct2 struct {
	//
}
|
package lockservice
// RPC definitions for a simple lock service.
// Lock(lockname) returns OK=true if the lock is not held.
// If it is held, it returns OK=false immediately.

// LockArgs carries the arguments of a Lock RPC.
type LockArgs struct {
	// Go's net/rpc requires that these field
	// names start with upper case letters!
	Lockname string // lock name
	UUID     string // request id — presumably for duplicate detection; confirm with the server
}

// LockReply is the server's answer to a Lock RPC.
type LockReply struct {
	OK   bool // true if the lock was acquired (it was not held)
	UUID string
}

// Unlock(lockname) returns OK=true if the lock was held.
// It returns OK=false if the lock was not held.

// UnlockArgs carries the arguments of an Unlock RPC.
type UnlockArgs struct {
	Lockname string
	UUID     string
}

// UnlockReply is the server's answer to an Unlock RPC.
type UnlockReply struct {
	OK   bool // true if the lock was held and is now released
	UUID string
}
|
package cage
import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
"os"
"path/filepath"
)
// Envars aggregates the inputs of a cage deployment. Values come from CLI
// flags / environment variables (see the *Key constants) or from roll-out
// files (see LoadFromFiles); EnsureEnvars validates and applies defaults.
type Envars struct {
	_                       struct{} `type:"struct"`
	Region                  *string  `json:"region" type:"string"`
	Cluster                 *string  `json:"cluster" type:"string" required:"true"`
	Service                 *string  `json:"service" type:"string" required:"true"`
	CanaryService           *string  // defaulted to "<service>-canary" by EnsureEnvars
	TaskDefinitionBase64    *string  `json:"nextTaskDefinitionBase64" type:"string"`
	TaskDefinitionArn       *string  `json:"nextTaskDefinitionArn" type:"string"`
	ServiceDefinitionBase64 *string
}
// Environment variable names for the required inputs.
const ClusterKey = "CAGE_CLUSTER"
const ServiceKey = "CAGE_SERVICE"

// Task-definition inputs: EnsureEnvars requires at least one of the base64 or
// ARN variants.
const ServiceDefinitionBase64Key = "CAGE_SERVICE_DEFINITION_BASE64"
const TaskDefinitionBase64Key = "CAGE_TASK_DEFINITION_BASE64"
const TaskDefinitionArnKey = "CAGE_TASK_DEFINITION_ARN"

// kDefaultRegion is applied by EnsureEnvars when no region is supplied.
const kDefaultRegion = "us-west-2"

// Optional inputs.
const CanaryServiceKey = "CAGE_CANARY_SERVICE"
const RegionKey = "CAGE_REGION"
// isEmpty reports whether the string pointer is nil or points to "".
func isEmpty(o *string) bool {
	if o == nil {
		return true
	}
	return *o == ""
}
// EnsureEnvars validates the required fields on dest and fills in defaults
// for Region ("us-west-2") and CanaryService ("<service>-canary").
func EnsureEnvars(
	dest *Envars,
) (error) {
	// Required inputs.
	switch {
	case isEmpty(dest.Cluster):
		return NewErrorf("--cluster [%s] is required", ClusterKey)
	case isEmpty(dest.Service):
		return NewErrorf("--service [%s] is required", ServiceKey)
	}
	// At least one way to specify the next task definition must be present.
	if isEmpty(dest.TaskDefinitionArn) && isEmpty(dest.TaskDefinitionBase64) {
		return NewErrorf("--nextTaskDefinitionArn or --nextTaskDefinitionBase64 must be provided")
	}
	// Defaults for the optional inputs.
	if isEmpty(dest.Region) {
		dest.Region = aws.String(kDefaultRegion)
	}
	if isEmpty(dest.CanaryService) {
		dest.CanaryService = aws.String(fmt.Sprintf("%s-canary", *dest.Service))
	}
	return nil
}
// LoadFromFiles reads service.json and task-definition.json from dir and
// fills the cluster, service, and base64 payload fields of e.
func (e *Envars) LoadFromFiles(dir string) error {
	svcPath := filepath.Join(dir, "service.json")
	tdPath := filepath.Join(dir, "task-definition.json")
	// Both files must exist before we try to parse either.
	_, svcStatErr := os.Stat(svcPath)
	_, tdStatErr := os.Stat(tdPath)
	if svcStatErr != nil || tdStatErr != nil {
		return NewErrorf("roll out context specified at '%s' but no 'service.json' or 'task-definition.json'", dir)
	}
	svc := &ecs.CreateServiceInput{}
	td := &ecs.RegisterTaskDefinitionInput{}
	svcRaw, err := ReadAndUnmarshalJson(svcPath, svc)
	if err != nil {
		return NewErrorf("failed to read and unmarshal service.json: %s", err)
	}
	tdRaw, err := ReadAndUnmarshalJson(tdPath, td)
	if err != nil {
		return NewErrorf("failed to read and unmarshal task-definition.json: %s", err)
	}
	svcBase64 := base64.StdEncoding.EncodeToString(svcRaw)
	tdBase64 := base64.StdEncoding.EncodeToString(tdRaw)
	e.Cluster = svc.Cluster
	e.Service = svc.ServiceName
	e.ServiceDefinitionBase64 = &svcBase64
	e.TaskDefinitionBase64 = &tdBase64
	return nil
}
// Merge copies every non-empty field of o onto e, leaving the remaining
// fields of e untouched. It never fails; the error return is kept for the
// existing callers.
func (e *Envars) Merge(o *Envars) error {
	overrides := []struct {
		dst **string
		src *string
	}{
		{&e.Region, o.Region},
		{&e.Cluster, o.Cluster},
		{&e.Service, o.Service},
		{&e.CanaryService, o.CanaryService},
		{&e.TaskDefinitionBase64, o.TaskDefinitionBase64},
		{&e.TaskDefinitionArn, o.TaskDefinitionArn},
		{&e.ServiceDefinitionBase64, o.ServiceDefinitionBase64},
	}
	for _, f := range overrides {
		if !isEmpty(f.src) {
			*f.dst = f.src
		}
	}
	return nil
}
// ReadAndUnmarshalJson reads the file at path (applying env-var substitution
// via ReadFileAndApplyEnvars) and unmarshals its JSON content into dest,
// returning the raw bytes that were read.
func ReadAndUnmarshalJson(path string, dest interface{}) ([]byte, error) {
	// Flattened the original if/else nesting: keep the happy path
	// left-aligned and return early on error (Go idiom).
	d, err := ReadFileAndApplyEnvars(path)
	if err != nil {
		return d, err
	}
	if err := json.Unmarshal(d, dest); err != nil {
		return d, err
	}
	return d, nil
}
|
package delivery
import (
"encoding/json"
"github.com/Arkadiyche/bd_techpark/internal/pkg/models"
"github.com/Arkadiyche/bd_techpark/internal/pkg/thread"
"github.com/gorilla/mux"
"net/http"
)
// ThreadHandler bundles the HTTP handlers for thread endpoints around a
// thread.UseCase implementation.
type ThreadHandler struct {
	UseCase thread.UseCase
}
// Create handles POST of a new thread in the forum given by the slug path
// variable. Responses: 201 with the created thread, 404 when the forum/user
// does not exist, 409 with the existing thread on conflict, 500 otherwise.
func (th *ThreadHandler) Create(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	//fmt.Println("thread Create")
	thread := models.Thread{}
	vars := mux.Vars(r)
	if err := json.NewDecoder(r.Body).Decode(&thread); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	thread.Forum = vars["slug"]
	t, err := th.UseCase.Create(&thread)
	//fmt.Println(err)
	if err != nil {
		// The use case reports failures through a custom error value whose
		// Message distinguishes "not found" from "already exists".
		switch err.Message {
		case models.NotExist.Error():
			res, err := json.Marshal(err)
			if err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			w.WriteHeader(404)
			w.Write(res)
		case models.Exist.Error():
			// Conflict: return the pre-existing thread, not the error.
			res, err := json.Marshal(t)
			if err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			w.WriteHeader(409)
			w.Write(res)
		default:
			http.Error(w, err.Message, http.StatusInternalServerError)
			return
		}
	} else {
		res, err := json.Marshal(thread)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		w.WriteHeader(201)
		w.Write(res)
	}
}
// ForumThreads lists the threads of the forum identified by the slug path
// variable; 200 with the list on success, 404 with the error body otherwise.
func (th *ThreadHandler) ForumThreads(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	forumSlug := mux.Vars(r)["slug"]
	ts, ucErr := th.UseCase.ForumThreads(forumSlug, *r.URL)
	if ucErr != nil {
		body, marshalErr := json.Marshal(ucErr)
		if marshalErr != nil {
			http.Error(w, marshalErr.Error(), http.StatusBadRequest)
			return
		}
		w.WriteHeader(404)
		w.Write(body)
		return
	}
	body, marshalErr := json.Marshal(ts)
	if marshalErr != nil {
		http.Error(w, marshalErr.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(200)
	w.Write(body)
}
// GetThread returns the single thread identified by its slug or numeric id;
// 200 with the thread on success, 404 with the error body otherwise.
func (th *ThreadHandler) GetThread(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	slugOrID := mux.Vars(r)["slug_or_id"]
	t, ucErr := th.UseCase.GetThread(slugOrID)
	if ucErr != nil {
		body, marshalErr := json.Marshal(ucErr)
		if marshalErr != nil {
			http.Error(w, marshalErr.Error(), http.StatusBadRequest)
			return
		}
		w.WriteHeader(404)
		w.Write(body)
		return
	}
	body, marshalErr := json.Marshal(t)
	if marshalErr != nil {
		http.Error(w, marshalErr.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(200)
	w.Write(body)
}
// UpdateThread applies the decoded body to the thread identified by its slug
// or numeric id; 200 with the updated thread, 404 with the error body on
// failure, 400 on a malformed request body.
func (th *ThreadHandler) UpdateThread(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	thread := models.Thread{}
	slugOrID := mux.Vars(r)["slug_or_id"]
	if err := json.NewDecoder(r.Body).Decode(&thread); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	t, ucErr := th.UseCase.UpdateThread(slugOrID, thread)
	if ucErr != nil {
		body, marshalErr := json.Marshal(ucErr)
		if marshalErr != nil {
			http.Error(w, marshalErr.Error(), http.StatusBadRequest)
			return
		}
		w.WriteHeader(404)
		w.Write(body)
		return
	}
	body, marshalErr := json.Marshal(t)
	if marshalErr != nil {
		http.Error(w, marshalErr.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(200)
	w.Write(body)
}
// Vote records the decoded vote on the thread identified by its slug or
// numeric id; 200 with the thread, 404 with the error body on failure,
// 400 on a malformed request body.
func (th *ThreadHandler) Vote(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	vote := models.Vote{}
	pathVars := mux.Vars(r)
	if err := json.NewDecoder(r.Body).Decode(&vote); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	slugOrID := pathVars["slug_or_id"]
	t, ucErr := th.UseCase.Vote(slugOrID, vote)
	if ucErr != nil {
		body, marshalErr := json.Marshal(ucErr)
		if marshalErr != nil {
			http.Error(w, marshalErr.Error(), http.StatusBadRequest)
			return
		}
		w.WriteHeader(404)
		w.Write(body)
		return
	}
	body, marshalErr := json.Marshal(t)
	if marshalErr != nil {
		http.Error(w, marshalErr.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(200)
	w.Write(body)
}
|
package dto
// ExerciseStarted mirrors a row tracking a user's progress on an exercise.
// The pointer date fields allow null values — presumably CompleteDate stays
// nil while the exercise is in progress; confirm with TimeJson's marshalling.
type ExerciseStarted struct {
	ExerciseId   int       `json:"exercise_id" db:"ExerciseId"`
	UserId       int       `json:"user_id" db:"UserId"`
	IsCompleted  bool      `json:"is_completed" db:"IsCompleted"`
	StartDate    *TimeJson `json:"start_date" db:"StartDate"`
	CompleteDate *TimeJson `json:"complete_date" db:"CompleteDate"`
}
|
package main
import (
"fmt"
"github.com/jackytck/projecteuler/tools"
)
// count returns how many digits of p equal r — excluding the final digit —
// together with the positions where they occur.
func count(p, r int) (int, []int) {
	digits := tools.Digits(p)
	last := len(digits) - 1
	var positions []int
	for i, d := range digits {
		if d == r && i != last {
			positions = append(positions, i)
		}
	}
	return len(positions), positions
}
// replace returns the number formed by substituting digit r into p at the
// given digit positions (indices as produced by tools.Digits).
func replace(p, r int, pos []int) int {
	digits := tools.Digits(p)
	// use a distinct loop-variable name: the original shadowed parameter p,
	// which is confusing even though p is not read after this point
	for _, idx := range pos {
		digits[idx] = r
	}
	return tools.JoinInts(digits)
}
// candidate reports whether p, with digit r repeated exactly three times
// (excluding the final position), yields an eight-member prime family when
// those positions are replaced by each digit r..9; it also returns the
// primes found. Starting at r rather than 0 still leaves the 10-r >= 8
// replacement digits needed for an eight-member family when r <= 2.
func candidate(p, r int, primes map[int]bool) (bool, []int) {
	size, pos := count(p, r)
	if size != 3 {
		return false, nil
	}
	var family []int
	for d := r; d <= 9; d++ {
		if n := replace(p, d, pos); primes[n] {
			family = append(family, n)
		}
	}
	return len(family) == 8, family
}
// check reports whether p starts an eight-prime family for some repeated
// digit r in {0, 1, 2}; larger r leaves fewer than eight replacement
// digits in candidate's r..9 scan.
func check(p int, primes map[int]bool) bool {
	for r := 0; r <= 2; r++ {
		valid, _ := candidate(p, r, primes)
		if valid {
			return true
		}
	}
	return false
}
// solve scans primes below one million in ascending order, skipping those
// under 10,000 (see the note at the bottom of the file), and returns the
// first one that starts an eight-prime family, or 0 if none is found.
func solve() int {
	primes := tools.SievePrime(1000000)
	isPrime := make(map[int]bool, len(primes))
	for _, p := range primes {
		isPrime[p] = true
	}
	for _, p := range primes {
		if p >= 10000 && check(p, isPrime) {
			return p
		}
	}
	return 0
}
// main prints the answer to Project Euler problem 51.
func main() {
	fmt.Println(solve())
}
// Find the smallest prime which, by replacing part of the number with the same
// digit, is part of an eight prime value family.
// Note:
// Assume the answer has 5 or 6 digits, then the number of repeating digits
// must be 3.
|
package main
import (
"fmt"
"sync"
)
/**
RWMutex is a read-write lock. Like Mutex, it does not record which goroutine
holds it, so careless use easily leads to deadlocks — the most common
mistake being copying the lock by value.
Note: RWMutex is write-preferring. While readers hold the lock, an arriving
writer waits, but readers that arrive after that writer must queue behind
it. That is how writer priority manifests (no other readers are waiting at
that point, because readers do not block one another).
*/
// RWT takes the RWMutex by value, so the lock is COPIED at the call site
// (go vet's copylocks check flags this). Locking the copy does not
// synchronize with the caller's mutex; presumably this function exists to
// demonstrate that pitfall — see the commented-out call in main.
func RWT(mutex sync.RWMutex) {
	mutex.Lock()
	defer mutex.Unlock()
	fmt.Println("加锁")
}
// main write-locks a zero-value RWMutex, defers the unlock, and prints.
// NOTE(review): uncommenting RWT(mutex) would pass a COPY of the
// already-locked mutex, so RWT's Lock() would block forever (deadlock).
func main() {
	var mutex sync.RWMutex
	mutex.Lock()
	defer mutex.Unlock()
	fmt.Println("加锁")
	//RWT(mutex)
}
|
package connectors
import (
"fmt"
"errors"
"encoding/json"
"github.com/go-playground/validator/v10"
log "github.com/sirupsen/logrus"
)
var (
	// ErrInvalidYelpMetadata is returned whenever a business metadata map
	// cannot be converted into (or fails to parse as) a YelpMetadata struct.
	ErrInvalidYelpMetadata = errors.New("Invalid yelp metadata")
	// validate is the shared validator instance used to enforce the
	// `validate` struct tags declared below.
	validate = validator.New()
)
// ParseYelpMetadata converts a generic business-metadata map into a
// YelpMetadata struct. Businesses might not have a yelp metadata entry in
// the postgres database, in which case ErrInvalidYelpMetadata is returned.
// The map is round-tripped through JSON so the struct's json tags drive the
// field mapping, then validated against its `validate` tags.
func ParseYelpMetadata(data map[string]interface{}) (YelpMetadata, error) {
	log.Debug(fmt.Sprintf("converting %+v to yelp metadata", data))
	// convert to JSON string to parse struct
	jsonString, err := json.Marshal(data)
	if err != nil {
		// include the underlying marshal error; it was previously dropped
		log.Error(fmt.Sprintf("unable to convert map to yelp metadata: %+v", err))
		return YelpMetadata{}, ErrInvalidYelpMetadata
	}
	var meta YelpMetadata
	// unmarshal JSON string into struct; return error if not possible
	if err := json.Unmarshal(jsonString, &meta); err != nil {
		log.Error(fmt.Sprintf("unable to cast metadata to yelp format: %+v", err))
		return YelpMetadata{}, ErrInvalidYelpMetadata
	}
	// validation catches missing required fields such as YelpBusinessId
	return meta, validate.Struct(meta)
}
// YelpMetadata stores the metadata required to make Yelp requests; the
// business id is mandatory (enforced via the validate tag).
type YelpMetadata struct{
	YelpBusinessId string `json:"yelp_business_id" validate:"required"`
}
// GeoCoordinates stores a latitude/longitude pair.
// NOTE(review): validate:"required" rejects zero values, so coordinates of
// exactly 0 would fail validation — confirm that is intended.
type GeoCoordinates struct{
	Latitude float64 `json:"latitude" validate:"required"`
	Longitude float64 `json:"longitude" validate:"required"`
}
// YelpBusinessResults stores the values this package extracts from the
// Yelp API for downstream consumers.
type YelpBusinessResults struct{
	BusinessId string `json:"business_id"`
	BusinessName string `json:"business_name"`
	PhoneNumber string `json:"phone_number"`
	IsOpen bool `json:"is_open"`
}
// YelpBusinessResponse mirrors the raw API response body from Yelp
// (note the polarity difference: Yelp reports is_closed, results is_open).
type YelpBusinessResponse struct{
	Id string `json:"id"`
	Name string `json:"name"`
	Phone string `json:"phone"`
	IsClosed bool `json:"is_closed"`
	Coordinates GeoCoordinates `json:"coordinates"`
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importinto
import (
"context"
"encoding/json"
"runtime"
"sync"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/lightning/backend/kv"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/verification"
"github.com/pingcap/tidb/disttask/framework/proto"
"github.com/pingcap/tidb/disttask/framework/scheduler"
"github.com/pingcap/tidb/executor/asyncloaddata"
"github.com/pingcap/tidb/executor/importer"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// importStepScheduler is a scheduler for import step.
// Scheduler is equivalent to a Lightning instance.
type importStepScheduler struct {
	taskID        int64
	taskMeta      *TaskMeta
	tableImporter *importer.TableImporter
	// sharedVars maps engine ID -> *SharedVars, shared between SplitSubtask
	// (which stores) and OnSubtaskFinished (which loads and deletes).
	sharedVars sync.Map
	logger     *zap.Logger
	// importCtx/importCancel bound the background disk-quota goroutine;
	// wg waits for it in CleanupSubtaskExecEnv.
	importCtx    context.Context
	importCancel context.CancelFunc
	wg           sync.WaitGroup
}
// InitSubtaskExecEnv builds the table importer for this task (table meta ->
// AST args -> load-data controller -> importer) and starts a background
// goroutine that enforces the disk quota until cleanup.
func (s *importStepScheduler) InitSubtaskExecEnv(ctx context.Context) error {
	s.logger.Info("init subtask env")
	idAlloc := kv.NewPanickingAllocators(0)
	tbl, err := tables.TableFromMeta(idAlloc, s.taskMeta.Plan.TableInfo)
	if err != nil {
		return err
	}
	astArgs, err := importer.ASTArgsFromStmt(s.taskMeta.Stmt)
	if err != nil {
		return err
	}
	controller, err := importer.NewLoadDataController(&s.taskMeta.Plan, tbl, astArgs)
	if err != nil {
		return err
	}
	// todo: this method will load all files, but we only import files related to current subtask.
	if err := controller.InitDataFiles(ctx); err != nil {
		return err
	}
	tableImporter, err := importer.NewTableImporter(&importer.JobImportParam{
		GroupCtx: ctx,
		Progress: asyncloaddata.NewProgress(false),
		Job:      &asyncloaddata.Job{},
	}, controller, s.taskID)
	if err != nil {
		return err
	}
	s.tableImporter = tableImporter
	// we need this sub context since CleanupSubtaskExecEnv which wait on this routine is called
	// before parent context is canceled in normal flow.
	s.importCtx, s.importCancel = context.WithCancel(ctx)
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		s.tableImporter.CheckDiskQuota(s.importCtx)
	}()
	return nil
}
// SplitSubtask decodes the subtask meta, opens one data engine and one
// index engine for the subtask, records the shared state under the engine
// ID, and returns one minimal task per data chunk.
func (s *importStepScheduler) SplitSubtask(ctx context.Context, bs []byte) ([]proto.MinimalTask, error) {
	var subtaskMeta ImportStepMeta
	err := json.Unmarshal(bs, &subtaskMeta)
	if err != nil {
		return nil, err
	}
	s.logger.Info("split subtask", zap.Int32("engine-id", subtaskMeta.ID))
	dataEngine, err := s.tableImporter.OpenDataEngine(ctx, subtaskMeta.ID)
	if err != nil {
		return nil, err
	}
	// Unlike in Lightning, we start an index engine for each subtask, whereas previously there was only a single index engine globally.
	// This is because the scheduler currently does not have a post-processing mechanism.
	// If we import the index in `cleanupSubtaskEnv`, the dispatcher will not wait for the import to complete.
	// Multiple index engines may suffer performance degradation due to range overlap.
	// These issues will be alleviated after we integrate s3 sorter.
	// engineID = -1, -2, -3, ...
	indexEngine, err := s.tableImporter.OpenIndexEngine(ctx, common.IndexEngineID-subtaskMeta.ID)
	if err != nil {
		return nil, err
	}
	sharedVars := &SharedVars{
		TableImporter: s.tableImporter,
		DataEngine:    dataEngine,
		IndexEngine:   indexEngine,
		Progress:      asyncloaddata.NewProgress(false),
		Checksum:      &verification.KVChecksum{},
	}
	// consumed later by OnSubtaskFinished for the same engine ID
	s.sharedVars.Store(subtaskMeta.ID, sharedVars)
	miniTask := make([]proto.MinimalTask, 0, len(subtaskMeta.Chunks))
	for _, chunk := range subtaskMeta.Chunks {
		miniTask = append(miniTask, &importStepMinimalTask{
			Plan:       s.taskMeta.Plan,
			Chunk:      chunk,
			SharedVars: sharedVars,
		})
	}
	return miniTask, nil
}
// OnSubtaskFinished closes and imports the subtask's data engine and index
// engine, folds the collected checksum and row counts into the subtask
// meta, drops the shared state, and returns the updated meta as JSON.
func (s *importStepScheduler) OnSubtaskFinished(ctx context.Context, subtaskMetaBytes []byte) ([]byte, error) {
	var subtaskMeta ImportStepMeta
	if err := json.Unmarshal(subtaskMetaBytes, &subtaskMeta); err != nil {
		return nil, err
	}
	s.logger.Info("on subtask finished", zap.Int32("engine-id", subtaskMeta.ID))
	val, ok := s.sharedVars.Load(subtaskMeta.ID)
	if !ok {
		return nil, errors.Errorf("sharedVars %d not found", subtaskMeta.ID)
	}
	sharedVars, ok := val.(*SharedVars)
	if !ok {
		return nil, errors.Errorf("sharedVars %d not found", subtaskMeta.ID)
	}
	// TODO: we should close and cleanup engine in all case, since there's no checkpoint.
	s.logger.Info("import data engine", zap.Int32("engine-id", subtaskMeta.ID))
	closedDataEngine, err := sharedVars.DataEngine.Close(ctx)
	if err != nil {
		return nil, err
	}
	dataKVCount, err := s.tableImporter.ImportAndCleanup(ctx, closedDataEngine)
	if err != nil {
		return nil, err
	}
	s.logger.Info("import index engine", zap.Int32("engine-id", subtaskMeta.ID))
	if closedEngine, err := sharedVars.IndexEngine.Close(ctx); err != nil {
		return nil, err
	} else if _, err := s.tableImporter.ImportAndCleanup(ctx, closedEngine); err != nil {
		return nil, err
	}
	// lock while reading the checksum/progress that minimal tasks updated
	sharedVars.mu.Lock()
	defer sharedVars.mu.Unlock()
	subtaskMeta.Checksum.Sum = sharedVars.Checksum.Sum()
	subtaskMeta.Checksum.KVs = sharedVars.Checksum.SumKVS()
	subtaskMeta.Checksum.Size = sharedVars.Checksum.SumSize()
	subtaskMeta.Result = Result{
		ReadRowCnt:   sharedVars.Progress.ReadRowCnt.Load(),
		LoadedRowCnt: uint64(dataKVCount),
		ColSizeMap:   sharedVars.Progress.GetColSize(),
	}
	s.sharedVars.Delete(subtaskMeta.ID)
	return json.Marshal(subtaskMeta)
}
// CleanupSubtaskExecEnv stops the background disk-quota goroutine, waits
// for it to exit, then closes the table importer.
// The named result parameter in the original was never used, so it is
// dropped in favor of a plain return type.
func (s *importStepScheduler) CleanupSubtaskExecEnv(_ context.Context) error {
	s.logger.Info("cleanup subtask env")
	s.importCancel()
	s.wg.Wait()
	return s.tableImporter.Close()
}
// Rollback is currently a no-op that only logs; real rollback is pending.
func (s *importStepScheduler) Rollback(context.Context) error {
	// TODO: add rollback
	s.logger.Info("rollback")
	return nil
}
// postStepScheduler handles the post-process step (e.g. verification after
// import); it embeds EmptyScheduler so it only overrides SplitSubtask.
type postStepScheduler struct {
	scheduler.EmptyScheduler
	taskID   int64
	taskMeta *TaskMeta
	logger   *zap.Logger
}
// compile-time check that postStepScheduler satisfies scheduler.Scheduler
var _ scheduler.Scheduler = &postStepScheduler{}
// SplitSubtask decodes the post-process step meta and wraps it in a single
// minimal task; post-processing is not parallelized further.
func (p *postStepScheduler) SplitSubtask(_ context.Context, metaBytes []byte) ([]proto.MinimalTask, error) {
	task := &postProcessStepMinimalTask{
		taskMeta: p.taskMeta,
		logger:   p.logger,
	}
	if err := json.Unmarshal(metaBytes, &task.meta); err != nil {
		return nil, err
	}
	return []proto.MinimalTask{task}, nil
}
// init registers the IMPORT INTO task type and one scheduler constructor
// per step (import, post-process) with the disttask framework.
func init() {
	// prepareFn decodes the task meta and builds a logger tagged with the
	// task id and step name; shared by both constructors below.
	prepareFn := func(taskID int64, bs []byte, step int64) (*TaskMeta, *zap.Logger, error) {
		taskMeta := TaskMeta{}
		if err := json.Unmarshal(bs, &taskMeta); err != nil {
			return nil, nil, err
		}
		logger := logutil.BgLogger().With(
			zap.String("type", proto.ImportInto),
			zap.Int64("task-id", taskID),
			zap.String("step", stepStr(step)),
		)
		logger.Info("create step scheduler")
		return &taskMeta, logger, nil
	}
	scheduler.RegisterTaskType(proto.ImportInto, scheduler.WithPoolSize(int32(runtime.GOMAXPROCS(0))))
	scheduler.RegisterSchedulerConstructor(proto.ImportInto, StepImport,
		func(ctx context.Context, taskID int64, bs []byte, step int64) (scheduler.Scheduler, error) {
			// TODO(tangenta): use context for lifetime control.
			taskMeta, logger, err := prepareFn(taskID, bs, step)
			if err != nil {
				return nil, err
			}
			return &importStepScheduler{
				taskID:   taskID,
				taskMeta: taskMeta,
				logger:   logger,
			}, nil
		},
	)
	scheduler.RegisterSchedulerConstructor(proto.ImportInto, StepPostProcess,
		func(ctx context.Context, taskID int64, bs []byte, step int64) (scheduler.Scheduler, error) {
			// TODO(tangenta): use context for lifetime control.
			taskMeta, logger, err := prepareFn(taskID, bs, step)
			if err != nil {
				return nil, err
			}
			return &postStepScheduler{
				taskID:   taskID,
				taskMeta: taskMeta,
				logger:   logger,
			}, nil
		},
	)
}
|
package cache
import (
"bytes"
"fmt"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestCache covers the basic cache contract: miss, insert, lookup, replace,
// delete, and the three eviction limits (MaxCount, MaxSize, MaxElementSize),
// verifying the OnDelete callback reports the evicted key/value.
func TestCache(t *testing.T) {
	t.Parallel()
	conf := Config{}
	// capture what the cache evicts so the limit cases can assert on it
	var rmKey, rmVal []byte
	conf.OnDelete = func(key, val []byte) {
		rmKey = key
		rmVal = val
	}
	conf.MaxSize = 12
	conf.MaxElementSize = 12
	conf.MaxCount = 3
	conf.EnableLRU = true
	c := New(conf)
	var d []byte
	// get - not found
	assert.True(t, c.Get([]byte("k1")) == nil)
	// add new (Set reports whether the key already existed)
	assert.True(t, !c.Set([]byte("k1"), []byte("v1")))
	assert.True(t, !c.Set([]byte("k2"), []byte("v2")))
	assert.True(t, c.Stats().Count == 2)
	// get added
	d = c.Get([]byte("k1"))
	assert.True(t, bytes.Equal(d, []byte("v1")))
	d = c.Get([]byte("k2"))
	assert.True(t, bytes.Equal(d, []byte("v2")))
	// replace existing
	assert.True(t, c.Set([]byte("k1"), []byte("v!")))
	d = c.Get([]byte("k1"))
	assert.True(t, bytes.Equal(d, []byte("v!")))
	// delete
	c.Del([]byte("k1"))
	assert.True(t, c.Get([]byte("k1")) == nil)
	c.Clear()
	// MaxCount limit: the 4th insert evicts the LRU entry ("k1")
	assert.True(t, !c.Set([]byte("k1"), []byte("v1")))
	assert.True(t, !c.Set([]byte("k2"), []byte("v2")))
	assert.True(t, !c.Set([]byte("k3"), []byte("v3")))
	rmKey = nil
	rmVal = nil
	assert.True(t, !c.Set([]byte("k4"), []byte("v4"))) // "k1" is removed
	assert.True(t, bytes.Equal(rmKey, []byte("k1")))
	assert.True(t, bytes.Equal(rmVal, []byte("v1")))
	c.Clear()
	// MaxSize limit: a large value forces eviction of "k1"
	assert.True(t, !c.Set([]byte("k1"), []byte("v1")))
	rmKey = nil
	rmVal = nil
	assert.True(t, !c.Set([]byte("k2"), []byte("1234567"))) // "k1" is removed
	assert.True(t, bytes.Equal(rmKey, []byte("k1")))
	c.Clear()
	// MaxElementSize limit: an oversized element is not stored at all
	assert.True(t, !c.Set([]byte("k1"), []byte("12345678901")))
	assert.True(t, c.Get([]byte("k1")) == nil)
	c.Del([]byte("k1"))
	assert.True(t, c.Stats().Count == 0)
	assert.True(t, c.Stats().Size == 0)
}
// TestParallel sets, gets, and deletes items from many goroutines at once;
// run with -race to catch data races inside the cache. Each worker touches
// only its own keys, so a Get either misses (evicted) or returns the value
// that worker wrote.
func TestParallel(t *testing.T) {
	t.Parallel()
	conf := Config{}
	conf.EnableLRU = true
	conf.MaxSize = 1024
	c := New(conf)
	wg := sync.WaitGroup{}
	const workers = 100 // previously a magic 100 in the loop condition
	N := 100
	for w := 0; w != workers; w++ {
		wg.Add(1)
		go func(wid int) {
			// deferred so wg.Wait() is released even if the body panics;
			// the original called Done() at the end, which could deadlock
			defer wg.Done()
			for i := 0; i != N; i++ {
				key := []byte(fmt.Sprintf("key-%d-%d", wid, i))
				val := []byte{1, 2, 3, byte(i % 255)}
				_ = c.Set(key, val)
				rval := c.Get(key)
				if rval != nil {
					assert.True(t, val[3] == rval[3])
				}
				c.Del(key)
			}
		}(w)
	}
	wg.Wait()
}
|
package k8s
import (
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// Client wraps a Kubernetes clientset with the small query surface this
// package needs.
type Client struct {
	c *kubernetes.Clientset
}
// NewClient creates new Kubernetes client from the in-cluster service
// account configuration; it therefore only works when running inside a pod.
func NewClient() (*Client, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return &Client{c: clientset}, nil
}
// Pods returns a slice of pods across all namespaces, with each container's
// image digest filled in from the pod status where available.
func (c *Client) Pods() ([]Pod, error) {
	list, err := c.c.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	pods := make([]Pod, len(list.Items))
	for i, pod := range list.Items {
		pods[i] = Pod{
			Name:           pod.ObjectMeta.Name,
			Containers:     make(map[string]*Container, len(pod.Spec.Containers)),
			InitContainers: make(map[string]*Container, len(pod.Spec.InitContainers)),
		}
		for _, container := range pod.Spec.Containers {
			pods[i].Containers[container.Name] = &Container{
				Image: container.Image,
			}
		}
		for _, container := range pod.Spec.InitContainers {
			pods[i].InitContainers[container.Name] = &Container{
				Image: container.Image,
			}
		}
		// ImageID can be empty (or not yet digest-shaped) while a container is
		// still being created, so guard the split instead of indexing [2]
		// blindly — the original panicked on such statuses. The map lookup is
		// also checked in case a status name has no matching spec entry.
		for _, status := range pod.Status.ContainerStatuses {
			if target, ok := pods[i].Containers[status.Name]; ok {
				if parts := strings.SplitN(status.ImageID, ":", 3); len(parts) == 3 {
					target.Digest = parts[2]
				}
			}
		}
		for _, status := range pod.Status.InitContainerStatuses {
			if target, ok := pods[i].InitContainers[status.Name]; ok {
				if parts := strings.SplitN(status.ImageID, ":", 3); len(parts) == 3 {
					target.Digest = parts[2]
				}
			}
		}
	}
	return pods, nil
}
// Pod is a simplified view of a Kubernetes pod: its name plus the image
// information of its regular and init containers, keyed by container name.
type Pod struct {
	Name           string
	Containers     map[string]*Container
	InitContainers map[string]*Container
}
// Container holds a container's configured image and, when the pod status
// provides one, the resolved image digest.
type Container struct {
	Image  string
	Digest string
}
|
package fileversion
import "github.com/scjalliance/drivestream/resource"
// A Map is a map of file versions, keyed by version number.
type Map interface {
	// List returns a list of version numbers for the file.
	List() (v []resource.Version, err error)
	// Ref returns a file version reference for the version number.
	Ref(v resource.Version) Reference
}
|
package utils
import (
	"fmt"
	"strconv"
)
// RowRecord is one timestamped row of field values, in column order.
type RowRecord struct {
	timestamp int64
	fieldList []Field
}
// NewRowRecord builds a RowRecord for the given timestamp and field list
// (the slice is stored as-is, not copied).
func NewRowRecord(timestamp int64, fieldList []Field) *RowRecord {
	return &RowRecord{
		timestamp: timestamp,
		fieldList: fieldList,
	}
}
// AddField appends an existing Field to the record.
func (r_ *RowRecord) AddField(f Field) {
	r_.fieldList = append(r_.fieldList, f)
}
// AddFieldWithValue constructs a Field from a data type and raw value and
// appends it to the record.
func (r_ *RowRecord) AddFieldWithValue(dataType int32, value interface{}) {
	r_.fieldList = append(r_.fieldList, *NewField(dataType, value))
}
// ToString renders the record as the decimal timestamp followed by each
// field's string form, separated by double tabs.
func (r_ *RowRecord) ToString() string {
	// strconv.FormatInt, not string(int64): string(n) interprets n as a
	// Unicode code point, producing a single rune instead of "1234...".
	str := strconv.FormatInt(r_.timestamp, 10)
	for _, v := range r_.fieldList {
		str += "\t\t" + v.ToString()
	}
	return str
}
// SetTimestamp replaces the record's timestamp.
func (r_ *RowRecord) SetTimestamp(timestamp int64) {
	r_.timestamp = timestamp
}
// GetTimestamp returns the record's timestamp.
func (r_ *RowRecord) GetTimestamp() int64 {
	return r_.timestamp
}
// SetField overwrites the field at index and reports whether the index was
// in range. A negative index previously caused a runtime panic; it is now
// rejected the same way as an index past the end.
func (r_ *RowRecord) SetField(index int, field Field) bool {
	if index >= 0 && index < len(r_.fieldList) {
		r_.fieldList[index] = field
		return true
	}
	fmt.Println("Out of FieldList Length!")
	return false
}
// SetFields replaces the entire field list (the slice is stored as-is).
func (r_ *RowRecord) SetFields(fields []Field) {
	r_.fieldList = fields
}
// GetFields returns the record's field list (not a copy).
func (r_ *RowRecord) GetFields() []Field {
	return r_.fieldList
}
|
// Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package android
import (
"sync"
"github.com/google/blueprint"
)
// phonyMapOnceKey keys the per-Config phony map in Config.Once.
var phonyMapOnceKey = NewOnceKey("phony")
// phonyMap maps a phony target name to the paths it depends on.
type phonyMap map[string]Paths
// phonyMapLock guards concurrent mutation of the shared phony map.
var phonyMapLock sync.Mutex
// getPhonyMap returns the phony map stored on config, creating it once per
// Config via Config.Once.
func getPhonyMap(config Config) phonyMap {
	return config.Once(phonyMapOnceKey, func() interface{} {
		return make(phonyMap)
	}).(phonyMap)
}
// addPhony appends deps to the named phony target under the package lock,
// since modules may call this concurrently.
func addPhony(config Config, name string, deps ...Path) {
	phonyMap := getPhonyMap(config)
	phonyMapLock.Lock()
	defer phonyMapLock.Unlock()
	phonyMap[name] = append(phonyMap[name], deps...)
}
// phonySingleton emits the accumulated phony targets, either as ninja phony
// rules (standalone builds) or as make variables (embedded in make).
type phonySingleton struct {
	phonyMap  phonyMap
	phonyList []string // sorted target names for deterministic output
}
// compile-time check that *phonySingleton provides make variables
var _ SingletonMakeVarsProvider = (*phonySingleton)(nil)
// GenerateBuildActions snapshots the phony map, sorts and de-duplicates it
// for deterministic output, and — when not embedded in make — emits one
// ninja phony rule per target.
func (p *phonySingleton) GenerateBuildActions(ctx SingletonContext) {
	p.phonyMap = getPhonyMap(ctx.Config())
	p.phonyList = SortedStringKeys(p.phonyMap)
	for _, phony := range p.phonyList {
		p.phonyMap[phony] = SortedUniquePaths(p.phonyMap[phony])
	}
	// when embedded in make, MakeVars exports the targets instead
	if !ctx.Config().EmbeddedInMake() {
		for _, phony := range p.phonyList {
			ctx.Build(pctx, BuildParams{
				Rule:      blueprint.Phony,
				Outputs:   []WritablePath{PathForPhony(ctx, phony)},
				Implicits: p.phonyMap[phony],
			})
		}
	}
}
// MakeVars exports each phony target and its dependencies to make.
// The receiver is now a pointer for consistency with GenerateBuildActions —
// mixed value/pointer receivers on one type are a known Go pitfall; the
// interface assertion above and the factory both already use *phonySingleton.
func (p *phonySingleton) MakeVars(ctx MakeVarsContext) {
	for _, phony := range p.phonyList {
		ctx.Phony(phony, p.phonyMap[phony]...)
	}
}
// phonySingletonFactory constructs the singleton for registration.
func phonySingletonFactory() Singleton {
	return &phonySingleton{}
}
|
package server
import (
"net/http"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/jackc/pgx"
"github.com/jackc/yakstak/server/handlers"
)
// TodoRow mirrors a row of the todo table.
// NOTE(review): no code in this file reads these fields yet — presumably
// used by query scanning elsewhere; confirm before removing.
type TodoRow struct {
	id   int32
	body string
	done bool
}
// Serve wires up the database pool and the chi middleware/router stack,
// then blocks serving HTTP on :3000. It panics if the pool cannot be
// created or if the listener fails, matching the existing
// panic-on-startup-error behavior.
func Serve() {
	db, err := createDB()
	if err != nil {
		panic(err)
	}
	r := chi.NewRouter()
	r.Use(middleware.RequestID)
	r.Use(middleware.RealIP)
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)
	r.Method("GET", "/", &handlers.YakstakIndex{DB: db})
	// ListenAndServe always returns a non-nil error once the server stops;
	// previously that error was silently discarded.
	if err := http.ListenAndServe(":3000", r); err != nil {
		panic(err)
	}
}
// createDB opens a pgx connection pool against the local postgres unix
// socket, database "yakstak_dev", capped at 10 connections.
func createDB() (*pgx.ConnPool, error) {
	return pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig:     pgx.ConnConfig{Host: "/var/run/postgresql", Database: "yakstak_dev"},
		MaxConnections: 10,
	})
}
|
package open_resource_discovery
import (
"encoding/json"
"regexp"
"strings"
"time"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/go-ozzo/ozzo-validation/v4/is"
"github.com/kyma-incubator/compass/components/director/internal/model"
"github.com/pkg/errors"
"github.com/tidwall/gjson"
)
// Disclaimer: All regexes below are provided by the ORD spec itself.
// NOTE(review): these patterns are recompiled with regexp.MustCompile at
// every validation call site below; consider hoisting compiled *Regexp
// values to package scope if validation becomes hot.
const (
	SemVerRegex       = "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$"
	PackageOrdIDRegex = "^([a-zA-Z0-9._\\-]+):(package):([a-zA-Z0-9._\\-]+):(alpha|beta|v[0-9]+)$"
	VendorOrdIDRegex  = "^([a-z0-9]+)$"
	ProductOrdIDRegex = "^([a-z0-9]+):([a-zA-Z0-9._\\-]+)$"
	BundleOrdIDRegex  = "^([a-zA-Z0-9._\\-]+):(consumptionBundle):([a-zA-Z0-9._\\-]+):v([0-9]+)$"
	//TODO: further clarification is needed as what is required by the spec
	//TombstoneOrdIDRegex = "^([a-zA-Z0-9._\\-]+):(package|apiResource|eventResource):([a-zA-Z0-9._\\-]+):v([0-9]+)$"
	StringArrayElementRegex = "^[a-zA-Z0-9 -\\.\\/]*$"
	CountryRegex            = "^[A-Z]{2}$"
	ApiOrdIDRegex           = "^([a-zA-Z0-9._\\-]+):(apiResource):([a-zA-Z0-9._\\-]+):(alpha|beta|v[0-9]+)$"
	EventOrdIDRegex         = "^([a-zA-Z0-9._\\-]+):(eventResource):([a-zA-Z0-9._\\-]+):(alpha|beta|v[0-9]+)$"
	PPMSObjectIDRegex       = "^([0-9]+)$"
	LabelsKeyRegex          = "^[a-zA-Z0-9-_.]*$"
)
// shortDescriptionRules: required, 1-255 chars, no line breaks — applied to
// every ORD short-description field below.
var shortDescriptionRules = []validation.Rule{
	validation.Required, validation.Length(1, 255), validation.NewStringRule(noNewLines, "short description should not contain line breaks"),
}
// descriptionRules: identical constraints for full description fields.
var descriptionRules = []validation.Rule{
	validation.Required, validation.Length(1, 255), validation.NewStringRule(noNewLines, "description should not contain line breaks"),
}
// validateDocumentInput checks that the ORD document declares the single
// supported spec version "1.0-rc.1".
func validateDocumentInput(doc *Document) error {
	return validation.ValidateStruct(doc, validation.Field(&doc.OpenResourceDiscovery, validation.Required, validation.In("1.0-rc.1")))
}
// validatePackageInput validates an ORD package: required identity fields
// (ordID, title, descriptions, semver version, vendor), the policy-level
// pairing with customPolicyLevel, nested link objects, and all
// JSON-string-array fields against their spec regexes.
func validatePackageInput(pkg *model.PackageInput) error {
	return validation.ValidateStruct(pkg,
		validation.Field(&pkg.OrdID, validation.Required, validation.Match(regexp.MustCompile(PackageOrdIDRegex))),
		validation.Field(&pkg.Title, validation.Required),
		validation.Field(&pkg.ShortDescription, shortDescriptionRules...),
		validation.Field(&pkg.Description, validation.Required),
		validation.Field(&pkg.Version, validation.Required, validation.Match(regexp.MustCompile(SemVerRegex))),
		// policyLevel must be "custom" whenever a customPolicyLevel is given...
		validation.Field(&pkg.PolicyLevel, validation.Required, validation.In("sap", "sap-partner", "custom"), validation.When(pkg.CustomPolicyLevel != nil, validation.In("custom"))),
		// ...and customPolicyLevel must be empty otherwise
		validation.Field(&pkg.CustomPolicyLevel, validation.When(pkg.PolicyLevel != "custom", validation.Empty)),
		validation.Field(&pkg.PackageLinks, validation.By(validatePackageLinks)),
		validation.Field(&pkg.Links, validation.By(validateORDLinks)),
		validation.Field(&pkg.Vendor, validation.Required, validation.Match(regexp.MustCompile(VendorOrdIDRegex))),
		validation.Field(&pkg.PartOfProducts, validation.Required, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(ProductOrdIDRegex))
		})),
		validation.Field(&pkg.Tags, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&pkg.Labels, validation.By(validateORDLabels)),
		validation.Field(&pkg.Countries, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(CountryRegex))
		})),
		validation.Field(&pkg.LineOfBusiness, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&pkg.Industry, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
	)
}
// validateBundleInput validates an ORD consumption bundle: identity fields,
// nested links/labels, and the credential exchange strategies array, where
// each entry must have type "custom" and may carry a request-URI callback.
func validateBundleInput(bndl *model.BundleCreateInput) error {
	return validation.ValidateStruct(bndl,
		validation.Field(&bndl.OrdID, validation.Required, validation.Match(regexp.MustCompile(BundleOrdIDRegex))),
		validation.Field(&bndl.Name, validation.Required),
		validation.Field(&bndl.ShortDescription, shortDescriptionRules...),
		validation.Field(&bndl.Description, descriptionRules...),
		validation.Field(&bndl.Links, validation.By(validateORDLinks)),
		validation.Field(&bndl.Labels, validation.By(validateORDLabels)),
		validation.Field(&bndl.CredentialExchangeStrategies, validation.By(func(value interface{}) error {
			return validateJSONArrayOfObjects(value, map[string][]validation.Rule{
				"type": {
					validation.Required,
					validation.In("custom"),
				},
				"callbackUrl": {
					is.RequestURI,
				},
			}, validateCustomType, validateCustomDescription)
		})),
	)
}
// validateAPIInput validates an ORD API resource: identity and version
// fields, package/bundle references, protocol/visibility enums, nested
// links and JSON-array fields, plus the deprecation pairing (deprecated
// resources must carry a sunset date and a successor).
func validateAPIInput(api *model.APIDefinitionInput) error {
	return validation.ValidateStruct(api,
		validation.Field(&api.OrdID, validation.Required, validation.Match(regexp.MustCompile(ApiOrdIDRegex))),
		validation.Field(&api.Name, validation.Required),
		validation.Field(&api.ShortDescription, shortDescriptionRules...),
		validation.Field(&api.Description, validation.Required),
		validation.Field(&api.VersionInput.Value, validation.Required, validation.Match(regexp.MustCompile(SemVerRegex))),
		validation.Field(&api.OrdPackageID, validation.Required, validation.Match(regexp.MustCompile(PackageOrdIDRegex))),
		validation.Field(&api.OrdBundleID, validation.When(api.OrdBundleID != nil, validation.Match(regexp.MustCompile(BundleOrdIDRegex)))),
		validation.Field(&api.ApiProtocol, validation.Required, validation.In("odata-v2", "odata-v4", "soap-inbound", "soap-outbound", "rest", "sap-rfc")),
		validation.Field(&api.Visibility, validation.Required, validation.In("public", "internal", "private")),
		validation.Field(&api.PartOfProducts, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(ProductOrdIDRegex))
		})),
		validation.Field(&api.Tags, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&api.Countries, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(CountryRegex))
		})),
		validation.Field(&api.LineOfBusiness, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&api.Industry, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&api.ResourceDefinitions, validation.Required),
		validation.Field(&api.APIResourceLinks, validation.By(validateAPILinks)),
		validation.Field(&api.Links, validation.By(validateORDLinks)),
		validation.Field(&api.ReleaseStatus, validation.Required, validation.In("beta", "active", "deprecated")),
		// guard the dereference: the When condition is evaluated while the rule
		// list is being built, BEFORE Required runs, so a nil ReleaseStatus
		// previously panicked instead of producing a validation error
		validation.Field(&api.SunsetDate, validation.When(api.ReleaseStatus != nil && *api.ReleaseStatus == "deprecated", validation.Required), validation.When(api.SunsetDate != nil, validation.By(isValidDate(api.SunsetDate)))),
		validation.Field(&api.Successor, validation.When(api.ReleaseStatus != nil && *api.ReleaseStatus == "deprecated", validation.Required), validation.Match(regexp.MustCompile(ApiOrdIDRegex))),
		validation.Field(&api.ChangeLogEntries, validation.By(validateORDChangeLogEntries)),
		validation.Field(&api.TargetURL, validation.Required, is.RequestURI),
		validation.Field(&api.Labels, validation.By(validateORDLabels)),
	)
}
// validateEventInput validates an ORD event resource; it mirrors
// validateAPIInput minus the protocol/target-URL fields.
func validateEventInput(event *model.EventDefinitionInput) error {
	return validation.ValidateStruct(event,
		validation.Field(&event.OrdID, validation.Required, validation.Match(regexp.MustCompile(EventOrdIDRegex))),
		validation.Field(&event.Name, validation.Required),
		validation.Field(&event.ShortDescription, shortDescriptionRules...),
		validation.Field(&event.Description, validation.Required),
		validation.Field(&event.VersionInput.Value, validation.Required, validation.Match(regexp.MustCompile(SemVerRegex))),
		validation.Field(&event.OrdPackageID, validation.Required, validation.Match(regexp.MustCompile(PackageOrdIDRegex))),
		validation.Field(&event.OrdBundleID, validation.When(event.OrdBundleID != nil, validation.Match(regexp.MustCompile(BundleOrdIDRegex)))),
		validation.Field(&event.Visibility, validation.Required, validation.In("public", "internal", "private")),
		validation.Field(&event.PartOfProducts, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(ProductOrdIDRegex))
		})),
		validation.Field(&event.Tags, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&event.Countries, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(CountryRegex))
		})),
		validation.Field(&event.LineOfBusiness, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&event.Industry, validation.By(func(value interface{}) error {
			return validateJSONArrayOfStrings(value, regexp.MustCompile(StringArrayElementRegex))
		})),
		validation.Field(&event.ResourceDefinitions, validation.Required),
		validation.Field(&event.Links, validation.By(validateORDLinks)),
		validation.Field(&event.ReleaseStatus, validation.Required, validation.In("beta", "active", "deprecated")),
		// guard the dereference: the When condition is evaluated while the rule
		// list is being built, BEFORE Required runs, so a nil ReleaseStatus
		// previously panicked instead of producing a validation error
		validation.Field(&event.SunsetDate, validation.When(event.ReleaseStatus != nil && *event.ReleaseStatus == "deprecated", validation.Required), validation.When(event.SunsetDate != nil, validation.By(isValidDate(event.SunsetDate)))),
		validation.Field(&event.Successor, validation.When(event.ReleaseStatus != nil && *event.ReleaseStatus == "deprecated", validation.Required), validation.Match(regexp.MustCompile(EventOrdIDRegex))),
		validation.Field(&event.ChangeLogEntries, validation.By(validateORDChangeLogEntries)),
		validation.Field(&event.Labels, validation.By(validateORDLabels)),
	)
}
// validateProductInput validates an ORD product: required ordID/title/
// short description/vendor, plus optional parent and PPMS object id, each
// against its spec regex.
func validateProductInput(product *model.ProductInput) error {
	return validation.ValidateStruct(product,
		validation.Field(&product.OrdID, validation.Required, validation.Match(regexp.MustCompile(ProductOrdIDRegex))),
		validation.Field(&product.Title, validation.Required),
		validation.Field(&product.ShortDescription, shortDescriptionRules...),
		validation.Field(&product.Vendor, validation.Required, validation.Match(regexp.MustCompile(VendorOrdIDRegex))),
		validation.Field(&product.Parent, validation.When(product.Parent != nil, validation.Match(regexp.MustCompile(ProductOrdIDRegex)))),
		validation.Field(&product.PPMSObjectID, validation.When(product.PPMSObjectID != nil, validation.Match(regexp.MustCompile(PPMSObjectIDRegex)))),
		validation.Field(&product.Labels, validation.By(validateORDLabels)),
	)
}
// validateVendorInput validates an ORD vendor: ordID, title, one of the
// three allowed vendor types, and labels.
func validateVendorInput(vendor *model.VendorInput) error {
	return validation.ValidateStruct(vendor,
		validation.Field(&vendor.OrdID, validation.Required, validation.Match(regexp.MustCompile(VendorOrdIDRegex))),
		validation.Field(&vendor.Title, validation.Required),
		validation.Field(&vendor.Type, validation.Required, validation.In("sap", "sap-partner", "client-registration")),
		validation.Field(&vendor.Labels, validation.By(validateORDLabels)),
	)
}
// validateTombstoneInput validates a tombstone entry; currently only the
// RFC3339 removal date is checked (ordID validation is pending spec
// clarification).
func validateTombstoneInput(tombstone *model.TombstoneInput) error {
	return validation.ValidateStruct(tombstone,
		//TODO: further clarification is needed as what is required by the spec
		//validation.Field(&tombstone.OrdID, validation.Required, validation.Match(regexp.MustCompile(TombstoneOrdIDRegex))),
		validation.Field(&tombstone.RemovalDate, validation.Required, validation.Date(time.RFC3339)))
}
// validateORDLabels validates an ORD labels value: it must be raw JSON
// forming an object whose keys match LabelsKeyRegex and whose values are
// arrays of strings. nil and empty input are accepted as "no labels".
func validateORDLabels(val interface{}) error {
	if val == nil {
		return nil
	}
	labels, ok := val.(json.RawMessage)
	if !ok {
		return errors.New("labels should be json")
	}
	if len(labels) == 0 {
		return nil
	}
	if !gjson.ValidBytes(labels) {
		return errors.New("labels should be valid json")
	}
	parsedLabels := gjson.ParseBytes(labels)
	if !parsedLabels.IsObject() {
		return errors.New("labels should be json object")
	}
	// ForEach stops at the first failing entry; err carries the reason out
	var err error
	parsedLabels.ForEach(func(key, value gjson.Result) bool {
		if err = validation.Validate(key.String(), validation.Match(regexp.MustCompile(LabelsKeyRegex))); err != nil {
			return false
		}
		if !value.IsArray() {
			err = errors.New("label value should be array")
			return false
		}
		for _, el := range value.Array() {
			if el.Type != gjson.String {
				err = errors.New("label value should be array of strings")
				return false
			}
		}
		return true
	})
	return err
}
// validateORDChangeLogEntries validates a changelog array: each entry needs
// a semver version, a release status, a YYYY-MM-DD date, and optionally a
// request-URI url.
func validateORDChangeLogEntries(value interface{}) error {
	return validateJSONArrayOfObjects(value, map[string][]validation.Rule{
		"version": {
			validation.Required,
			validation.Match(regexp.MustCompile(SemVerRegex)),
		},
		"releaseStatus": {
			validation.Required,
			validation.In("beta", "active", "deprecated"),
		},
		"date": {
			validation.Required,
			validation.Date("2006-01-02"),
		},
		"url": {
			is.RequestURI,
		},
	})
}
// validateORDLinks verifies that value is a JSON array of link objects,
// each with a title and a valid request URI.
func validateORDLinks(value interface{}) error {
	return validateJSONArrayOfObjects(value, map[string][]validation.Rule{
		"title": {
			validation.Required,
		},
		"url": {
			validation.Required,
			is.RequestURI,
		},
	})
}
// validatePackageLinks verifies that value is a JSON array of package link
// objects with a known type and a valid URL, and that customType is only
// used together with type "custom".
func validatePackageLinks(value interface{}) error {
	return validateJSONArrayOfObjects(value, map[string][]validation.Rule{
		"type": {
			validation.Required,
			validation.In("terms-of-service", "licence", "client-registration", "payment", "sandbox", "service-level-agreement", "support", "custom"),
		},
		"url": {
			validation.Required,
			is.RequestURI,
		},
	}, func(el gjson.Result) error {
		// Cross-field rule: customType implies type == "custom".
		if el.Get("customType").Exists() && el.Get("type").String() != "custom" {
			return errors.New("if customType is provided, type should be set to 'custom'")
		}
		return nil
	})
}
// validateAPILinks verifies that value is a JSON array of API link objects
// with a known type and a valid URL, and that customType is only used
// together with type "custom".
func validateAPILinks(value interface{}) error {
	return validateJSONArrayOfObjects(value, map[string][]validation.Rule{
		"type": {
			validation.Required,
			validation.In("api-documentation", "authentication", "client-registration", "console", "payment", "service-level-agreement", "support", "custom"),
		},
		"url": {
			validation.Required,
			is.RequestURI,
		},
	}, func(el gjson.Result) error {
		// Cross-field rule: customType implies type == "custom".
		if el.Get("customType").Exists() && el.Get("type").String() != "custom" {
			return errors.New("if customType is provided, type should be set to 'custom'")
		}
		return nil
	})
}
// noNewLines reports whether s is free of the two-character sequence `\n`
// (a backslash followed by 'n', i.e. an escaped newline as it appears
// inside raw JSON string content).
// NOTE(review): "\\n" matches the literal escape sequence, not an actual
// newline character (U+000A) — confirm this is intended given the name.
func noNewLines(s string) bool {
	return !strings.Contains(s, "\\n")
}
// validateJSONArrayOfStrings verifies that arr is a non-empty JSON array
// whose elements are all strings matching regexPattern. A nil or empty
// value is considered valid.
func validateJSONArrayOfStrings(arr interface{}, regexPattern *regexp.Regexp) error {
	if arr == nil {
		return nil
	}
	raw, ok := arr.(json.RawMessage)
	if !ok {
		return errors.New("should be json")
	}
	if len(raw) == 0 {
		// An absent value is allowed; only a present-but-empty array is not.
		return nil
	}
	if !gjson.ValidBytes(raw) {
		return errors.New("should be valid json")
	}
	parsed := gjson.ParseBytes(raw)
	if !parsed.IsArray() {
		return errors.New("should be json array")
	}
	elements := parsed.Array()
	if len(elements) == 0 {
		return errors.New("the json array should not be empty")
	}
	for _, element := range elements {
		if element.Type != gjson.String {
			return errors.New("should be array of strings")
		}
		if !regexPattern.MatchString(element.String()) {
			return errors.Errorf("elements should match %q", regexPattern.String())
		}
	}
	return nil
}
// validateJSONArrayOfObjects verifies that arr is a non-empty JSON array of
// objects, validating each element's fields against elementFieldRules and
// then applying each crossFieldRule to the element as a whole. A nil or
// empty value is considered valid.
func validateJSONArrayOfObjects(arr interface{}, elementFieldRules map[string][]validation.Rule, crossFieldRules ...func(gjson.Result) error) error {
	if arr == nil {
		return nil
	}
	jsonArr, ok := arr.(json.RawMessage)
	if !ok {
		return errors.New("should be json")
	}
	if len(jsonArr) == 0 {
		return nil
	}
	if !gjson.ValidBytes(jsonArr) {
		return errors.New("should be valid json")
	}
	parsedArr := gjson.ParseBytes(jsonArr)
	if !parsedArr.IsArray() {
		return errors.New("should be json array")
	}
	if len(parsedArr.Array()) == 0 {
		return errors.New("the json array should not be empty")
	}
	for _, el := range parsedArr.Array() {
		for field, rules := range elementFieldRules {
			if err := validation.Validate(el.Get(field).Value(), rules...); err != nil {
				return errors.Wrapf(err, "error validating field %s", field)
			}
		}
		// Cross-field rules apply once per element. They were previously
		// nested inside the field-rules loop, which ran them once per field
		// and skipped them entirely when elementFieldRules was empty.
		for _, f := range crossFieldRules {
			if err := f(el); err != nil {
				return err
			}
		}
	}
	return nil
}
// validateCustomType verifies that customType is only present when the
// element's type is "custom", and that its value matches the
// <namespace>:<name>:v<version> pattern.
func validateCustomType(el gjson.Result) error {
	if el.Get("customType").Exists() && el.Get("type").String() != "custom" {
		return errors.New("if customType is provided, type should be set to 'custom'")
	}
	return validation.Validate(el.Get("customType").String(), validation.Match(regexp.MustCompile("^([a-z0-9.]+):([a-zA-Z0-9._\\-]+):v([0-9]+)$")))
}
// validateCustomDescription verifies that customDescription is only present
// when the element's type is "custom".
func validateCustomDescription(el gjson.Result) error {
	// Reject a non-"custom" element that nevertheless carries a
	// customDescription field.
	if el.Get("type").String() != "custom" && el.Get("customDescription").Exists() {
		return errors.New("if customDescription is provided, type should be set to 'custom'")
	}
	return nil
}
// isValidDate returns a rule that reports whether *date parses as an
// RFC 3339-style timestamp, accepting either a numeric zone offset
// ("Z0700") or a colon-separated one (time.RFC3339, "Z07:00").
// The rule validates the captured date pointer and ignores the value
// argument supplied by the validation framework.
// NOTE(review): assumes date is non-nil when the rule runs — confirm callers.
func isValidDate(date *string) validation.RuleFunc {
	return func(value interface{}) error {
		// The previous version declared a dead `var err error` and spelled
		// out the RFC 3339 layout instead of using the stdlib constant.
		if _, err := time.Parse("2006-01-02T15:04:05Z0700", *date); err == nil {
			return nil
		}
		if _, err := time.Parse(time.RFC3339, *date); err == nil {
			return nil
		}
		return errors.New("invalid date")
	}
}
|
package monitorcontroller
import (
"monitor/models"
)
// @Title GenMonitor
// @Description view statistics request
// @Success 200 {object} responses.BoolResponse
// @router /genmonitor [get]
func (this *MonitorController) GenMonitor() {
	m := models.GetMonitorModel()
	if m != nil {
		resp := m.GetAllInfoForCharting()
		// Aggregate statistics for the charting template.
		this.Data["the_average_time_action"] = resp.TheAverageTimeMonitorData.TimeAction
		this.Data["the_average_time_request"] = resp.TheAverageTimeMonitorData.TimeRequest
		this.Data["best_action_label_count"] = resp.BestCountMonitorData.BestActionLabel
		this.Data["best_action_count"] = resp.BestCountMonitorData.BestActionCount
		this.Data["best_request_label_count"] = resp.BestCountMonitorData.BestRequestLabel
		this.Data["best_request_count"] = resp.BestCountMonitorData.BestRequestCount
		this.Data["all_action"] = resp.AllAction
		this.Data["all_request"] = resp.AllRequest
		this.Data["action_data_hours"] = resp.MonitorData.ActionDataHours
		this.Data["action_data_count"] = resp.MonitorData.ActionDataCount
		this.Data["request_data_hours"] = resp.MonitorData.RequestDataHours
		this.Data["request_data_count"] = resp.MonitorData.RequestDataCount
		// Axis labels: one entry per minute of a day and per hour of a day.
		// Pre-size and index-fill rather than growing via append.
		labelLoadMinute := make([]int, 1440)
		for i := range labelLoadMinute {
			labelLoadMinute[i] = i
		}
		labelLoadHour := make([]int, 24)
		for i := range labelLoadHour {
			labelLoadHour[i] = i
		}
		this.Data["labelLoadMinute"] = labelLoadMinute
		this.Data["labelLoadHour"] = labelLoadHour
		this.Data["action_data_minutes"] = resp.MonitorData.ActionDataByMinutes
		this.Data["request_data_minutes"] = resp.MonitorData.RequestDataByMinutes
		this.Data["action_data_hours_line"] = resp.MonitorData.ActionDataByHours
		this.Data["request_data_hours_line"] = resp.MonitorData.RequestDataByHours
		// option ajax
		this.Data["url_data"] = "/monitor/getall"
		this.Data["time_load"] = 60
		this.TplName = "genmonitor.tpl"
	}
}
|
package main
import (
"flag"
"github.com/golang/glog"
"github.com/peteabre/ocp-client-go/pkg/ocpclient"
"github.com/spf13/cobra"
rapiv1 "github.com/peteabre/ocp-client-go/pkg/route/api/v1"
"os"
)
// kubeconfig and metricsPort are populated by the flag definitions
// registered in newCmd and read by runCmd.
var kubeconfig *string
var metricsPort *int
func main() {
handleErr(newCmd().Execute())
}
func init() {
	// We log to stderr because glog will default to logging to a file.
	// By setting this debugging is easier via `kubectl logs`
	// flag.Set returns an error (e.g. unknown flag name); surface it rather
	// than silently discarding the return value.
	if err := flag.Set("logtostderr", "true"); err != nil {
		os.Stderr.WriteString("failed to set logtostderr: " + err.Error() + "\n")
	}
}
// handleErr prints err to stderr and terminates the process with a
// non-zero status; a nil err is a no-op.
func handleErr(err error) {
	if err != nil {
		// Previously the error was discarded entirely, leaving no
		// indication of why the process exited.
		os.Stderr.WriteString(err.Error() + "\n")
		os.Exit(-1)
	}
}
// newCmd constructs the root cobra command and registers the kubeconfig
// and metrics-port flags into the package-level variables.
func newCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "routes-controller",
		Short: "Run a controller that prints route changes to stdout",
		Long:  "Run a controller that prints route changes to stdout",
		RunE:  runCmd,
	}
	flags := cmd.Flags()
	kubeconfig = flags.String("kubeconfig", "", "path to valid kubeconfig")
	metricsPort = flags.Int("metricsport", 11251, "expose metrics on port")
	return cmd
}
// runCmd builds an OpenShift client from the configured kubeconfig and runs
// a RouteController that logs add/update/delete events for routes.
func runCmd(cmd *cobra.Command, args []string) error {
	conf, err := ocpclient.GetClientConfig(*kubeconfig)
	if err != nil {
		return err
	}
	client, err := ocpclient.NewForConfig(conf)
	if err != nil {
		// Return through cobra's RunE error path instead of glog.Fatalf,
		// consistent with the config error above (Fatalf would os.Exit and
		// bypass handleErr).
		glog.Errorf("Failed to create OpenShift client: %v", err)
		return err
	}
	controller := RouteController{
		oc:         client,
		listenAddr: metricsPort,
		eventHandlers: &EventHandlerFuncs{
			AddFunc: func(route *rapiv1.Route, oc ocpclient.Interface) {
				glog.Infof("Route added - Namespace: %s Name: %s Host: %s\n", route.Namespace, route.Name, route.Spec.Host)
			},
			UpdateFunc: func(route *rapiv1.Route, oc ocpclient.Interface) {
				glog.Infof("Route updated - Namespace: %s Name: %s Host: %s\n", route.Namespace, route.Name, route.Spec.Host)
			},
			DeleteFunc: func(route *rapiv1.Route, oc ocpclient.Interface) {
				glog.Infof("Route deleted - Namespace: %s Name: %s Host: %s\n", route.Namespace, route.Name, route.Spec.Host)
			},
		}}
	return controller.Run()
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package view
import (
"context"
"time"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
"github.com/oam-dev/kubevela/references/cli/top/component"
"github.com/oam-dev/kubevela/references/cli/top/model"
)
const (
	// RefreshDelay is the auto-refresh interval, in seconds
	// (multiplied by time.Second where used).
	RefreshDelay = 10
	// resourceReqTimeout bounds a single resource request, in seconds.
	resourceReqTimeout = 3
)
// ResourceView is the interface to abstract resource view
type ResourceView interface {
	model.View
	// InitView initializes the view with the request context and owning app.
	InitView(ctx context.Context, app *App)
	// Refresh handles the manual-refresh key event.
	Refresh(event *tcell.EventKey) *tcell.EventKey
	// Update reloads the view's data; timeoutCancel signals completion.
	Update(timeoutCancel func())
	// BuildHeader renders the table header row.
	BuildHeader()
	// BuildBody renders the table body rows.
	BuildBody()
}
// ResourceViewMap is a map from resource name to resource view.
// Keys are the short resource aliases used to select a view.
var ResourceViewMap = map[string]ResourceView{
	"app":       new(ApplicationView),
	"cluster":   new(ClusterView),
	"resource":  new(ManagedResourceView),
	"ns":        new(NamespaceView),
	"cns":       new(ClusterNamespaceView),
	"pod":       new(PodView),
	"container": new(ContainerView),
}
// CommonResourceView is an abstract of resource view
type CommonResourceView struct {
	*component.Table
	// app is the owning application (event queue, theme, focus).
	app *App
	// cancelFunc stops the auto-refresh goroutine; it is a no-op until
	// AutoRefresh replaces it.
	cancelFunc func()
}
// NewCommonView constructs a common resource view bound to the given app,
// with a themed table and a no-op cancel func until AutoRefresh runs.
func NewCommonView(app *App) *CommonResourceView {
	return &CommonResourceView{
		Table:      component.NewTable(app.config.Theme),
		app:        app,
		cancelFunc: func() {},
	}
}
// Init the common resource view: initialize the embedded table, apply the
// themed border/title/selection settings, bind the shared keys, and take
// input focus.
func (v *CommonResourceView) Init() {
	v.Table.Init()
	v.SetBorder(true)
	v.SetTitleColor(v.app.config.Theme.Table.Title.Color())
	// Rows are selectable; columns are not.
	v.SetSelectable(true, false)
	v.bindKeys()
	v.app.SetFocus(v)
}
// Name returns the display name shared by all common resource views.
func (v *CommonResourceView) Name() string {
	const viewName = "Resource"
	return viewName
}
// BuildHeader renders the given column titles into row 0 of the table,
// using the theme's header color.
func (v *CommonResourceView) BuildHeader(header []string) {
	for col, title := range header {
		cell := tview.NewTableCell(title)
		cell.SetTextColor(v.app.config.Theme.Table.Header.Color())
		cell.SetExpansion(3)
		v.SetCell(0, col, cell)
	}
}
// BuildBody renders the data rows beneath the header row, using the
// theme's body color.
func (v *CommonResourceView) BuildBody(body [][]string) {
	for row, fields := range body {
		for col, field := range fields {
			cell := tview.NewTableCell(field)
			cell.SetTextColor(v.app.config.Theme.Table.Body.Color())
			cell.SetExpansion(3)
			// Row 0 holds the header, so data starts at row+1.
			v.SetCell(row+1, col, cell)
		}
	}
}
// Stop the refresh goroutine and clear the table content.
// cancelFunc terminates the AutoRefresh loop (no-op if never started).
func (v *CommonResourceView) Stop() {
	v.Table.Stop()
	v.cancelFunc()
}
// Refresh the base resource view: optionally clear the table, then run
// update on the UI draw queue with a resourceReqTimeout-second budget.
func (v *CommonResourceView) Refresh(clear bool, update func(timeoutCancel func())) {
	if clear {
		v.Clear()
	}
	updateWithTimeout := func() {
		// The context acts as a completion signal: update is handed the
		// cancel func and is expected to call it when finished, which
		// unblocks the select below early. Otherwise we give up after
		// the timeout elapses.
		ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*resourceReqTimeout)
		defer cancelFunc()
		go update(cancelFunc)
		select {
		case <-time.After(time.Second * resourceReqTimeout): // timeout
		case <-ctx.Done(): // success
		}
		// NOTE(review): the update goroutine may still be running after the
		// timeout fires — confirm update implementations tolerate that.
	}
	v.app.QueueUpdateDraw(updateWithTimeout)
}
// AutoRefresh will refresh the view every RefreshDelay seconds until the
// view is stopped (Stop invokes the cancel func installed here).
func (v *CommonResourceView) AutoRefresh(update func(timeoutCancel func())) {
	var ctx context.Context
	ctx, v.cancelFunc = context.WithCancel(context.Background())
	go func() {
		// A ticker plus a select on ctx.Done() lets cancellation take
		// effect immediately; the previous time.Sleep-based loop could
		// delay shutdown by up to RefreshDelay seconds.
		ticker := time.NewTicker(RefreshDelay * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				v.Refresh(true, update)
			}
		}
	}()
}
// bindKeys registers the key actions shared by every resource view
// (back, help, theme switch) and removes the inherited ESC binding.
func (v *CommonResourceView) bindKeys() {
	v.Actions().Delete([]tcell.Key{tcell.KeyESC})
	v.Actions().Add(model.KeyActions{
		component.KeyQ:    model.KeyAction{Description: "Back", Action: v.app.Back, Visible: true, Shared: true},
		component.KeyHelp: model.KeyAction{Description: "Help", Action: v.app.helpView, Visible: true, Shared: true},
		tcell.KeyCtrlT:    model.KeyAction{Description: "Switch Theme", Action: v.app.SwitchTheme, Visible: true, Shared: true},
	})
}
|
package term
import (
"time"
"github.com/brigadecore/brigade/sdk/v3"
"github.com/gdamore/tcell/v2"
)
// tview inline color tags used when rendering phase text.
const (
	textGreen  = "[green]"
	textGrey   = "[grey]"
	textRed    = "[red]"
	textWhite  = "[white]"
	textYellow = "[yellow]"
)
// colorsByWorkerPhase maps each worker phase to its cell color.
var colorsByWorkerPhase = map[sdk.WorkerPhase]tcell.Color{
	sdk.WorkerPhaseAborted:          tcell.ColorGrey,
	sdk.WorkerPhaseCanceled:         tcell.ColorGrey,
	sdk.WorkerPhaseFailed:           tcell.ColorRed,
	sdk.WorkerPhasePending:          tcell.ColorWhite,
	sdk.WorkerPhaseRunning:          tcell.ColorYellow,
	sdk.WorkerPhaseSchedulingFailed: tcell.ColorRed,
	sdk.WorkerPhaseStarting:         tcell.ColorYellow,
	sdk.WorkerPhaseSucceeded:        tcell.ColorGreen,
	sdk.WorkerPhaseTimedOut:         tcell.ColorRed,
	sdk.WorkerPhaseUnknown:          tcell.ColorGrey,
}
// textColorsByWorkerPhase maps each worker phase to its inline text
// color tag (mirrors colorsByWorkerPhase).
var textColorsByWorkerPhase = map[sdk.WorkerPhase]string{
	sdk.WorkerPhaseAborted:          textGrey,
	sdk.WorkerPhaseCanceled:         textGrey,
	sdk.WorkerPhaseFailed:           textRed,
	sdk.WorkerPhasePending:          textWhite,
	sdk.WorkerPhaseRunning:          textYellow,
	sdk.WorkerPhaseSchedulingFailed: textRed,
	sdk.WorkerPhaseStarting:         textYellow,
	sdk.WorkerPhaseSucceeded:        textGreen,
	sdk.WorkerPhaseTimedOut:         textRed,
	sdk.WorkerPhaseUnknown:          textGrey,
}
// iconsByWorkerPhase maps each worker phase to its status glyph.
var iconsByWorkerPhase = map[sdk.WorkerPhase]string{
	sdk.WorkerPhaseAborted:          "✖",
	sdk.WorkerPhaseCanceled:         "✖",
	sdk.WorkerPhaseFailed:           "✖",
	sdk.WorkerPhasePending:          "⟳",
	sdk.WorkerPhaseRunning:          "▶",
	sdk.WorkerPhaseSchedulingFailed: "✖",
	sdk.WorkerPhaseStarting:         "▶",
	sdk.WorkerPhaseSucceeded:        "✔",
	sdk.WorkerPhaseTimedOut:         "✖",
	sdk.WorkerPhaseUnknown:          "?",
}
// colorsByJobPhase maps each job phase to its cell color.
var colorsByJobPhase = map[sdk.JobPhase]tcell.Color{
	sdk.JobPhaseAborted:          tcell.ColorGrey,
	sdk.JobPhaseCanceled:         tcell.ColorGrey,
	sdk.JobPhaseFailed:           tcell.ColorRed,
	sdk.JobPhasePending:          tcell.ColorWhite,
	sdk.JobPhaseRunning:          tcell.ColorYellow,
	sdk.JobPhaseSchedulingFailed: tcell.ColorRed,
	sdk.JobPhaseStarting:         tcell.ColorYellow,
	sdk.JobPhaseSucceeded:        tcell.ColorGreen,
	sdk.JobPhaseTimedOut:         tcell.ColorRed,
	sdk.JobPhaseUnknown:          tcell.ColorGrey,
}
// iconsByJobPhase maps each job phase to its status glyph.
var iconsByJobPhase = map[sdk.JobPhase]string{
	sdk.JobPhaseAborted:          "✖",
	sdk.JobPhaseCanceled:         "✖",
	sdk.JobPhaseFailed:           "✖",
	sdk.JobPhasePending:          "⟳",
	sdk.JobPhaseRunning:          "▶",
	sdk.JobPhaseSchedulingFailed: "✖",
	sdk.JobPhaseStarting:         "▶",
	sdk.JobPhaseSucceeded:        "✔",
	sdk.JobPhaseTimedOut:         "✖",
	sdk.JobPhaseUnknown:          "?",
}
// getColorFromWorkerPhase maps a worker phase to its display color,
// defaulting to grey for unmapped phases.
func getColorFromWorkerPhase(phase sdk.WorkerPhase) tcell.Color {
	color, ok := colorsByWorkerPhase[phase]
	if !ok {
		return tcell.ColorGrey
	}
	return color
}
// getTextColorFromWorkerPhase maps a worker phase to its inline text color
// tag, defaulting to grey for unmapped phases.
func getTextColorFromWorkerPhase(phase sdk.WorkerPhase) string {
	if color, ok := textColorsByWorkerPhase[phase]; ok {
		return color
	}
	// Use the shared constant (same value) instead of duplicating the
	// "[grey]" literal.
	return textGrey
}
// getIconFromWorkerPhase maps a worker phase to its status glyph,
// defaulting to "?" for unmapped phases.
func getIconFromWorkerPhase(phase sdk.WorkerPhase) string {
	icon, ok := iconsByWorkerPhase[phase]
	if !ok {
		return "?"
	}
	return icon
}
// getColorFromJobPhase maps a job phase to its display color,
// defaulting to grey for unmapped phases.
func getColorFromJobPhase(phase sdk.JobPhase) tcell.Color {
	color, ok := colorsByJobPhase[phase]
	if !ok {
		return tcell.ColorGrey
	}
	return color
}
// getIconFromJobPhase maps a job phase to its status glyph, defaulting to
// "?" for unmapped phases.
func getIconFromJobPhase(phase sdk.JobPhase) string {
	if icon, ok := iconsByJobPhase[phase]; ok {
		return icon
	}
	// Fix: the fallback previously returned the color tag "[grey]" — a
	// copy-paste from the color helpers — instead of the unknown-icon "?"
	// used by getIconFromWorkerPhase and iconsByJobPhase itself.
	return "?"
}
// formatDateTimeToString formats a time object to YYYY-MM-DD HH:MM:SS
// and returns it as a string
func formatDateTimeToString(time *time.Time) string {
if time == nil {
return ""
}
return time.UTC().Format("2006-01-02 15:04:05")
}
|
package mathematics
import (
. "github.com/numacci/go-algorithm/stl/function"
"math"
)
// Eratosthenes returns the prime numbers in [2, n] in ascending order using
// the sieve of Eratosthenes. For n < 2 (including negative n) it returns an
// empty slice; previously a negative n panicked via make's negative capacity.
func Eratosthenes(n int) []int {
	if n < 2 {
		return []int{}
	}
	primes := make([]int, 0, n)
	isPrime := make([]bool, n+1)
	for i := 2; i <= n; i++ {
		isPrime[i] = true
	}
	for i := 2; i <= n; i++ {
		if !isPrime[i] {
			continue
		}
		primes = append(primes, i)
		for j := 2 * i; j <= n; j += i {
			// exclude multiples of prime number 'i'
			isPrime[j] = false
		}
	}
	return primes
}
// SegmentEratosthenes returns the prime numbers in [a, b] using a segmented
// sieve: a small sieve up to sqrt(b) drives the marking of the [a, b] window.
// Bounds below 2 are clamped (0 and 1 are not prime, and were previously
// reported as primes when a < 2); an empty or sub-2 range yields an empty slice.
func SegmentEratosthenes(a, b int) []int {
	if b < 2 || b < a {
		return []int{}
	}
	if a < 2 {
		// 0 and 1 are not prime; previously they leaked into the result.
		a = 2
	}
	isPrimeAtoB := make([]bool, b-a+1)                           // sieve in [a, b]
	isPrimeToSqrtB := make([]bool, int(math.Sqrt(float64(b)))+1) // sieve in [0, sqrt(b)]
	for i := 0; i*i <= b; i++ {
		isPrimeToSqrtB[i] = true
	}
	for i := 0; i <= b-a; i++ {
		isPrimeAtoB[i] = true
	}
	for i := 2; i*i <= b; i++ {
		if !isPrimeToSqrtB[i] {
			continue
		}
		// Mark multiples of i in the small sieve ...
		for j := 2 * i; j*j <= b; j += i {
			isPrimeToSqrtB[j] = false
		}
		// ... and in the [a, b] window, starting from the first multiple
		// of i that is >= max(a, 2i).
		for j := Max(i, (a+i-1)/i) * i; j <= b; j += i {
			isPrimeAtoB[j-a] = false
		}
	}
	primes := make([]int, 0, b-a+1)
	for i := a; i <= b; i++ {
		if isPrimeAtoB[i-a] {
			primes = append(primes, i)
		}
	}
	return primes
}
|
package syndicate
import (
"fmt"
"sync"
pb "github.com/getcfs/megacfs/syndicate/api/proto"
)
// RingSubscribers tracks the per-subscriber delivery channels for ring
// updates, guarded by the embedded RWMutex.
type RingSubscribers struct {
	sync.RWMutex
	// subs maps subscriber id to its buffered ring-update channel.
	subs map[string]chan *pb.Ring
}
// addRingSubscriber registers a buffered ring-update channel for id and
// returns it. An existing channel for the same id is closed first.
func (s *Server) addRingSubscriber(id string) chan *pb.Ring {
	s.ringSubs.Lock()
	defer s.ringSubs.Unlock()
	if old, ok := s.ringSubs.subs[id]; ok {
		close(old)
		s.ctxlog.WithField("id", id).Debug("ring subscriber entry already existed, closed origin chan")
	}
	// Buffer of one so a single pending update never blocks the notifier.
	ch := make(chan *pb.Ring, 1)
	s.ringSubs.subs[id] = ch
	s.metrics.subscriberNodes.Inc()
	return ch
}
// removeRingSubscriber closes and removes the channel registered for id,
// returning an error when no such subscriber exists.
func (s *Server) removeRingSubscriber(id string) error {
	s.ringSubs.Lock()
	defer s.ringSubs.Unlock()
	ch, found := s.ringSubs.subs[id]
	if !found {
		return fmt.Errorf("subscriber id not present")
	}
	close(ch)
	delete(s.ringSubs.subs, id)
	s.metrics.subscriberNodes.Dec()
	return nil
}
//ringSubscribersNotify listens for ring changes on s.subsChangeChan,
// and distributes them out to the chan's used by connected GetRingStream
// instances
func (s *Server) ringSubscribersNotify() {
	for change := range s.subsChangeChan {
		s.ringSubs.RLock()
		// One shared message is built per change and fanned out to all
		// current subscribers.
		ring := &pb.Ring{
			Ring:    *change.rb,
			Version: change.v,
		}
		for id, ch := range s.ringSubs.subs {
			// Send from a goroutine so one slow subscriber cannot block
			// the fan-out.
			// NOTE(review): the send may run after RUnlock; if
			// removeRingSubscriber closes ch before the goroutine sends,
			// the send would panic — confirm the subscriber lifecycle
			// prevents this.
			go func(id string, ch chan *pb.Ring, ring *pb.Ring) {
				ch <- ring
			}(id, ch, ring)
		}
		s.ringSubs.RUnlock()
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/osconfig/alpha/osconfig_alpha_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig/alpha"
)
// OSPolicyAssignmentServer implements the gRPC interface for OSPolicyAssignment.
// It carries no state of its own (empty struct).
type OSPolicyAssignmentServer struct{}
// ProtoToOSPolicyAssignmentOSPoliciesModeEnum converts a OSPolicyAssignmentOSPoliciesModeEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum) *alpha.OSPolicyAssignmentOSPoliciesModeEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentOSPoliciesModeEnum(n[len("OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum"):])
		return &e
	}
	// Unknown numeric value: drop it rather than guess.
	return nil
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(n[len("OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(n[len("OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum"):])
		return &e
	}
	return nil
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(n[len("OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum"):])
		return &e
	}
	return nil
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(n[len("OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum"):])
		return &e
	}
	return nil
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(n[len("OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToOSPolicyAssignmentRolloutStateEnum converts a OSPolicyAssignmentRolloutStateEnum enum from its proto representation.
func ProtoToOsconfigAlphaOSPolicyAssignmentRolloutStateEnum(e alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum) *alpha.OSPolicyAssignmentRolloutStateEnum {
	// Proto zero value means "unspecified"; represent it as nil.
	if e == 0 {
		return nil
	}
	if n, ok := alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum_name[int32(e)]; ok {
		// Strip the qualified proto prefix to recover the bare enum value.
		e := alpha.OSPolicyAssignmentRolloutStateEnum(n[len("OsconfigAlphaOSPolicyAssignmentRolloutStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToOSPolicyAssignmentOSPolicies converts a OSPolicyAssignmentOSPolicies object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPolicies(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPolicies) *alpha.OSPolicyAssignmentOSPolicies {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPolicies{
		Id:                        dcl.StringOrNil(p.GetId()),
		Description:               dcl.StringOrNil(p.GetDescription()),
		Mode:                      ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum(p.GetMode()),
		AllowNoResourceGroupMatch: dcl.Bool(p.GetAllowNoResourceGroupMatch()),
	}
	// Convert each repeated resource group element.
	for _, r := range p.GetResourceGroups() {
		obj.ResourceGroups = append(obj.ResourceGroups, *ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroups(r))
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroups converts a OSPolicyAssignmentOSPoliciesResourceGroups object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroups(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroups) *alpha.OSPolicyAssignmentOSPoliciesResourceGroups {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroups{}
	// Convert the repeated inventory filter and resource elements.
	for _, r := range p.GetInventoryFilters() {
		obj.InventoryFilters = append(obj.InventoryFilters, *ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(r))
	}
	for _, r := range p.GetResources() {
		obj.Resources = append(obj.Resources, *ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResources(r))
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters converts a OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{
		OSShortName: dcl.StringOrNil(p.GetOsShortName()),
		OSVersion:   dcl.StringOrNil(p.GetOsVersion()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResources converts a OSPolicyAssignmentOSPoliciesResourceGroupsResources object from its proto representation.
// A nil proto message converts to a nil object; each nested message is
// converted by its own helper (which likewise maps nil to nil).
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResources(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResources) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResources {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResources{
		Id:         dcl.StringOrNil(p.GetId()),
		Pkg:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(p.GetPkg()),
		Repository: ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(p.GetRepository()),
		Exec:       ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(p.GetExec()),
		File:       ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(p.GetFile()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg object from its proto representation.
// A nil proto message converts to a nil object; the per-package-manager
// fields are converted by their own helpers.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{
		DesiredState: ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(p.GetDesiredState()),
		Apt:          ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(p.GetApt()),
		Deb:          ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(p.GetDeb()),
		Yum:          ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(p.GetYum()),
		Zypper:       ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(p.GetZypper()),
		Rpm:          ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(p.GetRpm()),
		Googet:       ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(p.GetGooget()),
		Msi:          ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(p.GetMsi()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{
		Source:   ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(p.GetSource()),
		PullDeps: dcl.Bool(p.GetPullDeps()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{
		Remote:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(p.GetRemote()),
		Gcs:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(p.GetGcs()),
		LocalPath:     dcl.StringOrNil(p.GetLocalPath()),
		AllowInsecure: dcl.Bool(p.GetAllowInsecure()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{
		Uri:            dcl.StringOrNil(p.GetUri()),
		Sha256Checksum: dcl.StringOrNil(p.GetSha256Checksum()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{
		Bucket:     dcl.StringOrNil(p.GetBucket()),
		Object:     dcl.StringOrNil(p.GetObject()),
		Generation: dcl.Int64OrNil(p.GetGeneration()),
	}
	return obj
}
// ProtoToOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum object from its proto representation.
// A nil proto message converts to a nil object.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{
		Source:   ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(p.GetSource()),
		PullDeps: dcl.Bool(p.GetPullDeps()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{
		Remote:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(p.GetRemote()),
		Gcs:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(p.GetGcs()),
		LocalPath:     dcl.StringOrNil(p.GetLocalPath()),
		AllowInsecure: dcl.Bool(p.GetAllowInsecure()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{
		Uri:            dcl.StringOrNil(p.GetUri()),
		Sha256Checksum: dcl.StringOrNil(p.GetSha256Checksum()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{
		Bucket:     dcl.StringOrNil(p.GetBucket()),
		Object:     dcl.StringOrNil(p.GetObject()),
		Generation: dcl.Int64OrNil(p.GetGeneration()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{
		Name: dcl.StringOrNil(p.GetName()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{
		Source: ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(p.GetSource()),
	}
	// Variadic append replaces the element-by-element loop; the field stays
	// nil when the proto list is empty, matching the previous behavior.
	obj.Properties = append(obj.Properties, p.GetProperties()...)
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{
		Remote:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(p.GetRemote()),
		Gcs:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(p.GetGcs()),
		LocalPath:     dcl.StringOrNil(p.GetLocalPath()),
		AllowInsecure: dcl.Bool(p.GetAllowInsecure()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{
		Uri:            dcl.StringOrNil(p.GetUri()),
		Sha256Checksum: dcl.StringOrNil(p.GetSha256Checksum()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{
		Bucket:     dcl.StringOrNil(p.GetBucket()),
		Object:     dcl.StringOrNil(p.GetObject()),
		Generation: dcl.Int64OrNil(p.GetGeneration()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{
		Apt:    ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(p.GetApt()),
		Yum:    ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(p.GetYum()),
		Zypper: ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(p.GetZypper()),
		Goo:    ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(p.GetGoo()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{
		ArchiveType:  ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(p.GetArchiveType()),
		Uri:          dcl.StringOrNil(p.GetUri()),
		Distribution: dcl.StringOrNil(p.GetDistribution()),
		GpgKey:       dcl.StringOrNil(p.GetGpgKey()),
	}
	// Variadic append replaces the element-by-element loop; the field stays
	// nil when the proto list is empty, matching the previous behavior.
	obj.Components = append(obj.Components, p.GetComponents()...)
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{
		Id:          dcl.StringOrNil(p.GetId()),
		DisplayName: dcl.StringOrNil(p.GetDisplayName()),
		BaseUrl:     dcl.StringOrNil(p.GetBaseUrl()),
	}
	// Variadic append replaces the element-by-element loop; the field stays
	// nil when the proto list is empty, matching the previous behavior.
	obj.GpgKeys = append(obj.GpgKeys, p.GetGpgKeys()...)
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{
		Id:          dcl.StringOrNil(p.GetId()),
		DisplayName: dcl.StringOrNil(p.GetDisplayName()),
		BaseUrl:     dcl.StringOrNil(p.GetBaseUrl()),
	}
	// Variadic append replaces the element-by-element loop; the field stays
	// nil when the proto list is empty, matching the previous behavior.
	obj.GpgKeys = append(obj.GpgKeys, p.GetGpgKeys()...)
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{
		Name: dcl.StringOrNil(p.GetName()),
		Url:  dcl.StringOrNil(p.GetUrl()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{
		Validate: ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(p.GetValidate()),
		Enforce:  ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(p.GetEnforce()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{
		File:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(p.GetFile()),
		Script:         dcl.StringOrNil(p.GetScript()),
		Interpreter:    ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(p.GetInterpreter()),
		OutputFilePath: dcl.StringOrNil(p.GetOutputFilePath()),
	}
	// Variadic append replaces the element-by-element loop; the field stays
	// nil when the proto list is empty, matching the previous behavior.
	obj.Args = append(obj.Args, p.GetArgs()...)
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{
		Remote:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(p.GetRemote()),
		Gcs:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(p.GetGcs()),
		LocalPath:     dcl.StringOrNil(p.GetLocalPath()),
		AllowInsecure: dcl.Bool(p.GetAllowInsecure()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{
		Uri:            dcl.StringOrNil(p.GetUri()),
		Sha256Checksum: dcl.StringOrNil(p.GetSha256Checksum()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{
		Bucket:     dcl.StringOrNil(p.GetBucket()),
		Object:     dcl.StringOrNil(p.GetObject()),
		Generation: dcl.Int64OrNil(p.GetGeneration()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{
		File:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(p.GetFile()),
		Script:         dcl.StringOrNil(p.GetScript()),
		Interpreter:    ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(p.GetInterpreter()),
		OutputFilePath: dcl.StringOrNil(p.GetOutputFilePath()),
	}
	// Variadic append replaces the element-by-element loop; the field stays
	// nil when the proto list is empty, matching the previous behavior.
	obj.Args = append(obj.Args, p.GetArgs()...)
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{
		Remote:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(p.GetRemote()),
		Gcs:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(p.GetGcs()),
		LocalPath:     dcl.StringOrNil(p.GetLocalPath()),
		AllowInsecure: dcl.Bool(p.GetAllowInsecure()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{
		Uri:            dcl.StringOrNil(p.GetUri()),
		Sha256Checksum: dcl.StringOrNil(p.GetSha256Checksum()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{
		Bucket:     dcl.StringOrNil(p.GetBucket()),
		Object:     dcl.StringOrNil(p.GetObject()),
		Generation: dcl.Int64OrNil(p.GetGeneration()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{
		File:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(p.GetFile()),
		Content:     dcl.StringOrNil(p.GetContent()),
		Path:        dcl.StringOrNil(p.GetPath()),
		State:       ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(p.GetState()),
		Permissions: dcl.StringOrNil(p.GetPermissions()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{
		Remote:        ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(p.GetRemote()),
		Gcs:           ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(p.GetGcs()),
		LocalPath:     dcl.StringOrNil(p.GetLocalPath()),
		AllowInsecure: dcl.Bool(p.GetAllowInsecure()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{
		Uri:            dcl.StringOrNil(p.GetUri()),
		Sha256Checksum: dcl.StringOrNil(p.GetSha256Checksum()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs(p *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{
		Bucket:     dcl.StringOrNil(p.GetBucket()),
		Object:     dcl.StringOrNil(p.GetObject()),
		Generation: dcl.Int64OrNil(p.GetGeneration()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilter converts a OSPolicyAssignmentInstanceFilter object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilter(p *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilter) *alpha.OSPolicyAssignmentInstanceFilter {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentInstanceFilter{
		All: dcl.Bool(p.GetAll()),
	}
	// Each repeated message is converted element by element; the slices stay
	// nil when the corresponding proto list is empty. The element converters
	// never return nil here because r comes from a non-nil proto list entry.
	for _, r := range p.GetInclusionLabels() {
		obj.InclusionLabels = append(obj.InclusionLabels, *ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels(r))
	}
	for _, r := range p.GetExclusionLabels() {
		obj.ExclusionLabels = append(obj.ExclusionLabels, *ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels(r))
	}
	for _, r := range p.GetInventories() {
		obj.Inventories = append(obj.Inventories, *ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterInventories(r))
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels converts a OSPolicyAssignmentInstanceFilterInclusionLabels object from its proto representation. It returns nil when p is nil.
//
// NOTE(review): no scalar or message fields are copied here — the result is
// an empty struct. This appears intentional in the generator (map-typed
// fields such as labels may be handled elsewhere), but verify against the
// alpha.OSPolicyAssignmentInstanceFilterInclusionLabels definition.
func ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels(p *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels) *alpha.OSPolicyAssignmentInstanceFilterInclusionLabels {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentInstanceFilterInclusionLabels{}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels converts a OSPolicyAssignmentInstanceFilterExclusionLabels object from its proto representation. It returns nil when p is nil.
//
// NOTE(review): no scalar or message fields are copied here — the result is
// an empty struct. This appears intentional in the generator (map-typed
// fields such as labels may be handled elsewhere), but verify against the
// alpha.OSPolicyAssignmentInstanceFilterExclusionLabels definition.
func ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels(p *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels) *alpha.OSPolicyAssignmentInstanceFilterExclusionLabels {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentInstanceFilterExclusionLabels{}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterInventories converts a OSPolicyAssignmentInstanceFilterInventories object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilterInventories(p *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInventories) *alpha.OSPolicyAssignmentInstanceFilterInventories {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentInstanceFilterInventories{
		OSShortName: dcl.StringOrNil(p.GetOsShortName()),
		OSVersion:   dcl.StringOrNil(p.GetOsVersion()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentRollout converts a OSPolicyAssignmentRollout object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentRollout(p *alphapb.OsconfigAlphaOSPolicyAssignmentRollout) *alpha.OSPolicyAssignmentRollout {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentRollout{
		DisruptionBudget: ProtoToOsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudget(p.GetDisruptionBudget()),
		MinWaitDuration:  dcl.StringOrNil(p.GetMinWaitDuration()),
	}
	return obj
}
// ProtoToOsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudget converts a OSPolicyAssignmentRolloutDisruptionBudget object from its proto representation. It returns nil when p is nil.
func ProtoToOsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudget(p *alphapb.OsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudget) *alpha.OSPolicyAssignmentRolloutDisruptionBudget {
	if p == nil {
		return nil
	}
	obj := &alpha.OSPolicyAssignmentRolloutDisruptionBudget{
		Fixed:   dcl.Int64OrNil(p.GetFixed()),
		Percent: dcl.Int64OrNil(p.GetPercent()),
	}
	return obj
}
// ProtoToOSPolicyAssignment converts an OSPolicyAssignment resource from its proto representation.
// Unlike the nested-object converters above, there is no nil check here:
// generated proto getters are nil-receiver-safe, so a nil p yields a
// zero-valued resource rather than nil.
func ProtoToOSPolicyAssignment(p *alphapb.OsconfigAlphaOSPolicyAssignment) *alpha.OSPolicyAssignment {
	obj := &alpha.OSPolicyAssignment{
		Name:               dcl.StringOrNil(p.GetName()),
		Description:        dcl.StringOrNil(p.GetDescription()),
		InstanceFilter:     ProtoToOsconfigAlphaOSPolicyAssignmentInstanceFilter(p.GetInstanceFilter()),
		Rollout:            ProtoToOsconfigAlphaOSPolicyAssignmentRollout(p.GetRollout()),
		RevisionId:         dcl.StringOrNil(p.GetRevisionId()),
		RevisionCreateTime: dcl.StringOrNil(p.GetRevisionCreateTime()),
		Etag:               dcl.StringOrNil(p.GetEtag()),
		RolloutState:       ProtoToOsconfigAlphaOSPolicyAssignmentRolloutStateEnum(p.GetRolloutState()),
		Baseline:           dcl.Bool(p.GetBaseline()),
		Deleted:            dcl.Bool(p.GetDeleted()),
		Reconciling:        dcl.Bool(p.GetReconciling()),
		Uid:                dcl.StringOrNil(p.GetUid()),
		Project:            dcl.StringOrNil(p.GetProject()),
		Location:           dcl.StringOrNil(p.GetLocation()),
		SkipAwaitRollout:   dcl.Bool(p.GetSkipAwaitRollout()),
	}
	// Convert each repeated OSPolicy; the slice stays nil when the proto
	// list is empty.
	for _, r := range p.GetOsPolicies() {
		obj.OSPolicies = append(obj.OSPolicies, *ProtoToOsconfigAlphaOSPolicyAssignmentOSPolicies(r))
	}
	return obj
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnumToProto converts a OSPolicyAssignmentOSPoliciesModeEnum enum to its proto representation. It returns the zero (unspecified) enum value when e is nil or unrecognized.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnumToProto(e *alpha.OSPolicyAssignmentOSPoliciesModeEnum) alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum(0)
	}
	// Proto enum value names carry the message-type prefix; re-add it before
	// looking up the numeric value.
	if v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum_value["OSPolicyAssignmentOSPoliciesModeEnum"+string(*e)]; ok {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum(v)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnum(0)
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum enum to its proto representation. It returns the zero (unspecified) enum value when e is nil or unrecognized.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumToProto(e *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum) alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(0)
	}
	// Proto enum value names carry the message-type prefix; re-add it before
	// looking up the numeric value.
	if v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum_value["OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum"+string(*e)]; ok {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(v)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnum(0)
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum enum to its proto representation. It returns the zero (unspecified) enum value when e is nil or unrecognized.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumToProto(e *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum) alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(0)
	}
	// Proto enum value names carry the message-type prefix; re-add it before
	// looking up the numeric value.
	if v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum_value["OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum"+string(*e)]; ok {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(v)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnum(0)
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum enum to its proto representation. It returns the zero (unspecified) enum value when e is nil or unrecognized.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumToProto(e *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum) alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(0)
	}
	// Proto enum value names carry the message-type prefix; re-add it before
	// looking up the numeric value.
	if v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum_value["OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum"+string(*e)]; ok {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(v)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnum(0)
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum enum to its proto representation. It returns the zero (unspecified) enum value when e is nil or unrecognized.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumToProto(e *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum) alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(0)
	}
	// Proto enum value names carry the message-type prefix; re-add it before
	// looking up the numeric value.
	if v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum_value["OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum"+string(*e)]; ok {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(v)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnum(0)
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum enum to its proto representation. It returns the zero (unspecified) enum value when e is nil or unrecognized.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumToProto(e *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum) alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(0)
	}
	// Proto enum value names carry the message-type prefix; re-add it before
	// looking up the numeric value.
	if v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum_value["OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum"+string(*e)]; ok {
		return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(v)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnum(0)
}
// OsconfigAlphaOSPolicyAssignmentRolloutStateEnumToProto converts a OSPolicyAssignmentRolloutStateEnum enum to its proto representation.
func OsconfigAlphaOSPolicyAssignmentRolloutStateEnumToProto(e *alpha.OSPolicyAssignmentRolloutStateEnum) alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum {
	if e == nil {
		return alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum(0)
	}
	// Proto enum value names carry the enum type as a prefix; resolve the numeric value.
	v, ok := alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum_value["OSPolicyAssignmentRolloutStateEnum"+string(*e)]
	if !ok {
		// Unrecognized value: fall back to the zero (unspecified) enum.
		return alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum(0)
	}
	return alphapb.OsconfigAlphaOSPolicyAssignmentRolloutStateEnum(v)
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesToProto converts a OSPolicyAssignmentOSPolicies object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesToProto(o *alpha.OSPolicyAssignmentOSPolicies) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPolicies {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPolicies{}
	res.SetId(dcl.ValueOrEmptyString(o.Id))
	res.SetDescription(dcl.ValueOrEmptyString(o.Description))
	res.SetMode(OsconfigAlphaOSPolicyAssignmentOSPoliciesModeEnumToProto(o.Mode))
	res.SetAllowNoResourceGroupMatch(dcl.ValueOrEmptyBool(o.AllowNoResourceGroupMatch))
	// Convert each nested resource group; index directly to avoid copying elements.
	groups := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroups, len(o.ResourceGroups))
	for i := range o.ResourceGroups {
		groups[i] = OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsToProto(&o.ResourceGroups[i])
	}
	res.SetResourceGroups(groups)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroups object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroups) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroups {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroups{}
	// Convert nested inventory filters and resources; index directly to avoid copying elements.
	filters := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters, len(o.InventoryFilters))
	for i := range o.InventoryFilters {
		filters[i] = OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersToProto(&o.InventoryFilters[i])
	}
	res.SetInventoryFilters(filters)
	resources := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResources, len(o.Resources))
	for i := range o.Resources {
		resources[i] = OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesToProto(&o.Resources[i])
	}
	res.SetResources(resources)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFiltersToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsInventoryFilters{}
	res.SetOsShortName(dcl.ValueOrEmptyString(o.OSShortName))
	res.SetOsVersion(dcl.ValueOrEmptyString(o.OSVersion))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResources object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResources) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResources {
	if o == nil {
		return nil
	}
	// Exactly one of Pkg/Repository/Exec/File is typically set; nil members convert to nil.
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResources{}
	res.SetId(dcl.ValueOrEmptyString(o.Id))
	res.SetPkg(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgToProto(o.Pkg))
	res.SetRepository(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryToProto(o.Repository))
	res.SetExec(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecToProto(o.Exec))
	res.SetFile(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileToProto(o.File))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg {
	if o == nil {
		return nil
	}
	// One package-manager member is typically set; nil members convert to nil.
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkg{}
	res.SetDesiredState(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDesiredStateEnumToProto(o.DesiredState))
	res.SetApt(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptToProto(o.Apt))
	res.SetDeb(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebToProto(o.Deb))
	res.SetYum(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumToProto(o.Yum))
	res.SetZypper(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperToProto(o.Zypper))
	res.SetRpm(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmToProto(o.Rpm))
	res.SetGooget(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetToProto(o.Googet))
	res.SetMsi(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiToProto(o.Msi))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgAptToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgApt{}
	res.SetName(dcl.ValueOrEmptyString(o.Name))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDeb{}
	res.SetSource(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceToProto(o.Source))
	res.SetPullDeps(dcl.ValueOrEmptyBool(o.PullDeps))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSource{}
	res.SetRemote(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteToProto(o.Remote))
	res.SetGcs(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsToProto(o.Gcs))
	res.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	res.SetAllowInsecure(dcl.ValueOrEmptyBool(o.AllowInsecure))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemoteToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceRemote{}
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetSha256Checksum(dcl.ValueOrEmptyString(o.Sha256Checksum))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgDebSourceGcs{}
	res.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	res.SetObject(dcl.ValueOrEmptyString(o.Object))
	res.SetGeneration(dcl.ValueOrEmptyInt64(o.Generation))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYumToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgYum{}
	res.SetName(dcl.ValueOrEmptyString(o.Name))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypperToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgZypper{}
	res.SetName(dcl.ValueOrEmptyString(o.Name))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpm{}
	res.SetSource(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceToProto(o.Source))
	res.SetPullDeps(dcl.ValueOrEmptyBool(o.PullDeps))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSource{}
	res.SetRemote(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteToProto(o.Remote))
	res.SetGcs(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsToProto(o.Gcs))
	res.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	res.SetAllowInsecure(dcl.ValueOrEmptyBool(o.AllowInsecure))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemoteToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceRemote{}
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetSha256Checksum(dcl.ValueOrEmptyString(o.Sha256Checksum))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgRpmSourceGcs{}
	res.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	res.SetObject(dcl.ValueOrEmptyString(o.Object))
	res.SetGeneration(dcl.ValueOrEmptyInt64(o.Generation))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGoogetToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgGooget{}
	res.SetName(dcl.ValueOrEmptyString(o.Name))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsi{}
	res.SetSource(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceToProto(o.Source))
	// Copy the properties into a fresh slice so the proto does not alias the DCL object.
	props := make([]string, len(o.Properties))
	copy(props, o.Properties)
	res.SetProperties(props)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSource{}
	res.SetRemote(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteToProto(o.Remote))
	res.SetGcs(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsToProto(o.Gcs))
	res.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	res.SetAllowInsecure(dcl.ValueOrEmptyBool(o.AllowInsecure))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemoteToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceRemote{}
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetSha256Checksum(dcl.ValueOrEmptyString(o.Sha256Checksum))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesPkgMsiSourceGcs{}
	res.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	res.SetObject(dcl.ValueOrEmptyString(o.Object))
	res.SetGeneration(dcl.ValueOrEmptyInt64(o.Generation))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository {
	if o == nil {
		return nil
	}
	// One repository-type member is typically set; nil members convert to nil.
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepository{}
	res.SetApt(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptToProto(o.Apt))
	res.SetYum(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumToProto(o.Yum))
	res.SetZypper(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperToProto(o.Zypper))
	res.SetGoo(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooToProto(o.Goo))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryApt{}
	res.SetArchiveType(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryAptArchiveTypeEnumToProto(o.ArchiveType))
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetDistribution(dcl.ValueOrEmptyString(o.Distribution))
	res.SetGpgKey(dcl.ValueOrEmptyString(o.GpgKey))
	// Copy the components into a fresh slice so the proto does not alias the DCL object.
	components := make([]string, len(o.Components))
	copy(components, o.Components)
	res.SetComponents(components)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYumToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryYum{}
	res.SetId(dcl.ValueOrEmptyString(o.Id))
	res.SetDisplayName(dcl.ValueOrEmptyString(o.DisplayName))
	res.SetBaseUrl(dcl.ValueOrEmptyString(o.BaseUrl))
	// Copy the GPG keys into a fresh slice so the proto does not alias the DCL object.
	keys := make([]string, len(o.GpgKeys))
	copy(keys, o.GpgKeys)
	res.SetGpgKeys(keys)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypperToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryZypper{}
	res.SetId(dcl.ValueOrEmptyString(o.Id))
	res.SetDisplayName(dcl.ValueOrEmptyString(o.DisplayName))
	res.SetBaseUrl(dcl.ValueOrEmptyString(o.BaseUrl))
	// Copy the GPG keys into a fresh slice so the proto does not alias the DCL object.
	keys := make([]string, len(o.GpgKeys))
	copy(keys, o.GpgKeys)
	res.SetGpgKeys(keys)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGooToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesRepositoryGoo{}
	res.SetName(dcl.ValueOrEmptyString(o.Name))
	res.SetUrl(dcl.ValueOrEmptyString(o.Url))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExec{}
	res.SetValidate(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateToProto(o.Validate))
	res.SetEnforce(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceToProto(o.Enforce))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidate{}
	res.SetFile(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileToProto(o.File))
	res.SetScript(dcl.ValueOrEmptyString(o.Script))
	res.SetInterpreter(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateInterpreterEnumToProto(o.Interpreter))
	res.SetOutputFilePath(dcl.ValueOrEmptyString(o.OutputFilePath))
	// Copy the args into a fresh slice so the proto does not alias the DCL object.
	args := make([]string, len(o.Args))
	copy(args, o.Args)
	res.SetArgs(args)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFile{}
	res.SetRemote(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteToProto(o.Remote))
	res.SetGcs(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsToProto(o.Gcs))
	res.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	res.SetAllowInsecure(dcl.ValueOrEmptyBool(o.AllowInsecure))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemoteToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileRemote{}
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetSha256Checksum(dcl.ValueOrEmptyString(o.Sha256Checksum))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecValidateFileGcs{}
	res.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	res.SetObject(dcl.ValueOrEmptyString(o.Object))
	res.SetGeneration(dcl.ValueOrEmptyInt64(o.Generation))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforce{}
	res.SetFile(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileToProto(o.File))
	res.SetScript(dcl.ValueOrEmptyString(o.Script))
	res.SetInterpreter(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceInterpreterEnumToProto(o.Interpreter))
	res.SetOutputFilePath(dcl.ValueOrEmptyString(o.OutputFilePath))
	// Copy the args into a fresh slice so the proto does not alias the DCL object.
	args := make([]string, len(o.Args))
	copy(args, o.Args)
	res.SetArgs(args)
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFile{}
	res.SetRemote(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteToProto(o.Remote))
	res.SetGcs(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsToProto(o.Gcs))
	res.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	res.SetAllowInsecure(dcl.ValueOrEmptyBool(o.AllowInsecure))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemoteToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileRemote{}
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetSha256Checksum(dcl.ValueOrEmptyString(o.Sha256Checksum))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesExecEnforceFileGcs{}
	res.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	res.SetObject(dcl.ValueOrEmptyString(o.Object))
	res.SetGeneration(dcl.ValueOrEmptyInt64(o.Generation))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFile{}
	res.SetFile(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileToProto(o.File))
	res.SetContent(dcl.ValueOrEmptyString(o.Content))
	res.SetPath(dcl.ValueOrEmptyString(o.Path))
	res.SetState(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileStateEnumToProto(o.State))
	res.SetPermissions(dcl.ValueOrEmptyString(o.Permissions))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFile{}
	res.SetRemote(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteToProto(o.Remote))
	res.SetGcs(OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsToProto(o.Gcs))
	res.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	res.SetAllowInsecure(dcl.ValueOrEmptyBool(o.AllowInsecure))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemoteToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileRemote{}
	res.SetUri(dcl.ValueOrEmptyString(o.Uri))
	res.SetSha256Checksum(dcl.ValueOrEmptyString(o.Sha256Checksum))
	return res
}
// OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsToProto converts a OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcsToProto(o *alpha.OSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs) *alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentOSPoliciesResourceGroupsResourcesFileFileGcs{}
	res.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	res.SetObject(dcl.ValueOrEmptyString(o.Object))
	res.SetGeneration(dcl.ValueOrEmptyInt64(o.Generation))
	return res
}
// OsconfigAlphaOSPolicyAssignmentInstanceFilterToProto converts a OSPolicyAssignmentInstanceFilter object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentInstanceFilterToProto(o *alpha.OSPolicyAssignmentInstanceFilter) *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilter {
	if o == nil {
		return nil
	}
	res := &alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilter{}
	res.SetAll(dcl.ValueOrEmptyBool(o.All))
	// Convert the nested label and inventory filters; index directly to avoid copying elements.
	incl := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels, len(o.InclusionLabels))
	for i := range o.InclusionLabels {
		incl[i] = OsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabelsToProto(&o.InclusionLabels[i])
	}
	res.SetInclusionLabels(incl)
	excl := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels, len(o.ExclusionLabels))
	for i := range o.ExclusionLabels {
		excl[i] = OsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabelsToProto(&o.ExclusionLabels[i])
	}
	res.SetExclusionLabels(excl)
	invs := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInventories, len(o.Inventories))
	for i := range o.Inventories {
		invs[i] = OsconfigAlphaOSPolicyAssignmentInstanceFilterInventoriesToProto(&o.Inventories[i])
	}
	res.SetInventories(invs)
	return res
}
// OSPolicyAssignmentInstanceFilterInclusionLabelsToProto converts a OSPolicyAssignmentInstanceFilterInclusionLabels object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabelsToProto(o *alpha.OSPolicyAssignmentInstanceFilterInclusionLabels) *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels {
if o == nil {
return nil
}
p := &alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInclusionLabels{}
mLabels := make(map[string]string, len(o.Labels))
for k, r := range o.Labels {
mLabels[k] = r
}
p.SetLabels(mLabels)
return p
}
// OSPolicyAssignmentInstanceFilterExclusionLabelsToProto converts a OSPolicyAssignmentInstanceFilterExclusionLabels object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabelsToProto(o *alpha.OSPolicyAssignmentInstanceFilterExclusionLabels) *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels {
if o == nil {
return nil
}
p := &alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterExclusionLabels{}
mLabels := make(map[string]string, len(o.Labels))
for k, r := range o.Labels {
mLabels[k] = r
}
p.SetLabels(mLabels)
return p
}
// OSPolicyAssignmentInstanceFilterInventoriesToProto converts a OSPolicyAssignmentInstanceFilterInventories object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentInstanceFilterInventoriesToProto(o *alpha.OSPolicyAssignmentInstanceFilterInventories) *alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInventories {
if o == nil {
return nil
}
p := &alphapb.OsconfigAlphaOSPolicyAssignmentInstanceFilterInventories{}
p.SetOsShortName(dcl.ValueOrEmptyString(o.OSShortName))
p.SetOsVersion(dcl.ValueOrEmptyString(o.OSVersion))
return p
}
// OsconfigAlphaOSPolicyAssignmentRolloutToProto converts a OSPolicyAssignmentRollout object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentRolloutToProto(o *alpha.OSPolicyAssignmentRollout) *alphapb.OsconfigAlphaOSPolicyAssignmentRollout {
	if o == nil {
		return nil
	}
	p := &alphapb.OsconfigAlphaOSPolicyAssignmentRollout{}
	p.SetDisruptionBudget(OsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudgetToProto(o.DisruptionBudget))
	p.SetMinWaitDuration(dcl.ValueOrEmptyString(o.MinWaitDuration))
	return p
}

// OsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudgetToProto converts a OSPolicyAssignmentRolloutDisruptionBudget object to its proto representation.
func OsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudgetToProto(o *alpha.OSPolicyAssignmentRolloutDisruptionBudget) *alphapb.OsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudget {
	if o == nil {
		return nil
	}
	p := &alphapb.OsconfigAlphaOSPolicyAssignmentRolloutDisruptionBudget{}
	p.SetFixed(dcl.ValueOrEmptyInt64(o.Fixed))
	p.SetPercent(dcl.ValueOrEmptyInt64(o.Percent))
	return p
}
// OSPolicyAssignmentToProto converts a OSPolicyAssignment resource to its proto representation.
func OSPolicyAssignmentToProto(resource *alpha.OSPolicyAssignment) *alphapb.OsconfigAlphaOSPolicyAssignment {
	p := &alphapb.OsconfigAlphaOSPolicyAssignment{}
	// Scalar fields: empty-value defaults stand in for unset pointers.
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	p.SetInstanceFilter(OsconfigAlphaOSPolicyAssignmentInstanceFilterToProto(resource.InstanceFilter))
	p.SetRollout(OsconfigAlphaOSPolicyAssignmentRolloutToProto(resource.Rollout))
	p.SetRevisionId(dcl.ValueOrEmptyString(resource.RevisionId))
	p.SetRevisionCreateTime(dcl.ValueOrEmptyString(resource.RevisionCreateTime))
	p.SetEtag(dcl.ValueOrEmptyString(resource.Etag))
	p.SetRolloutState(OsconfigAlphaOSPolicyAssignmentRolloutStateEnumToProto(resource.RolloutState))
	p.SetBaseline(dcl.ValueOrEmptyBool(resource.Baseline))
	p.SetDeleted(dcl.ValueOrEmptyBool(resource.Deleted))
	p.SetReconciling(dcl.ValueOrEmptyBool(resource.Reconciling))
	p.SetUid(dcl.ValueOrEmptyString(resource.Uid))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	p.SetSkipAwaitRollout(dcl.ValueOrEmptyBool(resource.SkipAwaitRollout))
	// Repeated OS policies are converted element-by-element.
	sOSPolicies := make([]*alphapb.OsconfigAlphaOSPolicyAssignmentOSPolicies, len(resource.OSPolicies))
	for i, r := range resource.OSPolicies {
		sOSPolicies[i] = OsconfigAlphaOSPolicyAssignmentOSPoliciesToProto(&r)
	}
	p.SetOsPolicies(sOSPolicies)
	return p
}
// applyOSPolicyAssignment handles the gRPC request by passing it to the underlying OSPolicyAssignment Apply() method.
// It round-trips: request proto -> DCL resource -> Apply -> response proto.
func (s *OSPolicyAssignmentServer) applyOSPolicyAssignment(ctx context.Context, c *alpha.Client, request *alphapb.ApplyOsconfigAlphaOSPolicyAssignmentRequest) (*alphapb.OsconfigAlphaOSPolicyAssignment, error) {
	p := ProtoToOSPolicyAssignment(request.GetResource())
	res, err := c.ApplyOSPolicyAssignment(ctx, p)
	if err != nil {
		return nil, err
	}
	r := OSPolicyAssignmentToProto(res)
	return r, nil
}

// ApplyOsconfigAlphaOSPolicyAssignment handles the gRPC request by passing it to the underlying OSPolicyAssignment Apply() method.
func (s *OSPolicyAssignmentServer) ApplyOsconfigAlphaOSPolicyAssignment(ctx context.Context, request *alphapb.ApplyOsconfigAlphaOSPolicyAssignmentRequest) (*alphapb.OsconfigAlphaOSPolicyAssignment, error) {
	cl, err := createConfigOSPolicyAssignment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyOSPolicyAssignment(ctx, cl, request)
}
// DeleteOsconfigAlphaOSPolicyAssignment handles the gRPC request by passing it to the underlying OSPolicyAssignment Delete() method.
func (s *OSPolicyAssignmentServer) DeleteOsconfigAlphaOSPolicyAssignment(ctx context.Context, request *alphapb.DeleteOsconfigAlphaOSPolicyAssignmentRequest) (*emptypb.Empty, error) {
	cl, err := createConfigOSPolicyAssignment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteOSPolicyAssignment(ctx, ProtoToOSPolicyAssignment(request.GetResource()))
}
// ListOsconfigAlphaOSPolicyAssignment handles the gRPC request by passing it to the underlying OSPolicyAssignmentList() method.
func (s *OSPolicyAssignmentServer) ListOsconfigAlphaOSPolicyAssignment(ctx context.Context, request *alphapb.ListOsconfigAlphaOSPolicyAssignmentRequest) (*alphapb.ListOsconfigAlphaOSPolicyAssignmentResponse, error) {
	cl, err := createConfigOSPolicyAssignment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListOSPolicyAssignment(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	// Convert every listed resource back into its proto representation.
	var protos []*alphapb.OsconfigAlphaOSPolicyAssignment
	for _, r := range resources.Items {
		rp := OSPolicyAssignmentToProto(r)
		protos = append(protos, rp)
	}
	p := &alphapb.ListOsconfigAlphaOSPolicyAssignmentResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigOSPolicyAssignment builds an alpha API client authenticated with
// the credentials file named in the request. The error return is always nil
// today but is kept for interface stability with callers.
func createConfigOSPolicyAssignment(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Renamed service_account_file -> serviceAccountFile: Go parameter names
	// use mixedCaps and are invisible to callers, so this is safe.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
/*
* Copyright (c) 2018 Jeffrey Walter <jeffreydwalter@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package arlo
// Data is the detail portion of the Status message fragment returned by most
// calls to the Arlo API. It is embedded in Status under the "URL" JSON key and
// is only populated when Success is false.
type Data struct {
	Message string `json:"message,omitempty"`
	Reason  string `json:"reason,omitempty"`
	Error   string `json:"error,omitempty"`
}
// Status is the message fragment returned from most http calls to the Arlo API.
type Status struct {
	// Error details; serialized under the "URL" key by the API.
	Data    `json:"URL,omitempty"`
	Success bool `json:"success"`
}
// LoginResponse is an intermediate struct used when parsing data from the Login() call.
type LoginResponse struct {
	Data Account
	Status
}

// SessionResponse is an intermediate struct wrapping a Session payload.
type SessionResponse struct {
	Data Session
	Status
}

// UserProfileResponse is an intermediate struct wrapping a UserProfile payload.
type UserProfileResponse struct {
	Data UserProfile
	Status
}
// DeviceResponse is an intermediate struct used when parsing data from the GetDevices() call.
type DeviceResponse struct {
	Data Devices
	Status
}

// LibraryMetaDataResponse is an intermediate struct used when parsing data from the GetLibraryMetaData() call.
type LibraryMetaDataResponse struct {
	Data LibraryMetaData
	Status
}

// LibraryResponse is an intermediate struct wrapping a Library payload.
type LibraryResponse struct {
	Data Library
	Status
}

// CvrPlaylistResponse is an intermediate struct wrapping a CvrPlaylist payload.
type CvrPlaylistResponse struct {
	Data CvrPlaylist
	Status
}
// Stream holds the URL of a video stream returned by the API.
type Stream struct {
	URL string `json:"url"`
}

// StreamResponse is an intermediate struct wrapping a Stream payload.
type StreamResponse struct {
	Data Stream
	Status
}

// RecordingResponse is an intermediate struct wrapping a recording's Stream payload.
type RecordingResponse struct {
	Data Stream
	Status
}

// EventStreamResponse carries an event-stream payload; note its Status field is
// a plain string, unlike the embedded Status struct used elsewhere.
type EventStreamResponse struct {
	EventStreamPayload
	Status string `json:"status,omitempty"`
}
|
package main
import "fmt"
// main reads five characters from stdin, applies a fixed Caesar-style shift to
// encode them, prints the encoded text, then reverses the shift and prints the
// decoded (original) text.
func main() {
	// Read exactly five characters from standard input.
	var input [5]rune
	for i := 0; i < len(input); i++ {
		fmt.Scanf("%c", &input[i])
	}

	// Encode: shift each rune down by 39 (equivalent to +1 followed by -40).
	fmt.Println("Encoding.....")
	fmt.Println("ascii of input before conversion :", input)
	for i := range input {
		input[i] -= 39
	}
	fmt.Println("ascii of input after conversion :", input)

	fmt.Println("\nThe encoded string is :")
	for _, r := range input {
		fmt.Printf("%c", r)
	}

	// Decode: undo the shift by adding 39 back.
	for i := range input {
		input[i] += 39
	}
	fmt.Println("\nThe decoded string is :")
	for _, r := range input {
		fmt.Printf("%c", r)
	}
}
|
package drivers
// Storage device driver interface.
//type Driver interface {
// Register(*Service, ...RegisterOption) error
// Deregister(*Service) error
//}
|
package main
import "testing"
// TestMultiply checks multiply over a non-zero product and a product
// containing a zero factor.
func TestMultiply(t *testing.T) {
	cases := []struct {
		in  []int
		exp int
	}{
		{[]int{1, 4, 8, 6}, 192},
		{[]int{1, 4, 8, 0}, 0},
	}
	for _, c := range cases {
		if got := multiply(c.in...); got != c.exp {
			t.Errorf("Expected: %v\nGot: %v\n", c.exp, got)
		}
	}
}
|
package cli
import (
"testing"
)
// TestValidateAlphaNumeric exercises the alphaNumeric rule over letters,
// digits, and inputs containing punctuation.
func TestValidateAlphaNumeric(t *testing.T) {
	rule := "alphaNumeric"
	// newValidators builds the single-validator slice shared by every case.
	newValidators := func() []validator {
		return []validator{{
			expression: "",
			fields:     []string{"var1"},
			rule:       rule,
			message:    "var1 failed to validate",
		}}
	}
	testCases := []struct {
		name string
		ui   string
		ph   string
		v    []validator
		want bool
	}{
		{"letters", "abc", "var1", newValidators(), true},
		{"hyphen", "a-bc", "var1", newValidators(), false},
		{"underscore", "a_bc", "var1", newValidators(), false},
		{"numbers", "123", "var1", newValidators(), true},
		{"lettersAndNumbers", "acb123", "var1", newValidators(), true},
		{"specialChars", "*.(#", "var1", newValidators(), false},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, _ := Validate(tc.ui, tc.ph, tc.v)
			if got != tc.want {
				t.Errorf("got %v want %v", got, tc.want)
			}
		})
	}
}
// TestValidateRegExp exercises the regExp rule with a pattern that compiles,
// against both matching and non-matching input.
func TestValidateRegExp(t *testing.T) {
	rule := "regExp"
	// newValidators builds the single-validator slice used by both cases.
	newValidators := func(expr string) []validator {
		return []validator{{
			expression: expr,
			fields:     []string{"var1"},
			rule:       rule,
			message:    "var1 failed to validate",
		}}
	}
	testCases := []struct {
		name string
		ui   string
		ph   string
		v    []validator
		want bool
	}{
		{"compilesAndValidInput", "abc", "var1", newValidators("[a-z]"), true},
		{"compilesAndInvalidInput", "ABC", "var1", newValidators("[a-z]"), false},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, _ := Validate(tc.ui, tc.ph, tc.v)
			if got != tc.want {
				t.Errorf("got %v want %v", got, tc.want)
			}
		})
	}
}
// TestValidateRegExpCompileError checks that a regExp pattern that fails to
// compile yields both a false result and a non-nil error.
func TestValidateRegExpCompileError(t *testing.T) {
	input := "ABC"
	placeholder := "var1"
	validators := []validator{{
		expression: "[a-z", // malformed on purpose: unclosed character class
		fields:     []string{"var1"},
		rule:       "regExp",
		message:    "var1 failed to validate",
	}}

	got, err := Validate(input, placeholder, validators)
	if got != false {
		t.Errorf("got %v want %v", got, false)
	}
	if err == nil {
		t.Errorf("got %v want an error", err)
	}
}
|
package streaming
import (
"fmt"
"github.com/gorilla/mux"
"github.com/jlingohr/p2pvstream/hls"
"github.com/jlingohr/p2pvstream/settings"
"github.com/jlingohr/p2pvstream/stringutil"
"log"
"net/http"
"os"
"sync"
"time"
)
// DiscoveredFile records a file advertised by a peer node and when it was last seen.
type DiscoveredFile struct {
	Filename string
	NodeName string
	// Unix seconds of the last advertisement; entries older than the
	// configured FileExpiry are evicted.
	Timestamp int64
}

// M3UReq asks for the local path of a cached .m3u8 playlist; the answer
// (path, or "" for no content) is delivered on ResponseChan.
type M3UReq struct {
	MetaName     string
	ResponseChan chan string
}

// SegmentReq asks for the local path of a cached media segment; the answer
// (path, or "" for no content) is delivered on ResponseChan.
type SegmentReq struct {
	SegName      string
	MetaName     string
	ResponseChan chan string
}

// StreamedFile identifies the file currently being streamed.
type StreamedFile struct {
	NoExtFilename string
	MetaName      string
	SegCount      int
}

// ClientStartStreamReq asks the handler to start streaming File; signalling
// StopStream ends the session.
type ClientStartStreamReq struct {
	File       StreamedFile
	StopStream chan struct{}
}
// ClientStreamHandler serves locally cached HLS content over HTTP and tracks
// files discovered from peer nodes.
type ClientStreamHandler struct {
	IsStreaming     bool
	File            StreamedFile
	SegReqs         chan SegmentReq
	M3UReqs         chan M3UReq
	StopStreaming   chan struct{}
	// DiscoveredFiles is keyed by Filename.
	DiscoveredFiles map[string]DiscoveredFile
	Comm            ClientStreamComm
	config          settings.Config
	// m guards DiscoveredFiles and the streaming state above.
	// NOTE(review): some readers of IsStreaming/File (handleComm's StartStream
	// branch) do not take this lock — verify for races.
	m sync.RWMutex
}
// ClientStreamComm carries events between the stream handler and the rest of
// the node: newly discovered files, requests to stream a file, and stream
// start commands.
type ClientStreamComm struct {
	DiscoveredFiles   chan DiscoveredFile
	StreamFileRequest chan DiscoveredFile
	StartStream       chan ClientStartStreamReq
}
// NewClientStreamHandler constructs a client-side stream handler with the
// default placeholder file ("hlstest") selected and no stream active.
func NewClientStreamHandler(config settings.Config, comm ClientStreamComm) *ClientStreamHandler {
	return &ClientStreamHandler{
		IsStreaming:     false,
		File:            StreamedFile{NoExtFilename: "hlstest", MetaName: "hlstest.m3u8", SegCount: 10},
		SegReqs:         make(chan SegmentReq),
		M3UReqs:         make(chan M3UReq),
		StopStreaming:   make(chan struct{}),
		DiscoveredFiles: make(map[string]DiscoveredFile),
		Comm:            comm,
		config:          config,
	}
}
// Start launches the event loop and the HTTP server, each on its own goroutine.
// It returns immediately.
func (sh *ClientStreamHandler) Start() {
	go sh.handleComm()
	go sh.startHttpServer()
}
// startHttpServer registers the streaming routes and blocks serving HTTP on
// the configured port; it terminates the process (log.Fatal) if the server
// stops with an error.
func (sh *ClientStreamHandler) startHttpServer() {
	router := mux.NewRouter()
	router.StrictSlash(true)
	// Most specific routes first: segment, playlist, then control endpoints.
	router.HandleFunc("/stream/{fname}/{segName}/", sh.ServeTs).Methods("GET")
	router.HandleFunc("/stream/{fname}/", sh.ServeMeta).Methods("GET")
	router.HandleFunc("/initStream/{fname}/", sh.HandleStreamInit).Methods("GET")
	router.HandleFunc("/stopStream/", sh.HandleStopStream).Methods("GET")
	router.HandleFunc("/", sh.ServeDiscoveredFiles).Methods("GET")
	log.Printf("Starting HLS streaming server on port %s", sh.config.HTTPBindPort)
	log.Printf("Stream will be accessible via http://%s/stream/[filename.m3u8]/", sh.config.HTTPBindPort)
	log.Fatal(http.ListenAndServe(sh.config.HTTPBindPort, router))
}
// handleComm is the handler's event loop: it records files advertised by
// peers, periodically evicts advertisements older than FileExpiry, and spawns
// an HLS request handler when a stream is started. It never returns.
func (sh *ClientStreamHandler) handleComm() {
	// todo extract constants somewhere
	// Evict slightly less often than the expiry window so entries can age out.
	removeStaleTicker := time.NewTicker((time.Duration(sh.config.FileExpiry) + 5) * time.Second).C
	for {
		select {
		case file := <-sh.Comm.DiscoveredFiles:
			sh.m.Lock()
			sh.DiscoveredFiles[file.Filename] = file
			sh.m.Unlock()
		case <-removeStaleTicker:
			// Collect expired names under the read lock, then delete them
			// under the write lock.
			filesToRemove := make([]string, 0)
			sh.m.RLock()
			for _, file := range sh.DiscoveredFiles {
				if time.Now().After(time.Unix(file.Timestamp, 0).Add(time.Duration(sh.config.FileExpiry) * time.Second)) {
					filesToRemove = append(filesToRemove, file.Filename)
				}
			}
			sh.m.RUnlock()
			sh.m.Lock()
			for _, fname := range filesToRemove {
				delete(sh.DiscoveredFiles, fname)
			}
			sh.m.Unlock()
			log.Println("Discovered files:", sh.DiscoveredFiles)
		case req := <-sh.Comm.StartStream:
			// NOTE(review): IsStreaming and File are accessed here without
			// holding sh.m, while handleHLSRequests writes IsStreaming under
			// the lock — looks racy, verify.
			if sh.IsStreaming {
				// should never reach here
				log.Println("Attempted to start a stream without properly terminating previous session.")
				continue
			}
			sh.File = req.File
			go sh.handleHLSRequests(req.StopStream)
		}
	}
}
// handleHLSRequests services playlist (m3u8) and segment requests for the
// currently streamed file until the stop channel fires.
//
// Segment requests whose file is not yet in the local cache are parked in
// pendingReqs and re-checked on a 3-second ticker; playlist requests are
// answered immediately with either the cached path or "" (no content).
func (sh *ClientStreamHandler) handleHLSRequests(stop chan struct{}) {
	sh.m.Lock()
	sh.IsStreaming = true
	sh.StopStreaming = stop
	sh.m.Unlock()

	// Segment paths not yet on disk, mapped to their waiting response channels.
	pendingReqs := make(map[string]chan string)

	// Keep the *time.Ticker handle and stop it on return; the original kept
	// only the channel, leaking the ticker when the stream ended.
	ticker := time.NewTicker(3 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-sh.StopStreaming:
			sh.m.Lock()
			sh.IsStreaming = false
			sh.m.Unlock()
			return
		case req := <-sh.SegReqs:
			noExtFilename := stringutil.RemoveFilenameExt(req.MetaName)
			if noExtFilename != sh.File.NoExtFilename {
				req.ResponseChan <- "" // not the file being streamed: no content
				continue
			}
			cachedPath := fmt.Sprintf("%s%s/%s/%s", hls.CACHE_PATH, sh.config.NodeName, noExtFilename, req.SegName)
			if _, err := os.Stat(cachedPath); os.IsNotExist(err) {
				// Segment not downloaded yet; answer once it appears on disk.
				pendingReqs[cachedPath] = req.ResponseChan
				continue
			}
			req.ResponseChan <- cachedPath
		case req := <-sh.M3UReqs:
			noExtFilename := stringutil.RemoveFilenameExt(req.MetaName)
			if noExtFilename != sh.File.NoExtFilename {
				req.ResponseChan <- "" // not the file being streamed: no content
				continue
			}
			cachedPath := fmt.Sprintf("%s%s/%s/%s", hls.CACHE_PATH, sh.config.NodeName, noExtFilename, req.MetaName)
			if _, err := os.Stat(cachedPath); os.IsNotExist(err) {
				req.ResponseChan <- ""
				continue
			}
			req.ResponseChan <- cachedPath
		case <-ticker.C:
			if len(pendingReqs) == 0 {
				continue
			}
			// Answer any parked segment requests whose file has arrived.
			responded := make([]string, 0)
			for path := range pendingReqs {
				if _, err := os.Stat(path); !os.IsNotExist(err) {
					responded = append(responded, path)
					respChan := pendingReqs[path]
					respChan <- path
				}
			}
			for _, path := range responded {
				delete(pendingReqs, path)
			}
		}
	}
}
|
package manager
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"testing"
)
// getTestServer builds a manager Server with its file watcher running and
// exposes its routing through an httptest server. Callers must Close the
// returned test server.
func getTestServer() (*Server, *httptest.Server) {
	srv := new(Server)
	srv.startFileWatcher()
	ts := httptest.NewServer(srv.getManagerRouting())
	return srv, ts
}
// TestStaticServing verifies the manager serves the bundled livereload script.
func TestStaticServing(t *testing.T) {
	_, server := getTestServer()
	defer server.Close()

	resp, err := http.Get(server.URL + "/livereload.js")
	if err != nil {
		// Fatal, not Error: resp is nil on error, so falling through to
		// resp.StatusCode below would panic.
		t.Fatal("Request to local manager server failed")
	}
	// Close the body so the client's connection can be reused.
	defer resp.Body.Close()

	if resp.StatusCode != 200 || resp.ContentLength < 500 {
		t.Error("Local request to livereload script failed")
	}
}
// TestCreateServerRequest posts to /create-server and verifies one file
// server is registered with the requested root path.
func TestCreateServerRequest(t *testing.T) {
	m, server := getTestServer()
	defer server.Close()

	tempDir, _ := ioutil.TempDir("", "webby-test")
	defer os.RemoveAll(tempDir)

	resp, err := http.PostForm(server.URL+"/create-server",
		url.Values{"root_path": {tempDir}})
	if err != nil {
		t.Fatal("Create Server request failed:", err)
	}
	// The original never closed the response body, leaking the connection.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Error("Create Server request failed")
	}

	if len(m.FileServers) != 1 {
		t.Error("A file server was not created")
		t.FailNow()
	}
	if m.FileServers[0].RootPath != tempDir {
		t.Error("New fileserver path is incorrect")
	}
}
// TestDeleteServerRequest creates a file server, deletes it via
// /delete-server, and verifies both the server and its folder watcher are gone.
func TestDeleteServerRequest(t *testing.T) {
	m, server := getTestServer()
	defer server.Close()

	// Create a file server to delete.
	tempDir, _ := ioutil.TempDir("", "webby-test")
	defer os.RemoveAll(tempDir)
	resp, err := http.PostForm(server.URL+"/create-server",
		url.Values{"root_path": {tempDir}})
	if err != nil {
		// The original ignored this error; a failed POST would make every
		// later assertion misleading.
		t.Fatal("Create Server request failed:", err)
	}
	resp.Body.Close()

	if len(m.FileServers) != 1 {
		t.Error("A file server was not created")
		t.FailNow()
	}
	if len(m.WatchedFolders) != 1 {
		t.Error("A file watched was not created")
		t.FailNow()
	}

	fileServerId := m.FileServers[0].ID
	resp, err = http.Get(server.URL + fmt.Sprintf("/delete-server?id=%d", fileServerId))
	if err != nil {
		t.Fatal(err.Error())
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Delete request returned response code %d", resp.StatusCode)
	}
	if resp.Request.URL.String() != server.URL+"/" {
		t.Error("Response did not redirect")
	}
	if len(m.FileServers) > 0 {
		t.Error("The manager fileserver was not deleted")
	}
	if len(m.WatchedFolders) > 0 {
		t.Error("The manager folder watcher was not deleted")
	}
}
|
package api
import (
"net/http"
"github.com/gin-gonic/gin"
"advance-go/internal/config"
"advance-go/internal/ping"
"advance-go/internal/project"
"advance-go/internal/score"
)
// Route describes one HTTP endpoint: a human-readable name, the URL path,
// the HTTP method, and the gin handler to invoke.
type Route struct {
	Name     string
	Path     string
	Method   string
	Endpoint gin.HandlerFunc
}
// Init wires the v1 API routes (ping, project, score) into a new gin engine
// and returns it as an http.Handler.
func Init(conf *config.Config) http.Handler {
	projectHandler := project.NewHandler(conf)
	scoreHandler := score.NewHandler(conf)

	routes := []Route{
		{Name: "common ping", Method: http.MethodGet, Path: "/ping", Endpoint: ping.Endpoint},
		{Name: "show project code", Method: http.MethodGet, Path: "/project", Endpoint: projectHandler.GetProjectCode},
		{Name: "score", Method: http.MethodPost, Path: "/score", Endpoint: scoreHandler.GetScore},
	}

	engine := gin.New()
	v1 := engine.Group("/v1")
	for _, route := range routes {
		v1.Handle(route.Method, route.Path, route.Endpoint)
	}
	return engine
}
|
package forms
import (
"strings"
"github.com/astaxie/beego/orm"
"github.com/astaxie/beego/validation"
"github.com/imsilence/gocmdb/server/cloud"
"github.com/imsilence/gocmdb/server/models"
)
// PlatformCreateForm carries the fields submitted when registering a new
// cloud platform (name, type, endpoint address, region, and credentials).
type PlatformCreateForm struct {
	Name    string `form:"name"`
	Type    string `form:"type"`
	Addr    string `form:"addr"`
	Region  string `form:"region"`
	Key     string `form:"key"`
	Secrect string `form:"secrect"` // NOTE(review): "Secrect" looks like a typo of "Secret", but the name is used consistently across forms/models — keep.
	Remark  string `form:"remark"`
}
// Valid implements beego validation for platform creation: it normalizes all
// fields, enforces length/charset rules, checks name uniqueness, and finally
// test-connects to the cloud platform with the supplied credentials.
func (f *PlatformCreateForm) Valid(v *validation.Validation) {
	// Normalize: strip surrounding whitespace from every field first.
	f.Name = strings.TrimSpace(f.Name)
	f.Type = strings.TrimSpace(f.Type)
	f.Addr = strings.TrimSpace(f.Addr)
	f.Region = strings.TrimSpace(f.Region)
	f.Key = strings.TrimSpace(f.Key)
	f.Secrect = strings.TrimSpace(f.Secrect)
	f.Remark = strings.TrimSpace(f.Remark)
	// Name: alphanumeric/dash/underscore, 5-32 chars (messages are user-facing Chinese).
	v.AlphaDash(f.Name, "name.name").Message("用户名只能由数字、英文字母、中划线和下划线组成")
	v.MinSize(f.Name, 5, "name.name").Message("用户名长度必须在%d-%d之内", 5, 32)
	v.MaxSize(f.Name, 32, "name.name").Message("用户名长度必须在%d-%d之内", 5, 32)
	// Only check uniqueness when the name itself passed validation.
	if _, ok := v.ErrorsMap["name"]; !ok {
		ormer := orm.NewOrm()
		platform := &models.Platform{Name: f.Name}
		if ormer.Read(platform, "Name", "DeleteTime") != orm.ErrNoRows {
			v.SetError("name", "名称已存在")
		}
	}
	if _, ok := models.PlatformTypes[f.Type]; !ok {
		v.SetError("type", "平台选择不正确")
	}
	v.MaxSize(f.Addr, 512, "addr.addr").Message("地址长度必须在512个字符之内")
	v.MinSize(f.Region, 1, "region.region").Message("区域长度必须在%d-%d之内", 1, 32)
	v.MaxSize(f.Region, 32, "region.region").Message("区域长度必须在%d-%d之内", 1, 32)
	// Credentials are required on create (min size 1); contrast with the
	// modify form, which allows empty values to keep the stored ones.
	v.MinSize(f.Key, 1, "key.key").Message("Key长度必须在%d-%d之内", 1, 512)
	v.MaxSize(f.Key, 512, "key.key").Message("Key长度必须在%d-%d之内", 1, 512)
	v.MinSize(f.Secrect, 1, "secrect.secrect").Message("Secrect长度必须在%d-%d之内", 1, 512)
	v.MaxSize(f.Secrect, 512, "secrect.secrect").Message("Secrect长度必须在%d-%d之内", 1, 512)
	v.MaxSize(f.Remark, 512, "remark.remark").Message("备注长度必须在512个字符之内")
	// With all static checks passed, verify the credentials actually work.
	if !v.HasErrors() {
		plugin, err := cloud.DefaultManager.Cloud(f.Type)
		if err != nil {
			v.SetError("error", err.Error())
		} else {
			plugin.Init(f.Addr, f.Key, f.Secrect, f.Region)
			if err := plugin.TestConnect(); err != nil {
				v.SetError("error", "配置不正确, 测试连接失败")
			}
		}
	}
}
// PlatformModifyForm carries the fields submitted when editing an existing
// cloud platform; Id identifies the record, and empty Key/Secrect mean
// "keep the stored credentials".
type PlatformModifyForm struct {
	Id      int    `form:"id"`
	Name    string `form:"name"`
	Type    string `form:"type"`
	Addr    string `form:"addr"`
	Region  string `form:"region"`
	Key     string `form:"key"`
	Secrect string `form:"secrect"`
	Remark  string `form:"remark"`
}
// Valid implements beego validation for platform modification: it normalizes
// fields, confirms the target record exists, enforces the same rules as
// creation (allowing blank credentials to mean "unchanged"), and test-connects
// using the effective credentials.
func (f *PlatformModifyForm) Valid(v *validation.Validation) {
	// Normalize: strip surrounding whitespace from every field first.
	f.Name = strings.TrimSpace(f.Name)
	f.Type = strings.TrimSpace(f.Type)
	f.Addr = strings.TrimSpace(f.Addr)
	f.Region = strings.TrimSpace(f.Region)
	f.Key = strings.TrimSpace(f.Key)
	f.Secrect = strings.TrimSpace(f.Secrect)
	f.Remark = strings.TrimSpace(f.Remark)
	// The record being modified must exist; bail out early otherwise.
	ormer := orm.NewOrm()
	platform := &models.Platform{Id: f.Id}
	if ormer.Read(platform) == orm.ErrNoRows {
		v.SetError("error", "操作对象不存在")
		return
	}
	v.AlphaDash(f.Name, "name.name").Message("用户名只能由数字、英文字母、中划线和下划线组成")
	v.MinSize(f.Name, 5, "name.name").Message("用户名长度必须在%d-%d之内", 5, 32)
	v.MaxSize(f.Name, 32, "name.name").Message("用户名长度必须在%d-%d之内", 5, 32)
	// Uniqueness check excludes the record being edited (platform.Id != f.Id).
	if _, ok := v.ErrorsMap["name"]; !ok {
		ormer := orm.NewOrm()
		platform := &models.Platform{Name: f.Name}
		if ormer.Read(platform, "Name", "DeleteTime") != orm.ErrNoRows && platform.Id != f.Id {
			v.SetError("name", "名称已存在")
		}
	}
	if _, ok := models.PlatformTypes[f.Type]; !ok {
		v.SetError("type", "平台选择不正确")
	}
	v.MaxSize(f.Addr, 512, "addr.addr").Message("地址长度必须在512个字符之内")
	v.MinSize(f.Region, 1, "region.region").Message("区域长度必须在%d-%d之内", 1, 32)
	v.MaxSize(f.Region, 32, "region.region").Message("区域长度必须在%d-%d之内", 1, 32)
	// Min size 0: unlike creation, empty credentials are allowed here and
	// fall back to the stored values below.
	v.MinSize(f.Key, 0, "key.key").Message("Key长度必须在%d-%d之内", 0, 512)
	v.MaxSize(f.Key, 512, "key.key").Message("Key长度必须在%d-%d之内", 0, 512)
	v.MinSize(f.Secrect, 0, "secrect.secrect").Message("Secrect长度必须在%d-%d之内", 0, 512)
	v.MaxSize(f.Secrect, 512, "secrect.secrect").Message("Secrect长度必须在%d-%d之内", 0, 512)
	v.MaxSize(f.Remark, 512, "remark.remark").Message("备注长度必须在512个字符之内")
	if !v.HasErrors() {
		plugin, err := cloud.DefaultManager.Cloud(f.Type)
		if err != nil {
			v.SetError("error", err.Error())
		} else {
			// Blank submitted credentials mean "keep the stored ones".
			key := f.Key
			if key == "" {
				key = platform.Key
			}
			secrect := f.Secrect
			if secrect == "" {
				secrect = platform.Secrect
			}
			plugin.Init(f.Addr, key, secrect, f.Region)
			if err := plugin.TestConnect(); err != nil {
				v.SetError("error", "配置不正确, 测试连接失败")
			}
		}
	}
}
|
// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package legacyrpc
import "github.com/btcsuite/btclog"
// log is the package-wide logger; disabled (no output) until UseLogger is called.
var log = btclog.Disabled

// UseLogger sets the package-wide logger. Any calls to this function must be
// made before a server is created and used (it is not concurrent safe).
func UseLogger(logger btclog.Logger) {
	log = logger
}
|
package main
import (
"fmt"
"net/http"
"strconv"
"strings"
)
const cookieName = "__user_counter"
// main registers the cookie-counter handler on every path and serves on :8080.
func main() {
	http.HandleFunc("/", incrementCookie)
	fmt.Println("listening on http://localhost:8080")
	// NOTE(review): the ListenAndServe error is ignored; a startup failure
	// (e.g. port already in use) exits silently — consider logging it.
	http.ListenAndServe(":8080", nil)
}
// incrementCookie reads the visit-counter cookie from the request, increments
// it, and writes the new value back. On a first visit (no cookie present) the
// counter is initialized to "1" with no response body.
func incrementCookie(w http.ResponseWriter, r *http.Request) {
	// First, try to read the cookie. Use the shared constant so the lookup
	// name stays in sync with the cookie we set below (the original used a
	// duplicated string literal here).
	c, err := r.Cookie(cookieName)
	if err != nil {
		// NOTE(review): matching the error text is fragile; errors.Is(err,
		// http.ErrNoCookie) would be more robust if imports can change.
		if strings.Contains(err.Error(), "cookie not present") {
			http.SetCookie(w, &http.Cookie{
				Name:  cookieName,
				Value: "1",
			})
		} else {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	// If we didn't return, increment the cookie value. A malformed value is
	// unrecoverable: report and STOP. The original fell through here and
	// served a bogus "set to 1" response after writing the 500.
	counterVal, err := strconv.Atoi(c.Value)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	newCookie := http.Cookie{
		Name:  cookieName,
		Value: strconv.Itoa(counterVal + 1),
	}
	http.SetCookie(w, &newCookie)
	fmt.Fprintf(w, "Cookie %s set to %d", cookieName, counterVal+1)
}
|
package reader
import (
"errors"
"io"
"io/ioutil"
)
// rawReader wraps an io.Reader and exposes it as plain-text content that
// cannot be unmarshalled into structured data.
type rawReader struct {
	input io.Reader
}

// Unmarshal always fails: raw text has no structure to decode into.
func (r rawReader) Unmarshal(object interface{}) error {
	return errors.New("unable to unmarshal plain text")
}

// Valid reports whether the content is readable; raw text always is.
func (r rawReader) Valid() bool {
	return true
}

// Reader returns the underlying input stream. It never errors today, but the
// error return is part of the interface.
func (r rawReader) Reader() (io.Reader, error) {
	return r.input, nil
}

// Bytes drains the underlying reader and returns its full content.
func (r rawReader) Bytes() ([]byte, error) {
	reader, err := r.Reader()
	if err != nil {
		// Propagate instead of discarding with a blank identifier as the
		// original did; keeps Bytes honest if Reader ever gains failure modes.
		return nil, err
	}
	return ioutil.ReadAll(reader)
}

// ToString drains the underlying reader and returns its content as a *string.
func (r rawReader) ToString() (*string, error) {
	result, err := ioutil.ReadAll(r.input)
	if err != nil {
		return nil, err
	}
	stringResult := string(result)
	return &stringResult, nil
}

// ContentType identifies the payload as plain text.
func (r rawReader) ContentType() string {
	return "text/plain"
}
|
// Package worker implements a worker node. A worker node can connect to a
// single master node and replicate its data.
package worker
|
package main
import (
"fmt"
"github.com/achakravarty/30-days-of-go/day7"
)
// main reads an array size and that many integers from stdin, reverses the
// array via day7.Reverse, and prints the result space-separated.
func main() {
	// Read the element count, then the elements themselves.
	var size int
	fmt.Scanf("%d\n", &size)

	values := make([]int, size)
	for idx := 0; idx < size; idx++ {
		fmt.Scanf("%d", &values[idx])
	}

	reversed := day7.Reverse(values)
	for idx := 0; idx < size; idx++ {
		fmt.Printf("%d ", reversed[idx])
	}
}
|
package gosnowth
import (
"context"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
)
// graphiteMetricsTestData is a canned metrics/tags "find" response body served
// by the mock IRONdb server in the graphite tests below.
const graphiteMetricsTestData = `[
	{
		"leaf": true,
		"name": "11223344-5566-7788-9900-aabbccddeeff.test;test=test",
		"leaf_data": {
			"uuid": "11223344-5566-7788-9900-aabbccddeeff",
			"name": "test|ST[test:test]",
			"egress_function": "avg"
		}
	}
]`
// graphiteDatapointsTestData is a canned series_multi response body (one series
// with a null, a value, and a null) served by the mock server in the tests below.
const graphiteDatapointsTestData = `{
	"from": 0,
	"to": 900,
	"step": 300,
	"series": {
		"11223344-5566-7788-9900-aabbccddeeff.test": [
			null,
			0.1,
			null
		]
	}
}`
// TestGraphiteFindMetrics exercises GraphiteFindMetrics against a mock server
// and checks the single returned metric name.
func TestGraphiteFindMetrics(t *testing.T) {
	t.Parallel()
	// Mock IRONdb node: /state and /stats.json serve the shared fixtures the
	// client fetches on startup; the metrics/find route serves the test data.
	ms := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter,
		r *http.Request,
	) {
		if r.RequestURI == "/state" {
			_, _ = w.Write([]byte(stateTestData))
			return
		}
		if r.RequestURI == "/stats.json" {
			_, _ = w.Write([]byte(statsTestData))
			return
		}
		if strings.HasPrefix(r.RequestURI,
			"/graphite/1/test/metrics/find?query=test") {
			w.Header().Set("X-Snowth-Search-Result-Count", "1")
			_, _ = w.Write([]byte(graphiteMetricsTestData))
			return
		}
	}))
	defer ms.Close()
	sc, err := NewClient(context.Background(),
		&Config{Servers: []string{ms.URL}})
	if err != nil {
		t.Fatal("Unable to create snowth client", err)
	}
	u, err := url.Parse(ms.URL)
	if err != nil {
		t.Fatal("Invalid test URL")
	}
	node := &SnowthNode{url: u}
	res, err := sc.GraphiteFindMetrics(1, "test", "test", nil, node)
	if err != nil {
		t.Fatal(err)
	}
	if len(res) != 1 {
		t.Fatalf("Expected result length: 1, got: %v", len(res))
	}
	exp := "11223344-5566-7788-9900-aabbccddeeff.test;test=test"
	if res[0].Name != exp {
		t.Errorf("Expected metric name: %v, got: %v", exp, res[0].Name)
	}
}
// TestGraphiteFindTags exercises GraphiteFindTags against a mock server; it
// reuses the metrics fixture since the response shape is identical.
func TestGraphiteFindTags(t *testing.T) {
	t.Parallel()
	ms := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter,
		r *http.Request,
	) {
		if r.RequestURI == "/state" {
			_, _ = w.Write([]byte(stateTestData))
			return
		}
		if r.RequestURI == "/stats.json" {
			_, _ = w.Write([]byte(statsTestData))
			return
		}
		if strings.HasPrefix(r.RequestURI,
			"/graphite/1/test/tags/find?query=test") {
			w.Header().Set("X-Snowth-Search-Result-Count", "1")
			_, _ = w.Write([]byte(graphiteMetricsTestData))
			return
		}
	}))
	defer ms.Close()
	sc, err := NewClient(context.Background(),
		&Config{Servers: []string{ms.URL}})
	if err != nil {
		t.Fatal("Unable to create snowth client", err)
	}
	u, err := url.Parse(ms.URL)
	if err != nil {
		t.Fatal("Invalid test URL")
	}
	node := &SnowthNode{url: u}
	res, err := sc.GraphiteFindTags(1, "test", "test", nil, node)
	if err != nil {
		t.Fatal(err)
	}
	if len(res) != 1 {
		t.Fatalf("Expected result length: 1, got: %v", len(res))
	}
	exp := "11223344-5566-7788-9900-aabbccddeeff.test;test=test"
	if res[0].Name != exp {
		t.Errorf("Expected metric name: %v, got: %v", exp, res[0].Name)
	}
}
// TestGraphiteGetDatapoints exercises GraphiteGetDatapoints against a mock
// server and checks the window bounds, step, and both null and non-null values.
func TestGraphiteGetDatapoints(t *testing.T) {
	t.Parallel()
	ms := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter,
		r *http.Request,
	) {
		if r.RequestURI == "/state" {
			_, _ = w.Write([]byte(stateTestData))
			return
		}
		if r.RequestURI == "/stats.json" {
			_, _ = w.Write([]byte(statsTestData))
			return
		}
		if strings.HasPrefix(r.RequestURI,
			"/graphite/1/test/series_multi") {
			w.Header().Set("X-Snowth-Search-Result-Count", "1")
			_, _ = w.Write([]byte(graphiteDatapointsTestData))
			return
		}
	}))
	defer ms.Close()
	sc, err := NewClient(context.Background(),
		&Config{Servers: []string{ms.URL}})
	if err != nil {
		t.Fatal("Unable to create snowth client", err)
	}
	u, err := url.Parse(ms.URL)
	if err != nil {
		t.Fatal("Invalid test URL")
	}
	node := &SnowthNode{url: u}
	res, err := sc.GraphiteGetDatapoints(1, "test", &GraphiteLookup{
		Start: 0,
		End:   900,
		Names: []string{"11223344-5566-7788-9900-aabbccddeeff.test"},
	}, nil, node)
	if err != nil {
		t.Fatal(err)
	}
	if res.From != 0 {
		t.Errorf("Expected from: 0, got: %v", res.From)
	}
	if res.To != 900 {
		t.Errorf("Expected to: 900, got: %v", res.To)
	}
	if res.Step != 300 {
		t.Errorf("Expected step: 300, got: %v", res.Step)
	}
	// Values are *float64: JSON nulls decode to nil entries.
	rv := res.Series["11223344-5566-7788-9900-aabbccddeeff.test"][0]
	if rv != nil {
		t.Errorf("Expected null value, got: %v", rv)
	}
	rv = res.Series["11223344-5566-7788-9900-aabbccddeeff.test"][1]
	if *rv != 0.1 {
		t.Errorf("Expected value: 0.1, got: %v", *rv)
	}
}
|
package asset
import (
sdk "github.com/irisnet/irishub/types"
)
// InitGenesis - store genesis parameters
// InitGenesis - store genesis parameters
func InitGenesis(ctx sdk.Context, k Keeper, data GenesisState) {
	if err := ValidateGenesis(data); err != nil {
		panic(err.Error())
	}

	k.SetParamSet(ctx, data.Params)

	// Restore every created gateway together with its owner index entry.
	for _, gw := range data.Gateways {
		k.SetGateway(ctx, gw)
		k.SetOwnerGateway(ctx, gw.Owner, gw.Moniker)
	}

	// Restore every issued token; a failure here aborts chain start.
	for _, tk := range data.Tokens {
		if _, _, err := k.AddToken(ctx, tk); err != nil {
			panic(err.Error())
		}
	}
}
// ExportGenesis - output genesis parameters
// ExportGenesis - output genesis parameters
func ExportGenesis(ctx sdk.Context, k Keeper) GenesisState {
	// Walk the store and collect every registered gateway.
	var gws []Gateway
	k.IterateGateways(ctx, func(g Gateway) (stop bool) {
		gws = append(gws, g)
		return false // never stop early; export everything
	})

	// Walk the store and collect every issued token.
	var tks Tokens
	k.IterateTokens(ctx, func(ft FungibleToken) (stop bool) {
		tks = append(tks, ft)
		return false // never stop early; export everything
	})

	return GenesisState{
		Params:   k.GetParamSet(ctx),
		Tokens:   tks,
		Gateways: gws,
	}
}
// get raw genesis raw message for testing
// DefaultGenesisState returns the default genesis state, used when no
// explicit genesis data is provided.
func DefaultGenesisState() GenesisState {
	state := GenesisState{Params: DefaultParams()}
	state.Tokens = []FungibleToken{}
	state.Gateways = []Gateway{}
	return state
}
// get raw genesis raw message for testing
// DefaultGenesisStateForTest returns a default genesis state with
// test-specific parameters, for use in tests.
func DefaultGenesisStateForTest() GenesisState {
	state := GenesisState{Params: DefaultParamsForTest()}
	state.Tokens = []FungibleToken{}
	state.Gateways = []Gateway{}
	return state
}
// ValidateGenesis validates the provided asset genesis state to ensure the
// expected invariants holds.
// ValidateGenesis validates the provided asset genesis state to ensure the
// expected invariants hold: params, gateways, and tokens must each pass
// their own validation. The first failure encountered is returned.
func ValidateGenesis(data GenesisState) error {
	if err := ValidateParams(data.Params); err != nil {
		return err
	}
	if err := validateGateways(data.Gateways); err != nil {
		return err
	}
	return data.Tokens.Validate()
}
// ValidateGateways validates the provided gateways
// validateGateways validates each provided gateway, returning the first
// validation error encountered or nil if all gateways are valid.
func validateGateways(gateways []Gateway) error {
	for i := range gateways {
		if err := gateways[i].Validate(); err != nil {
			return err
		}
	}
	return nil
}
|
package nilchan
import "fmt"
// CheckChanAndMake cheks if chan of type int is nil and make it
func CheckChanAndMake() {
var a chan int
if a == nil {
fmt.Println("channel is nil")
a = make(chan int)
fmt.Printf("Type of channel is %T", a)
if a == nil {
fmt.Println("channel is nil again")
}
}
}
|
package k8s
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
)
// ContainerLogsImpl fetches and writes the log output of a single
// container within a pod.
type ContainerLogsImpl struct {
	// namespace is the Kubernetes namespace containing the pod.
	namespace string
	// podName is the name of the pod the container runs in.
	podName string
	// container is the container whose logs are fetched and written.
	container corev1.Container
}
// NewContainerLogger builds a ContainerLogsImpl for the given container in
// the named pod and namespace.
func NewContainerLogger(namespace, podName string, container corev1.Container) ContainerLogsImpl {
	logger := ContainerLogsImpl{namespace: namespace, podName: podName, container: container}
	return logger
}
// Fetch opens a streaming reader over the container's logs via the
// Kubernetes REST API. The caller is responsible for closing the returned
// stream on success.
func (c ContainerLogsImpl) Fetch(ctx context.Context, restApi rest.Interface) (io.ReadCloser, error) {
	opts := &corev1.PodLogOptions{Container: c.container.Name}
	req := restApi.Get().
		Namespace(c.namespace).
		Name(c.podName).
		Resource("pods").
		SubResource("log").
		VersionedParams(opts, scheme.ParameterCodec)

	stream, err := req.Stream(ctx)
	if err != nil {
		// Return an explicit nil stream so callers never receive a value
		// alongside a non-nil error (the original returned stream, err).
		return nil, errors.Wrap(err, "failed to create container log stream")
	}
	return stream, nil
}
// Write streams the container log from reader into
// <rootDir>/<container>/<container>.log, closing both the reader and the
// destination file before returning. If the copy fails, the failure is
// also recorded inside the log file via writeError so the saved log
// explains why it is truncated.
func (c ContainerLogsImpl) Write(reader io.ReadCloser, rootDir string) error {
	logDir := filepath.Join(rootDir, c.container.Name)
	mkErr := os.MkdirAll(logDir, 0744)
	if mkErr != nil && !os.IsExist(mkErr) {
		return fmt.Errorf("error creating container log dir: %s", mkErr)
	}

	target := filepath.Join(logDir, fmt.Sprintf("%s.log", c.container.Name))
	logrus.Debugf("Writing pod container log %s", target)

	out, err := os.Create(target)
	if err != nil {
		return err
	}
	defer out.Close()
	defer reader.Close()

	_, cpErr := io.Copy(out, reader)
	if cpErr == nil {
		return nil
	}

	// Append the copy failure to the log file itself; if even that write
	// fails, report both errors together.
	annotated := fmt.Errorf("failed to copy container log:\n%s", cpErr)
	if wErr := writeError(annotated, out); wErr != nil {
		return fmt.Errorf("failed to write previous err [%s] to file: %s", cpErr, wErr)
	}
	return cpErr
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.