text stringlengths 11 4.05M |
|---|
package RedisPool
import (
"Common/redisz"
"errors"
"fmt"
_ "github.com/garyburd/redigo/redis"
)
var redisPool *redisz.RedisPool
// InitRedis creates the package-level redis pool named "common" at the
// given address with runCount connections. It must be called before any
// other function in this package; they all dereference redisPool.
func InitRedis(ip string, runCount int) {
	redisPool = redisz.NewRedisPool("common", ip, "", runCount)
}
// RedisDBSize prints the number of keys currently stored in redis.
func RedisDBSize() {
	fmt.Printf("size is %d \n", redisPool.Dbsize())
}
// RedisReadBytes returns the raw bytes stored under key via GET.
func RedisReadBytes(key string) ([]byte, error) {
	return redisPool.Get(key)
}
// RedisReadStringMap returns the full hash stored under key via HGETALL
// as a field-to-value map.
func RedisReadStringMap(key string) (map[string]string, error) {
	return redisPool.Hgetall(key)
}
// RedisReadString returns the string value stored under key via GET.
// When timesava (hours) is positive, the key's TTL is refreshed to
// timesava*3600 seconds after a successful read.
func RedisReadString(key string, timesava int) (string, error) {
	data, err := redisPool.Get(key)
	if err != nil {
		// Bug fix: the GET error was previously ignored, which could
		// report a bogus empty string as a successful read.
		return "", err
	}
	if timesava > 0 {
		if err = redisPool.Exprie(key, timesava*3600); err != nil {
			return "", err
		}
	}
	return string(data), nil
}
// RedisReadInt64 returns the integer value stored under key and, when
// timesava (hours) is positive, refreshes the key's TTL to
// timesava*3600 seconds.
func RedisReadInt64(key string, timesava int) (int64, error) {
	value, err := redisPool.GetInt64(key)
	if err != nil {
		return 0, err
	}
	if timesava <= 0 {
		return value, nil
	}
	if err := redisPool.Exprie(key, timesava*3600); err != nil {
		return 0, err
	}
	return value, nil
}
// RedisWrite stores data under key via SET and, when timesava (hours)
// is positive, sets the key to expire after timesava*3600 seconds.
func RedisWrite(key string, data interface{}, timesava int) error {
	if err := redisPool.Set(key, data); err != nil {
		return err
	}
	if timesava <= 0 {
		return nil
	}
	return redisPool.Exprie(key, timesava*3600)
}
// RedisWriteMapField stores data in the hash at key under field via
// HSET and, when timesava (hours) is positive, sets the whole key to
// expire after timesava*3600 seconds.
func RedisWriteMapField(key, field string, data interface{}, timesava int) error {
	if err := redisPool.Hset(key, field, data); err != nil {
		return err
	}
	if timesava <= 0 {
		return nil
	}
	return redisPool.Exprie(key, timesava*3600)
}
// RedisListRPush appends data to the list stored at key via RPUSH and,
// when timesava (hours) is positive, sets the key to expire after
// timesava*3600 seconds. A negative result from Rpush is reported as
// an error.
func RedisListRPush(key string, data string, timesava int) error {
	if redisPool.Rpush(key, data) < 0 {
		return errors.New("Redis RPush Fail")
	}
	if timesava <= 0 {
		return nil
	}
	return redisPool.Exprie(key, timesava*3600)
}
// RedisListRPop pops and returns the last element of the list stored
// at key via RPOP. An empty result is reported as "No Data"; note the
// underlying client gives no way here to distinguish a missing key
// from a stored empty string.
func RedisListRPop(key string) (string, error) {
	retData := redisPool.Rpop(key)
	// Idiom fix: string("") was a redundant conversion.
	if retData == "" {
		return retData, errors.New("No Data")
	}
	return retData, nil
}
//var myPool chan redis.Conn
//var add string
//var count int
//func InitRedis(ip string, runCount int) {
// logger.Info("InitRedis", ip)
// add = ip
// count = runCount
//}
//func getRedis() redis.Conn {
// if myPool == nil {
// myPool = make(chan redis.Conn, count)
// }
// createFunc := func() {
// for i := 0; i < count/2; i++ {
// for {
// conn, err := redis.DialTimeout("tcp", add, 10*time.Second, 1*time.Second, 1*time.Second)
// if err != nil {
// logger.Error("connect to redis error", err)
// time.Sleep(3 * time.Second)
// } else {
// putRedis(conn)
// break
// }
// }
// }
// }
// if len(myPool) == 0 {
// go createFunc()
// }
// for {
// select {
// case p := <-myPool:
// s, err := p.Do("PING")
// if s == "PONG" && err == nil {
// return p
// } else {
// p.Close()
// }
// case <-time.After(5 * time.Second):
// return nil
// }
// }
//}
//func putRedis(conn redis.Conn) {
// if myPool == nil {
// myPool = make(chan redis.Conn, count)
// }
// if len(myPool) == count {
// conn.Close()
// return
// }
// myPool <- conn
//}
//func closeConnector(conn redis.Conn) {
// conn.Close()
//}
//func RedisDBSize() {
// conn := getRedis()
// if conn == nil {
// return
// }
// defer putRedis(conn)
// size, _ := conn.Do("DBSIZE")
// fmt.Printf("size is %d \n", size)
//}
//func RedisReadBytes(key string) ([]byte, error) {
// conn := getRedis()
// if conn == nil {
// return nil, errors.New("get redis error")
// }
// defer putRedis(conn)
// data, err := redis.Bytes(conn.Do("GET", key))
// if err != nil {
// return nil, err
// }
// return data, nil
//}
//func RedisReadStringMap(key string) (map[string]string, error) {
// conn := getRedis()
// if conn == nil {
// return nil, errors.New("get redis error")
// }
// defer putRedis(conn)
// data, err := redis.StringMap(conn.Do("HGETALL", key))
// if err != nil {
// return nil, err
// }
// return data, nil
//}
//func RedisReadString(key string, timesava int) (string, error) {
// conn := getRedis()
// if conn == nil {
// return "", errors.New("get redis error")
// }
// defer putRedis(conn)
// data, err := redis.String(conn.Do("GET", key))
// if err != nil {
// return "", err
// }
// if timesava > 0 {
// _, err = conn.Do("EXPIRE", key, timesava*3600)
// if err != nil {
// logger.Error("set ket time error", err)
// return "", err
// }
// }
// return data, nil
//}
//func RedisReadInt64(key string, timesava int) (int64, error) {
// conn := getRedis()
// if conn == nil {
// return 0, errors.New("get redis error")
// }
// defer putRedis(conn)
// data, err := redis.Int64(conn.Do("GET", key))
// if err != nil {
// return 0, err
// }
// if timesava > 0 {
// _, err = conn.Do("EXPIRE", key, timesava*3600)
// if err != nil {
// logger.Error("set ket time error", err)
// return 0, err
// }
// }
// return data, nil
//}
//func RedisWrite(key string, data interface{}, timesava int) error {
// conn := getRedis()
// if conn == nil {
// return errors.New("get redis error")
// }
// defer putRedis(conn)
// _, err := conn.Do("SET", key, data)
// if err != nil {
// logger.Error("save redis error", err)
// return err
// }
// _, err = conn.Do("EXPIRE", key, timesava*3600)
// if err != nil {
// logger.Error("set ket time error", err)
// return err
// }
// return nil
// // logger.Debug(n)
// // if n == int64(1) {
// // n, err := conn.Do("EXPIRE", key, timesava*3600)
// // if n == int64(1) {
// // return nil
// // } else {
// // logger.Error("set ket time error", err)
// // return err
// // }
// // } else if n == int64(0) {
// // return errors.New("the key has already existed")
// // }
// // return errors.New(fmt.Sprintf("error is %lld", n))
//}
//func RedisWriteMapField(key, field string, data interface{}, timesava int) error {
// conn := getRedis()
// if conn == nil {
// return errors.New("get redis error")
// }
// defer putRedis(conn)
// _, err := conn.Do("HSET", key, field, data)
// if err != nil {
// logger.Error("save redis error", err)
// return err
// }
// if timesava > 0 {
// _, err = conn.Do("EXPIRE", key, timesava*3600)
// if err != nil {
// logger.Error("set ket time error", err)
// return err
// }
// }
// return nil
//}
|
package inmobi
import (
"encoding/json"
"errors"
"fmt"
"github.com/econnelly/myrevenue"
"github.com/econnelly/myrevenue/adnetwork"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
)
// ReportRequester holds InMobi credentials, the reporting window, and
// the session state needed to call the publisher reporting API.
type ReportRequester struct {
	SessionID string `json:"session_id"`
	AccountID string `json:"account_id"`
	Username string `json:"username"`
	SecretKey string `json:"secret_key"`
	StartDate time.Time
	EndDate time.Time
	adnetwork.Request
	reportURL string
	rawData interface{} // last decoded API response, surfaced via GetReport
}

// ReportResponse models the /v3.0/reporting/publisher payload: either
// an error list (Error true) or per-date revenue rows.
type ReportResponse struct {
	Error bool `json:"error"`
	ErrorList []struct {
		Message string `json:"message"`
		Code int `json:"code"`
	} `json:"errorList"`
	RespList []struct {
		AdImpressions uint64 `json:"adImpressions"`
		AdRequests uint64 `json:"adRequests"`
		Clicks int `json:"clicks"`
		Earnings float64 `json:"earnings"`
		Date string `json:"date"`
	} `json:"respList"`
}

// Session models the generatesession response; RespList carries the
// session and account identifiers used on subsequent calls.
type Session struct {
	RespList []struct {
		SessionID string `json:"sessionId"`
		AccountID string `json:"accountId"`
		SubAccounts interface{} `json:"subAccounts"`
	} `json:"respList"`
	Error bool `json:"error"`
	ErrorList []interface{} `json:"errorList"`
}

// RequestFilter is a single filterBy clause of a report request.
type RequestFilter struct {
	FilterName string `json:"filterName"`
	FilterValue string `json:"filterValue"`
	Comparator string `json:"comparator"`
}

// RequestInfo describes the metrics, time frame, grouping, and filters
// of a report request.
type RequestInfo struct {
	Metrics []string `json:"metrics"`
	TimeFrame string `json:"timeFrame"`
	GroupBy []string `json:"groupBy"`
	FilterBy []RequestFilter `json:"filterBy"`
}

// RequestData is the top-level report request envelope.
type RequestData struct {
	ReportRequest RequestInfo `json:"reportRequest"`
}
// Initialize opens an API session and caches the resulting session and
// account identifiers on the requester. Call once before Fetch.
func (rr *ReportRequester) Initialize() error {
	var err error
	rr.SessionID, rr.AccountID, err = rr.startSession()
	return err
}
// startSession authenticates against the InMobi generatesession
// endpoint using the configured username/secret and returns the
// session ID and account ID from the first response entry.
func (rr *ReportRequester) startSession() (string, string, error) {
	baseUrl := "https://api.inmobi.com"
	resource := "/v1.0/generatesession/generate"
	requestUrl, _ := url.ParseRequestURI(baseUrl)
	requestUrl.Path = resource
	client := &http.Client{}
	req, err := http.NewRequest(http.MethodGet, requestUrl.String(), nil)
	if err != nil {
		// Bug fix: this used log.Fatalln, which exits the whole
		// process and made the return below unreachable.
		log.Println(err)
		return "", "", err
	}
	req.Header.Add("userName", rr.Username)
	req.Header.Add("secretKey", rr.SecretKey)
	resp, err := client.Do(req)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	session, err := rr.createSessionModel(resp.Body)
	if err != nil {
		return "", "", err
	}
	if len(session.RespList) == 0 {
		// Bug fix: indexing RespList[0] unguarded panicked on an
		// empty (e.g. error) response.
		return "", "", errors.New("inmobi: session response contained no entries")
	}
	return session.RespList[0].SessionID, session.RespList[0].AccountID, nil
}
// createSessionModel decodes a generatesession response body into a
// Session. An empty RespList is reported as an error so callers can
// safely index the result.
func (rr ReportRequester) createSessionModel(reader io.Reader) (Session, error) {
	result := Session{}
	body, e := ioutil.ReadAll(reader)
	if e != nil {
		return Session{}, e
	}
	if e = json.Unmarshal(body, &result); e != nil {
		return result, e
	}
	if len(result.RespList) == 0 {
		// Bug fix: this branch previously returned a nil error,
		// letting callers index an empty RespList and panic.
		return result, errors.New("inmobi: empty session response")
	}
	return result, nil
}
// Fetch posts a reporting query for the configured StartDate/EndDate
// window (grouped by date, impressions > 0) and returns the rows as
// myrevenue models. Initialize must have been called first so the
// session headers are valid.
func (rr *ReportRequester) Fetch() ([]myrevenue.Model, error) {
	headers := map[string]string{
		"Accept": "application/json; charset=utf-8",
		"Content-Type": "application/json",
		"accountId": rr.AccountID,
		"sessionId": rr.SessionID,
		"secretKey": rr.SecretKey,
	}
	startDate := rr.StartDate.UTC().Format("2006-01-02")
	endDate := rr.EndDate.UTC().Format("2006-01-02")
	filter := make([]RequestFilter, 1)
	filter[0].Comparator = ">"
	filter[0].FilterName = "adImpressions"
	filter[0].FilterValue = "0"
	dataStruct := RequestData{
		ReportRequest: RequestInfo{
			Metrics: []string{"adRequests", "adImpressions", "clicks", "earnings"},
			TimeFrame: fmt.Sprintf("%v:%v", startDate, endDate),
			GroupBy: []string{"date"},
			FilterBy: filter,
		},
	}
	data, err := json.Marshal(dataStruct)
	if err != nil {
		return nil, err
	}
	baseUrl := "https://api.inmobi.com"
	resource := "/v3.0/reporting/publisher"
	requestUrl, _ := url.ParseRequestURI(baseUrl)
	requestUrl.Path = resource
	resp, err := myrevenue.PostRequest(requestUrl.String(), headers, string(data), false)
	if err != nil {
		// Bug fix: resp.Body was deferred-closed before checking err;
		// a failed request returns a nil resp and panicked here.
		return nil, err
	}
	defer resp.Body.Close()
	return rr.parse(resp.Body)
}
// parse decodes the reporting response body, retains the raw decoded
// payload for GetReport, and converts it to myrevenue models.
func (rr *ReportRequester) parse(reader io.ReadCloser) ([]myrevenue.Model, error) {
	body, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	var result ReportResponse
	if err := json.Unmarshal(body, &result); err != nil {
		return nil, err
	}
	rr.rawData = result
	return rr.convertToReportModel(result)
}
// convertToReportModel maps an InMobi ReportResponse onto the generic
// myrevenue model slice, parsing each row's date in UTC.
func (rr ReportRequester) convertToReportModel(response ReportResponse) ([]myrevenue.Model, error) {
	if response.Error {
		// Robustness fix: guard against an error response with an
		// empty ErrorList instead of panicking on index 0.
		if len(response.ErrorList) == 0 {
			return nil, errors.New("inmobi: response flagged error without details")
		}
		return nil, errors.New(response.ErrorList[0].Message)
	}
	loc, err := time.LoadLocation("Etc/UTC")
	if err != nil {
		return nil, err
	}
	// Allocate only after the error checks above have passed.
	reportModels := make([]myrevenue.Model, len(response.RespList))
	for i, item := range response.RespList {
		day, parseErr := time.ParseInLocation("2006-01-02 15:04:05", item.Date, loc)
		if parseErr != nil {
			return nil, parseErr
		}
		reportModels[i].Impressions = item.AdImpressions
		reportModels[i].Revenue = item.Earnings
		reportModels[i].Requests = item.AdRequests
		reportModels[i].DateTime = day
	}
	return reportModels, nil
}
// Error is the adnetwork error hook; it currently does nothing.
func (rr ReportRequester) Error(reader io.ReadCloser, err error) {
}

// GetName returns the human-readable network name.
func (rr ReportRequester) GetName() string {
	return "Inmobi"
}

// GetReport returns the last raw decoded API response, or nil if no
// fetch has completed yet.
func (rr ReportRequester) GetReport() interface{} {
	return rr.rawData
}

// GetStartDate returns the start of the configured reporting window.
func (rr ReportRequester) GetStartDate() time.Time {
	return rr.StartDate
}

// GetEndDate returns the end of the configured reporting window.
func (rr ReportRequester) GetEndDate() time.Time {
	return rr.EndDate
}
|
// Copyright 2017 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package handlers
import (
"encoding/json"
"io"
"net"
)
type DataOptsHandler struct {
Enc *json.Encoder
}
// NewDataOptsHandler returns a DataOptsHandler that JSON-encodes every
// inserted operation to w, one object per line.
func NewDataOptsHandler(w io.Writer) *DataOptsHandler {
	return &DataOptsHandler{Enc: json.NewEncoder(w)}
}
// String implements fmt.Stringer with a human-readable handler name.
func (d *DataOptsHandler) String() string {
	return "Data Operations Storage Handler"
}
// DataOptsDriver replays parsed data operations into handler, stopping
// at the first insert that fails and returning that error (nil when
// every operation succeeds). Unknown operation types are skipped.
func DataOptsDriver(data []JSONFileFormat, handler DataHandler) error {
	var err error
	// Bug fix: an inner `var err error` used to shadow the outer one,
	// so the function always returned nil even when an insert failed.
	for _, opt := range data {
		var ipnet *net.IPNet
		switch opt.Type {
		case OptDomain:
			err = handler.InsertDomain(opt.Domain, opt.Tag, opt.Source)
		case OptCNAME:
			err = handler.InsertCNAME(opt.Name, opt.Domain, opt.TargetName, opt.TargetDomain, opt.Tag, opt.Source)
		case OptA:
			err = handler.InsertA(opt.Name, opt.Domain, opt.Address, opt.Tag, opt.Source)
		case OptAAAA:
			err = handler.InsertAAAA(opt.Name, opt.Domain, opt.Address, opt.Tag, opt.Source)
		case OptPTR:
			err = handler.InsertPTR(opt.Name, opt.Domain, opt.TargetName, opt.Tag, opt.Source)
		case OptSRV:
			err = handler.InsertSRV(opt.Name, opt.Domain, opt.Service, opt.TargetName, opt.Tag, opt.Source)
		case OptNS:
			err = handler.InsertNS(opt.Name, opt.Domain, opt.TargetName, opt.TargetDomain, opt.Tag, opt.Source)
		case OptMX:
			err = handler.InsertMX(opt.Name, opt.Domain, opt.TargetName, opt.TargetDomain, opt.Tag, opt.Source)
		case OptInfrastructure:
			if _, ipnet, err = net.ParseCIDR(opt.CIDR); err == nil {
				err = handler.InsertInfrastructure(opt.Address, opt.ASN, ipnet, opt.Description)
			}
		}
		if err != nil {
			break
		}
	}
	return err
}
// ParseDataOpts decodes a stream of JSON data-operation objects from r
// until EOF and returns them in input order.
func ParseDataOpts(r io.Reader) ([]JSONFileFormat, error) {
	var ops []JSONFileFormat
	dec := json.NewDecoder(r)
	for {
		var op JSONFileFormat
		err := dec.Decode(&op)
		if err == io.EOF {
			return ops, nil
		}
		if err != nil {
			return nil, err
		}
		ops = append(ops, op)
	}
}
// InsertDomain encodes a domain discovery operation.
func (d *DataOptsHandler) InsertDomain(domain, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptDomain,
		Domain: domain,
		Tag: tag,
		Source: source,
	})
}

// InsertCNAME encodes a CNAME record operation.
func (d *DataOptsHandler) InsertCNAME(name, domain, target, tdomain, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptCNAME,
		Name: name,
		Domain: domain,
		TargetName: target,
		TargetDomain: tdomain,
		Tag: tag,
		Source: source,
	})
}

// InsertA encodes an IPv4 address record operation.
func (d *DataOptsHandler) InsertA(name, domain, addr, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptA,
		Name: name,
		Domain: domain,
		Address: addr,
		Tag: tag,
		Source: source,
	})
}

// InsertAAAA encodes an IPv6 address record operation.
func (d *DataOptsHandler) InsertAAAA(name, domain, addr, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptAAAA,
		Name: name,
		Domain: domain,
		Address: addr,
		Tag: tag,
		Source: source,
	})
}

// InsertPTR encodes a reverse-DNS (PTR) record operation.
func (d *DataOptsHandler) InsertPTR(name, domain, target, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptPTR,
		Name: name,
		Domain: domain,
		TargetName: target,
		Tag: tag,
		Source: source,
	})
}

// InsertSRV encodes a service (SRV) record operation.
func (d *DataOptsHandler) InsertSRV(name, domain, service, target, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptSRV,
		Name: name,
		Domain: domain,
		Service: service,
		TargetName: target,
		Tag: tag,
		Source: source,
	})
}

// InsertNS encodes a nameserver (NS) record operation.
func (d *DataOptsHandler) InsertNS(name, domain, target, tdomain, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptNS,
		Name: name,
		Domain: domain,
		TargetName: target,
		TargetDomain: tdomain,
		Tag: tag,
		Source: source,
	})
}

// InsertMX encodes a mail-exchanger (MX) record operation.
func (d *DataOptsHandler) InsertMX(name, domain, target, tdomain, tag, source string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptMX,
		Name: name,
		Domain: domain,
		TargetName: target,
		TargetDomain: tdomain,
		Tag: tag,
		Source: source,
	})
}

// InsertInfrastructure encodes an ASN/netblock association operation;
// the CIDR is stored in its canonical string form.
func (d *DataOptsHandler) InsertInfrastructure(addr string, asn int, cidr *net.IPNet, desc string) error {
	return d.Enc.Encode(&JSONFileFormat{
		Type: OptInfrastructure,
		Address: addr,
		ASN: asn,
		CIDR: cidr.String(),
		Description: desc,
	})
}
|
package openid
import (
"fmt"
"net/http"
"github.com/dgrijalva/jwt-go"
)
// SetupErrorCode is the type of error code that can
// be returned by the operations done during middleware setup.
type SetupErrorCode uint32

// Setup error constants, one per distinct failure mode during setup.
const (
	SetupErrorInvalidIssuer SetupErrorCode = iota // Invalid issuer provided during setup.
	SetupErrorInvalidClientIDs // Invalid client id collection provided during setup.
	SetupErrorEmptyProviderCollection // Empty collection of providers provided during setup.
)
// ValidationErrorCode is the type of error code that can
// be returned by the operations done during token validation.
type ValidationErrorCode uint32

// Validation error constants, one per distinct failure mode observed
// while validating an incoming ID token.
const (
	ValidationErrorAuthorizationHeaderNotFound ValidationErrorCode = iota // Authorization header not found on request.
	ValidationErrorAuthorizationHeaderWrongFormat // Authorization header unexpected format.
	ValidationErrorAuthorizationHeaderWrongSchemeName // Authorization header unexpected scheme.
	ValidationErrorJwtValidationFailure // Jwt token validation failed with a known error.
	ValidationErrorJwtValidationUnknownFailure // Jwt token validation failed with an unknown error.
	ValidationErrorInvalidAudienceType // Unexpected token audience type.
	ValidationErrorInvalidAudience // Unexpected token audience content.
	ValidationErrorAudienceNotFound // Unexpected token audience value. Audience not registered.
	ValidationErrorInvalidIssuerType // Unexpected token issuer type.
	ValidationErrorInvalidIssuer // Unexpected token issuer content.
	ValidationErrorIssuerNotFound // Unexpected token value. Issuer not registered.
	ValidationErrorGetOpenIdConfigurationFailure // Failure while retrieving the OIDC configuration.
	ValidationErrorDecodeOpenIdConfigurationFailure // Failure while decoding the OIDC configuration.
	ValidationErrorGetJwksFailure // Failure while retrieving jwk set.
	ValidationErrorDecodeJwksFailure // Failure while decoding the jwk set.
	ValidationErrorEmptyJwk // Empty jwk returned.
	ValidationErrorEmptyJwkKey // Empty jwk key set returned.
	ValidationErrorMarshallingKey // Error while marshalling the signing key.
	ValidationErrorKidNotFound // Key identifier not found.
	ValidationErrorInvalidSubjectType // Unexpected token subject type.
	ValidationErrorInvalidSubject // Unexpected token subject content.
	ValidationErrorSubjectNotFound // Token missing the 'sub' claim.
	ValidationErrorIdTokenEmpty // Empty ID token.
	ValidationErrorEmptyProviders // Empty collection of providers.
)
// NOTE(review): these prefix constants appear unused, and their casing
// ("Setup Error.") does not match the hard-coded "Setup error." used
// in the Error methods below — confirm and either wire them in or
// remove them.
const setupErrorMessagePrefix string = "Setup Error."
const validationErrorMessagePrefix string = "Validation Error."

// SetupError represents the error returned by operations called during
// middleware setup.
type SetupError struct {
	Err error // underlying cause, when available
	Code SetupErrorCode
	Message string
}

// Error returns a formatted string containing the error Message.
func (se SetupError) Error() string {
	return fmt.Sprintf("Setup error. %v", se.Message)
}
// ValidationError represents the error returned by operations called during
// token validation.
type ValidationError struct {
	Err error // underlying cause, when available
	Code ValidationErrorCode
	Message string
	HTTPStatus int // status suggested to the error handler
}

// The ErrorHandlerFunc represents the function used to handle errors during token
// validation. Applications can have their own implementation of this function and
// register it using the ErrorHandler option. Through this extension point applications
// can choose what to do upon different error types, for instance return a certain HTTP status code
// and/or include some detailed message in the response.
// This function returns false if the next handler registered after the ID Token validation
// should be executed when an error is found or true if the execution should be stopped.
type ErrorHandlerFunc func(error, http.ResponseWriter, *http.Request) bool

// Error returns a formatted string containing the error Message.
func (ve ValidationError) Error() string {
	return fmt.Sprintf("Validation error. %v", ve.Message)
}
// jwtErrorToOpenIDError converts errors of the type *jwt.ValidationError
// returned during token validation into errors of type *ValidationError.
// The bitmask checks below are order-sensitive: the first matching
// category determines the resulting HTTP status.
func jwtErrorToOpenIDError(e error) *ValidationError {
	if jwtError, ok := e.(*jwt.ValidationError); ok {
		// Expired / not-yet-valid / bad-signature tokens → 401.
		if (jwtError.Errors & (jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired | jwt.ValidationErrorSignatureInvalid)) != 0 {
			return &ValidationError{
				Code: ValidationErrorJwtValidationFailure,
				Message: "Jwt token validation failed.",
				HTTPStatus: http.StatusUnauthorized,
			}
		}
		// A malformed token is the client's fault → 400.
		if (jwtError.Errors & jwt.ValidationErrorMalformed) != 0 {
			return &ValidationError{
				Code: ValidationErrorJwtValidationFailure,
				Message: "Jwt token validation failed.",
				HTTPStatus: http.StatusBadRequest,
			}
		}
		// Unverifiable token (e.g. key lookup problems) → 401, with
		// the raw jwt error text since the cause is not known here.
		if (jwtError.Errors & jwt.ValidationErrorUnverifiable) != 0 {
			// TODO: improve this once https://github.com/dgrijalva/jwt-go/issues/108 is resolved.
			// Currently jwt.Parse does not surface errors returned by the KeyFunc.
			return &ValidationError{
				Code: ValidationErrorJwtValidationFailure,
				Message: jwtError.Error(),
				HTTPStatus: http.StatusUnauthorized,
			}
		}
	}
	// Anything else (including non-jwt errors) is reported as unknown.
	return &ValidationError{
		Code: ValidationErrorJwtValidationUnknownFailure,
		Message: "Jwt token validation failed with unknown error.",
		HTTPStatus: http.StatusInternalServerError,
	}
}
// validationErrorToHTTPStatus is the default ErrorHandlerFunc: it
// writes a ValidationError's message and status to the response (or a
// generic 500 for any other error type) and always halts the chain.
func validationErrorToHTTPStatus(e error, rw http.ResponseWriter, req *http.Request) (halt bool) {
	if verr, ok := e.(*ValidationError); ok {
		http.Error(rw, verr.Message, verr.HTTPStatus)
	} else {
		rw.WriteHeader(http.StatusInternalServerError)
		// Bug fix: the error text was passed to Fprintf as the format
		// string; a message containing '%' verbs would be mangled.
		fmt.Fprint(rw, e.Error())
	}
	return true
}
|
package fitbit
import (
"context"
"fmt"
"net/http"
)
// HeartRateData is the decoded intraday heart-rate payload: daily
// summaries plus the intraday data points.
type HeartRateData struct {
	ActivitiesHeart []ActivitiesHeart `json:"activities-heart"`
	ActivitiesHeartIntraday ActivitiesHeartIntraday `json:"activities-heart-intraday"`
}

// ActivitiesHeart is one day's heart-rate summary, including the
// per-zone breakdown and the resting heart rate.
type ActivitiesHeart struct {
	DateTime string `json:"dateTime"`
	Value struct {
		CustomHeartRateZones []interface{} `json:"customHeartRateZones"`
		HeartRateZones []struct {
			CaloriesOut float64 `json:"caloriesOut"`
			Max int `json:"max"`
			Min int `json:"min"`
			Minutes int `json:"minutes"`
			Name string `json:"name"`
		} `json:"heartRateZones"`
		RestingHeartRate int `json:"restingHeartRate"`
	} `json:"value"`
}

// ActivitiesHeartIntraday carries the fine-grained samples along with
// the interval and unit type reported by the API.
type ActivitiesHeartIntraday struct {
	Dataset []struct {
		Time string `json:"time"`
		Value int `json:"value"`
	} `json:"dataset"`
	DatasetInterval int `json:"datasetInterval"`
	DatasetType string `json:"datasetType"`
}
// GetHeartRateIntradayTimeSeries fetches the intraday heart-rate time
// series described by param using a background context.
func (api *Client) GetHeartRateIntradayTimeSeries(param *HeartRateParam) (*HeartRateData, *Response, error) {
	return api.GetHeartRateIntradayTimeSeriesContext(context.Background(), param)
}
// GetHeartRateIntradayTimeSeriesContext builds the intraday heart-rate
// request for param, executes it via the client, and decodes the JSON
// payload into a HeartRateData.
func (api *Client) GetHeartRateIntradayTimeSeriesContext(ctx context.Context, param *HeartRateParam) (*HeartRateData, *Response, error) {
	req, err := GetHeartRateIntradayTimeSeriesRequest(ctx, param)
	if err != nil {
		return nil, nil, err
	}
	var data HeartRateData
	resp, err := api.do_request(req, &data)
	return &data, resp, err
}
// GetHeartRateIntradayTimeSeries is a package-level convenience wrapper
// around the client method using a background context; it drops the
// *Response value.
func GetHeartRateIntradayTimeSeries(c *Client, param *HeartRateParam) (*HeartRateData, error) {
	return GetHeartRateIntradayTimeSeriesContext(context.Background(), c, param)
}

// GetHeartRateIntradayTimeSeriesContext calls the client method with
// ctx and returns only the decoded data and error.
func GetHeartRateIntradayTimeSeriesContext(ctx context.Context, c *Client, param *HeartRateParam) (*HeartRateData, error) {
	m, _, err := c.GetHeartRateIntradayTimeSeriesContext(ctx, param)
	return m, err
}
// Get Heart Rate Intraday Time Series
// Personal App Type is required
/*
GET https://api.fitbit.com/1/user/-/activities/heart/date/[date]/[end-date]/[detail-level].json
GET https://api.fitbit.com/1/user/-/activities/heart/date/[date]/[end-date]/[detail-level]/time/[start-time]/[end-time].json
GET https://api.fitbit.com/1/user/-/activities/heart/date/[date]/1d/[detail-level].json
GET https://api.fitbit.com/1/user/-/activities/heart/date/[date]/1d/[detail-level]/time/[start-time]/[end-time].json
*/
// HeartRateParam describes an intraday heart-rate query. Dates use the
// 2006-01-02 layout and times 15:04; fields tagged len=0 may be left
// empty. StartTime and EndTime must be both set or both empty.
type HeartRateParam struct {
	Date string `validate:"len=0|datetime=2006-01-02"`
	EndDate string `validate:"len=0|datetime=2006-01-02"`
	DetailLevel string `validate:"oneof=1sec 1min"`
	StartTime string `validate:"len=0|datetime=15:04"`
	EndTime string `validate:"len=0|datetime=15:04"`
}
// parse runs the struct-tag validator over the parameters and reports
// any constraint violation.
func (c *HeartRateParam) parse() error {
	return validate.Struct(*c)
}
// GetHeartRateIntradayTimeSeriesRequest validates param and builds the
// GET request for the intraday heart-rate endpoint. EndDate defaults
// to "1d" when empty. StartTime/EndTime must be both set or both
// empty; any other combination yields ErrRequestUnknown.
func GetHeartRateIntradayTimeSeriesRequest(ctx context.Context, param *HeartRateParam) (*http.Request, error) {
	if err := param.parse(); err != nil {
		return nil, err
	}
	endDate := param.EndDate
	if endDate == "" {
		endDate = "1d"
	}
	var endpoint string
	switch {
	case param.StartTime == "" && param.EndTime == "":
		endpoint = fmt.Sprintf("https://api.fitbit.com/1/user/-/activities/heart/date/%s/%s/%s.json", param.Date, endDate, param.DetailLevel)
	case param.StartTime != "" && param.EndTime != "":
		endpoint = fmt.Sprintf("https://api.fitbit.com/1/user/-/activities/heart/date/%s/%s/%s/time/%s/%s.json",
			param.Date, endDate, param.DetailLevel, param.StartTime, param.EndTime)
	default:
		return nil, ErrRequestUnknown
	}
	return http.NewRequestWithContext(ctx, "GET", endpoint, nil)
}
|
package main
import (
"fmt"
// "time"
)
// main wires a writer goroutine to a reader goroutine over an
// unbuffered channel and blocks until the reader signals completion.
func main() {
	data := make(chan int)
	done := make(chan int)
	go read(data, done)
	go write(data)
	<-done
}
// write sends the integers 0 through 9 on b, logging each send.
func write(b chan int) {
	for value := 0; value < 10; value++ {
		b <- value
		fmt.Println("write", value)
	}
}
// read consumes ten values from b, printing each, then signals
// completion by sending 0 on quit.
func read(b chan int, quit chan int) {
	count := 0
	for count < 10 {
		fmt.Println(<-b, "read")
		count++
	}
	quit <- 0
}
|
package parcels
// https://habr.com/ru/post/114947/
// transcription https://www.study.ru/article/fonetika-angliyskogo/transkripciya-i-pravila-chteniya
// https://iloveenglish.ru/stories/view/vse-o-transkriptsii-v-anglijskom-yazike
// https://www.translate.ru/Gramm/Rules/
// https://sloovo.com/ru/biblioteka.php?type=obuchenie&language=EN&category=spravochnik&url=angliyskaya-transkripciya-i-pravila-chteniya
// https://ru.wikipedia.org/wiki/%D0%90%D0%BD%D0%B3%D0%BB%D0%BE-%D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B0%D1%8F_%D0%BF%D1%80%D0%B0%D0%BA%D1%82%D0%B8%D1%87%D0%B5%D1%81%D0%BA%D0%B0%D1%8F_%D1%82%D1%80%D0%B0%D0%BD%D1%81%D0%BA%D1%80%D0%B8%D0%BF%D1%86%D0%B8%D1%8F
// hyphenation https://github.com/mnater/hyphenator
var (
	// mRu1 maps voiced Russian consonants to their voiceless
	// (devoiced) counterparts.
	mRu1 = map[rune]rune{
		'б': 'п',
		'з': 'с',
		'д': 'т',
		'в': 'ф',
		'г': 'к',
	}
	// mRu2 is the set of Russian consonants excluding the sonorants
	// Л, М, Н, Р; a voiced consonant directly before one of these is
	// devoiced.
	mRu2 = map[rune]bool{
		'б': true,
		'в': true,
		'г': true,
		'д': true,
		'ж': true,
		'з': true,
		'й': true,
		'к': true,
		'п': true,
		'с': true,
		'т': true,
		'ф': true,
		'х': true,
		'ц': true,
		'ч': true,
		'ш': true,
		'щ': true,
	}
	// mRu3 is the set of all Russian consonants, used to collapse
	// doubled consonants into one.
	mRu3 = map[rune]bool{
		'б': true,
		'в': true,
		'г': true,
		'д': true,
		'ж': true,
		'з': true,
		'й': true,
		'к': true,
		'л': true,
		'м': true,
		'н': true,
		'п': true,
		'р': true,
		'с': true,
		'т': true,
		'ф': true,
		'х': true,
		'ц': true,
		'ч': true,
		'ш': true,
		'щ': true,
	}
)
// MetaphoneRu computes a metaphone-like phonetic key for a Russian
// word given as runes: it collapses certain letter pairs, folds
// similar-sounding vowels together, drops the soft sign, deduplicates
// doubled consonants, and devoices voiced consonants at word end or
// before a noisy consonant.
func MetaphoneRu(rs []rune) []rune {
	res := make([]rune, 0, len(rs))
	i := 0
	l := len(rs)
	for i < l {
		r := rs[i]
		i++
		// Pair rules: look ahead one rune when one is available.
		if i < l {
			switch r {
			case 'й', 'и':
				switch rs[i] {
				case 'о', 'е':
					// "йо"/"йе"/"ио"/"ие" collapse to a single 'и'.
					res = append(res, 'и')
					i++
					continue
				}
			case 'т', 'д':
				if rs[i] == 'с' {
					// "тс"/"дс" sound like 'ц'.
					res = append(res, 'ц')
					i++
					continue
				}
			}
			if r == rs[i] {
				if _, ok := mRu3[r]; ok {
					// Doubled consonant: keep a single copy.
					res = append(res, r)
					i++
					continue
				}
			}
		}
		// Single-letter rules: drop the soft sign, fold vowels.
		switch r {
		case 'ь':
			continue
		case 'ю':
			res = append(res, 'у')
			continue
		case 'о', 'ы', 'я':
			res = append(res, 'а')
			continue
		case 'е', 'ё', 'э':
			res = append(res, 'и')
			continue
		}
		if i == l {
			// Word-final consonants are devoiced.
			res = append(res, makeDeafConsonantRu(r))
			continue
		}
		rr := rs[i]
		if _, ok := mRu2[rr]; ok {
			// Voiced consonant before a noisy consonant is devoiced.
			res = append(res, makeDeafConsonantRu(r))
		} else {
			res = append(res, r)
		}
	}
	return res
}
// makeDeafConsonantRu returns the voiceless counterpart of r when r is
// a voiced Russian consonant, and r unchanged otherwise.
func makeDeafConsonantRu(r rune) rune {
	if deaf, ok := mRu1[r]; ok {
		return deaf
	}
	return r
}
var (
	// mUa1 maps voiced Ukrainian consonants to their voiceless
	// (devoiced) counterparts.
	mUa1 = map[rune]rune{
		'б': 'п',
		'з': 'с',
		'д': 'т',
		'в': 'ф',
		'г': 'к',
	}
	// mUa2 is the set of Ukrainian consonants excluding the sonorants
	// Л, М, Н, Р; a voiced consonant directly before one of these is
	// devoiced.
	mUa2 = map[rune]bool{
		'б': true,
		'в': true,
		'г': true,
		'д': true,
		'ж': true,
		'з': true,
		'й': true,
		'к': true,
		'п': true,
		'с': true,
		'т': true,
		'ф': true,
		'х': true,
		'ц': true,
		'ч': true,
		'ш': true,
		'щ': true,
	}
	// mUa3 is the set of all Ukrainian consonants, used to collapse
	// doubled consonants into one.
	mUa3 = map[rune]bool{
		'б': true,
		'в': true,
		'г': true,
		'д': true,
		'ж': true,
		'з': true,
		'й': true,
		'к': true,
		'л': true,
		'м': true,
		'н': true,
		'п': true,
		'р': true,
		'с': true,
		'т': true,
		'ф': true,
		'х': true,
		'ц': true,
		'ч': true,
		'ш': true,
		'щ': true,
	}
)
// MetaphoneUa computes a metaphone-like phonetic key for a Ukrainian
// word given as runes, mirroring MetaphoneRu but with the Ukrainian
// vowel set (including 'ї' → 'і' and 'є' → 'и').
func MetaphoneUa(rs []rune) []rune {
	res := make([]rune, 0, len(rs))
	i := 0
	l := len(rs)
	for i < l {
		r := rs[i]
		i++
		// Pair rules: look ahead one rune when one is available.
		if i < l {
			switch r {
			case 'й', 'и':
				switch rs[i] {
				case 'о', 'е':
					// "йо"/"йе"/"ио"/"ие" collapse to a single 'и'.
					res = append(res, 'и')
					i++
					continue
				}
			case 'т', 'д':
				if rs[i] == 'с' {
					// "тс"/"дс" sound like 'ц'.
					res = append(res, 'ц')
					i++
					continue
				}
			}
			if r == rs[i] {
				if _, ok := mUa3[r]; ok {
					// Doubled consonant: keep a single copy.
					res = append(res, r)
					i++
					continue
				}
			}
		}
		// Single-letter rules: drop the soft sign, fold vowels.
		switch r {
		case 'ь':
			continue
		case 'ю':
			res = append(res, 'у')
			continue
		case 'ї':
			res = append(res, 'і')
			continue
		case 'о', 'я':
			res = append(res, 'а')
			continue
		case 'е', 'є':
			res = append(res, 'и')
			continue
		}
		if i == l {
			// Word-final consonants are devoiced.
			res = append(res, makeDeafConsonantUa(r))
			continue
		}
		rr := rs[i]
		if _, ok := mUa2[rr]; ok {
			// Voiced consonant before a noisy consonant is devoiced.
			res = append(res, makeDeafConsonantUa(r))
		} else {
			res = append(res, r)
		}
	}
	return res
}
// makeDeafConsonantUa returns the voiceless counterpart of r when r is
// a voiced Ukrainian consonant, and r unchanged otherwise.
func makeDeafConsonantUa(r rune) rune {
	if deaf, ok := mUa1[r]; ok {
		return deaf
	}
	return r
}
|
package data
import (
"github.com/pkg/errors"
"upper.io/db.v3/lib/sqlbuilder"
"upper.io/db.v3/postgresql"
)
// DB is the package-wide database handle populated by SetupDB.
var DB sqlbuilder.Database

// settings holds the PostgreSQL connection parameters.
// NOTE(review): credentials are hard-coded in source; consider loading
// them from the environment or a secrets store instead.
var settings = postgresql.ConnectionURL{
	Database: `postgres`,
	Host: `db`,
	User: `postgres`,
	Password: `password`,
}
func SetupDB() error {
var err error
DB, err = postgresql.Open(settings)
if err != nil {
return errors.Wrap(err, "could not connect to database")
}
return nil
} |
package reminderscheduler
import (
"time"
"github.com/malware-unicorn/managed-bots/gcalbot/gcalbot"
)
// sendReminderLoop fires sendReminders once per wall-clock minute
// until shutdownCh signals. It first sleeps to the top of the next
// minute so that every tick lands on a minute boundary.
func (r *ReminderScheduler) sendReminderLoop(shutdownCh chan struct{}) error {
	now := time.Now()
	startOfNextMinute := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()+1, 0, 0, time.Local)
	time.Sleep(startOfNextMinute.Sub(now))
	ticker := time.NewTicker(time.Minute)
	defer func() {
		ticker.Stop()
		r.Debug("shutting down sendReminderLoop")
	}()
	// Deliver immediately for the minute we just woke into.
	r.sendReminders(time.Now())
	for {
		select {
		case <-shutdownCh:
			return nil
		case minute := <-ticker.C:
			r.sendReminders(minute)
		}
	}
}
// sendReminders delivers every reminder scheduled for the minute
// represented by sendMinute, prunes fully-delivered messages, and then
// drops that minute's bookkeeping.
func (r *ReminderScheduler) sendReminders(sendMinute time.Time) {
	timestamp := getReminderTimestamp(sendMinute, 0)
	r.minuteReminders.ForEachReminderMessageInMinute(timestamp, func(msg *ReminderMessage) {
		// A message may carry several lead-time reminders; fire the
		// ones whose (start time - lead duration) lands on this minute.
		for duration := range msg.MinuteReminders {
			msgTimestamp := getReminderTimestamp(msg.StartTime, duration)
			if msgTimestamp == timestamp {
				minutesBefore := gcalbot.GetMinutesFromDuration(duration)
				if minutesBefore == 0 {
					r.ChatEcho(msg.KeybaseConvID, "An event is starting now: %s", msg.MsgContent)
				} else {
					r.ChatEcho(msg.KeybaseConvID, "An event is starting in %s: %s",
						gcalbot.MinutesBeforeString(minutesBefore), msg.MsgContent)
				}
				// Deleting while ranging is safe in Go; this marks the
				// reminder as sent.
				delete(msg.MinuteReminders, duration)
				r.stats.Count("sendReminders - reminder")
			}
		}
		if len(msg.MinuteReminders) == 0 {
			r.subscriptionReminders.RemoveReminderMessageFromSubscription(msg)
			r.eventReminders.RemoveReminderMessageFromEvent(msg)
			r.Debug("removed event with no reminders %s", msg.EventID)
			// the entire minute will be removed, and since this is the event's last minute there is no need to delete 'all' minutes
		}
	})
	r.minuteReminders.RemoveMinute(timestamp)
	// Warn when delivery ate a large slice of the 60-second budget.
	sendDuration := time.Since(sendMinute)
	if sendDuration.Seconds() > 15 {
		r.Errorf("sending reminders took %s", sendDuration.String())
	}
	r.stats.Value("sendReminders - duration - seconds", sendDuration.Seconds())
}
|
package main
import (
"fmt"
"sync"
"time"
)
// ScoreUpdate is the message pushed to subscribers: the live score
// plus the most recently completed game.
type ScoreUpdate struct {
	CurrentScore Score `json:"currentScore"`
	LastScore LastScore `json:"lastScore"`
}

// Score holds the running points for each team.
type Score struct {
	Blue int `json:"blue"`
	Red int `json:"red"`
}

// LastScore records the final score of the previous game; Displayable
// stays false until a first game has finished.
type LastScore struct {
	Score Score `json:"score"`
	Timestamp time.Time `json:"timestamp"`
	Displayable bool `json:"displayable"`
}

// ScoreKeeper contains score keeping logic: the live score, the
// subscriber channels that receive updates, and the last game result.
// The mutex guards mutations of the subscribers slice.
type ScoreKeeper struct {
	Blue int `json:"blue"`
	Red int `json:"red"`
	subscribers []chan ScoreUpdate
	mutex sync.Mutex
	lastScore LastScore
}
// ResetScore zeroes both team scores and asynchronously broadcasts the
// new state to all subscribers.
// NOTE(review): the score fields are written here without holding
// mutex while broadcast reads them from another goroutine — looks like
// a data race; confirm and guard if so.
func (currentScore *ScoreKeeper) ResetScore() {
	currentScore.Red = 0
	currentScore.Blue = 0
	go currentScore.broadcast()
}
// UpdateScore adds addScore to the running totals. When either team
// reaches 10 the game ends: the result is stored in lastScore and the
// live score resets to 0-0. The new state is broadcast asynchronously.
// NOTE(review): fields are mutated without holding mutex while
// broadcast reads them concurrently — looks like a data race; verify.
func (currentScore *ScoreKeeper) UpdateScore(addScore Score) {
	currentScore.Red += addScore.Red
	currentScore.Blue += addScore.Blue
	if currentScore.Red >= 10 || currentScore.Blue >= 10 {
		currentScore.lastScore = LastScore{Score{currentScore.Blue, currentScore.Red}, time.Now(), true}
		currentScore.Red = 0
		currentScore.Blue = 0
	}
	go currentScore.broadcast()
}
// broadcast sends the current score and last game result to every
// subscriber channel; each send blocks until that subscriber receives.
// NOTE(review): subscribers is iterated without the mutex while
// Subscribe/Unsubscribe mutate it under the lock — likely a data race;
// confirm and guard if so.
func (currentScore *ScoreKeeper) broadcast() {
	for _, subscriber := range currentScore.subscribers {
		score := Score{Red: currentScore.Red, Blue: currentScore.Blue}
		subscriber <- ScoreUpdate{CurrentScore: score, LastScore: currentScore.lastScore}
	}
}
// Subscribe creates a new channel, adds it to the subscriber list
// synchronously (under the mutex), and then sends the current score
// to the channel asynchronously. The caller must eventually call
// Unsubscribe with the returned channel.
func (currentScore *ScoreKeeper) Subscribe() chan ScoreUpdate {
    c := make(chan ScoreUpdate)
    currentScore.mutex.Lock()
    currentScore.subscribers = append(currentScore.subscribers, c)
    currentScore.mutex.Unlock()
    // Send in a goroutine: a synchronous send on the unbuffered
    // channel would block until the subscriber starts receiving.
    go func() {
        score := Score{Red: currentScore.Red, Blue: currentScore.Blue}
        c <- ScoreUpdate{CurrentScore: score, LastScore: currentScore.lastScore}
    }()
    return c
}
// Unsubscribe asynchronously searches the subscriber list for c
// (under the mutex), closes it, and removes it from the list.
//
// Fixes over the original: the search stops at the first match
// (continuing could close c twice and panic if it were ever
// registered twice), the remaining-subscriber count is read while
// still holding the lock (it was read after Unlock — a race), and
// nothing is logged when c was not registered at all.
func (currentScore *ScoreKeeper) Unsubscribe(c chan ScoreUpdate) {
    go func() {
        var foundIndex int
        var found = false
        currentScore.mutex.Lock()
        for index, subscriber := range currentScore.subscribers {
            if subscriber == c {
                found = true
                foundIndex = index
                close(c)
                break
            }
        }
        if found {
            currentScore.subscribers = append(currentScore.subscribers[:foundIndex], currentScore.subscribers[foundIndex+1:]...)
        }
        remaining := len(currentScore.subscribers)
        currentScore.mutex.Unlock()
        if found {
            fmt.Printf("Removed index: %d, %d remaining subscribers.\n", foundIndex, remaining)
        }
    }()
}
// ScoreKeeperBuilder constructs a ready-to-use ScoreKeeper: zero
// score, an empty subscriber list, and no recorded last game.
func ScoreKeeperBuilder() ScoreKeeper {
    return ScoreKeeper{
        Blue:        0,
        Red:         0,
        subscribers: []chan ScoreUpdate{},
        mutex:       sync.Mutex{},
        lastScore:   LastScore{},
    }
}
|
package database
import (
"FPproject/Backend/log"
"FPproject/Backend/models"
"time"
)
// InsertUH inserts a user-health row keyed by id, stamping created
// and updated with the current time.
// Returns the id on success; an empty string and the error otherwise.
func (d *Database) InsertUH(id string, h models.UserHealth) (string, error) {
    res, err := d.db.Exec("INSERT INTO userhealth(id, gender, height, weight, dob, active, target, created, updated) VALUES(?,?,?,?,?,?,?,?,?)",
        id, h.Gender, h.Height, h.Weight, h.DOB, h.Active, h.Target, time.Now(), time.Now())
    if err != nil {
        log.Warning.Println(err)
        return "", err
    }
    // RowsAffected distinguishes "statement ran" from "row written".
    affected, err := res.RowsAffected()
    if err != nil {
        log.Warning.Println(err)
        return "", err
    } else if affected < 1 {
        log.Warning.Println(ErrNoRowsAffected)
        return "", ErrNoRowsAffected
    }
    return id, nil
}
// DelUH deletes the user-health row with the given id.
// It returns the id on success, or an empty string and an error when
// the statement fails or no row was actually removed.
func (d *Database) DelUH(id string) (string, error) {
    res, err := d.db.Exec("DELETE FROM userhealth WHERE id=?", id)
    if err != nil {
        log.Warning.Println(err)
        return "", err
    }
    count, countErr := res.RowsAffected()
    switch {
    case countErr != nil:
        log.Warning.Println(countErr)
        return "", countErr
    case count < 1:
        // The statement ran but matched nothing.
        log.Warning.Println(ErrNoRowsAffected)
        return "", ErrNoRowsAffected
    }
    return id, nil
}
// UpdateUH rewrites the mutable user-health columns for h.ID and
// refreshes the updated timestamp (created is left untouched).
// Returns h.ID on success; an empty string and the error otherwise.
func (d *Database) UpdateUH(h models.UserHealth) (string, error) {
    res, err := d.db.Exec("UPDATE userhealth SET gender=?, height=?, weight=?, dob=?, active=?, target=?, updated=? WHERE id=?",
        h.Gender, h.Height, h.Weight, h.DOB, h.Active, h.Target, time.Now(), h.ID)
    if err != nil {
        log.Warning.Println(err)
        return "", err
    }
    // NOTE(review): an UPDATE that writes identical values may report
    // zero affected rows on some drivers, which surfaces here as
    // ErrNoRowsAffected — confirm that is acceptable.
    affected, err := res.RowsAffected()
    if err != nil {
        log.Warning.Println(err)
        return "", err
    } else if affected < 1 {
        log.Warning.Println(ErrNoRowsAffected)
        return "", ErrNoRowsAffected
    }
    return h.ID, nil
}
// GetUH fetches the user-health row for id.
//
// The column list is spelled out (instead of SELECT *) so the query
// stays in lockstep with the Scan destinations even if the table
// later gains or reorders columns; the names match InsertUH.
func (d *Database) GetUH(id string) (models.UserHealth, error) {
    var h models.UserHealth
    err := d.db.QueryRow(
        "SELECT id, gender, height, weight, dob, active, target, created, updated FROM userhealth WHERE id=?",
        id,
    ).Scan(&h.ID, &h.Gender, &h.Height, &h.Weight, &h.DOB, &h.Active, &h.Target, &h.Created, &h.Updated)
    if err != nil {
        log.Warning.Println(err)
        return h, err
    }
    return h, nil
}
|
package testdata
import (
"github.com/frk/gosql/internal/testdata/common"
)
// FilterNestedRecords declares a filter query against the
// "test_nested" relation (aliased "n"); the embedded FilterMaker
// marks the type for the gosql test-data generator.
type FilterNestedRecords struct {
    _ *common.Nested `rel:"test_nested:n"`
    common.FilterMaker
}
|
package model
import (
"Blog/util/errmsg"
"errors"
"github.com/jinzhu/gorm"
)
// Post is the blog-post model persisted via gorm.
type Post struct {
    Category Category `gorm:"foreignKey:Cid" json:"category,omitempty"`
    gorm.Model
    Title string `gorm:"type:varchar(100);not null" json:"title,omitempty"`
    Cid   int    `json:"cid,omitempty"`
    Desc  string `gorm:"type:varchar(200);" json:"desc,omitempty"`
    // Tags is stored as a single flattened string: mapping []string
    // directly caused a gorm panic, so the slice form was abandoned.
    Tags    string `gorm:"type:varchar(300);" json:"tags,omitempty"`
    Content string `gorm:"type:longtext;" json:"content,omitempty"`
}
// InsertPost persists a new post.
// Returns errmsg.SUCCESS on success and errmsg.ERROR on any DB error.
func InsertPost(post *Post) errmsg.Code {
    // post is already a *Post; the original passed &post, handing
    // gorm a **Post and relying on it to double-dereference.
    err := db.Create(post).Error
    if err != nil {
        return errmsg.ERROR
    }
    return errmsg.SUCCESS
}
// GetPost fetches a single post (with its category preloaded) by
// primary key. Returns ERROR_POST_NOT_FOUND when id does not exist,
// ERROR on any other DB failure.
func GetPost(id int) (*Post, errmsg.Code) {
    var p Post
    err := db.Preload("Category").First(&p, id).Error
    if err != nil {
        if errors.Is(err, gorm.ErrRecordNotFound) {
            return &p, errmsg.ERROR_POST_NOT_FOUND
        } else {
            logger.Debug("Fail to get post:", err)
            return &p, errmsg.ERROR
        }
    }
    return &p, errmsg.SUCCESS
}
// GetPostByCate returns one page of the posts in category cid.
// pageSize is the page length; (pageNum-1)*pageSize rows are skipped.
// A not-found result is treated as an empty page rather than an
// error, consistent with GetPostsList (the original returned ERROR
// for it here but tolerated it in GetPostsList).
func GetPostByCate(cid, pageSize, pageNum int) ([]Post, errmsg.Code) {
    var posts []Post
    err := db.Preload("Category").Limit(pageSize).Offset((pageNum-1)*pageSize).Where("cid = ?", cid).Find(&posts).Error
    if err != nil && err != gorm.ErrRecordNotFound {
        return nil, errmsg.ERROR
    }
    return posts, errmsg.SUCCESS
}
// GetPostsList returns one page of all posts with their categories.
// A not-found result is treated as an empty page, not an error.
func GetPostsList(pageSize int, pageNum int) ([]Post, errmsg.Code) {
    var posts []Post
    // limit: how many rows to take; offset: which row to start from
    err := db.Preload("Category").Limit(pageSize).Offset((pageNum - 1) * pageSize).Find(&posts).Error
    if err != nil && err != gorm.ErrRecordNotFound {
        return nil, errmsg.ERROR
    }
    return posts, errmsg.SUCCESS
}
// EditPost applies the fields of post to its database row.
// NOTE(review): gorm's Updates skips zero-value fields when given a
// struct, so clearing a field to ""/0 is a silent no-op — confirm
// that is the intended edit semantics.
func EditPost(post *Post) errmsg.Code {
    logger.Debug("Edit post:", post)
    err := db.Model(post).Updates(post).Error
    if err != nil {
        return errmsg.ERROR
    }
    return errmsg.SUCCESS
}
// DeletePost deletes the post with the given id (a soft delete, since
// Post embeds gorm.Model and therefore has DeletedAt).
func DeletePost(id int) errmsg.Code {
    // Use a locally scoped error: the original assigned to a shared
    // package-level `err`, leaking state between calls and racing
    // under concurrent requests.
    if err := db.Delete(&Post{}, id).Error; err != nil {
        return errmsg.ERROR
    }
    return errmsg.SUCCESS
}
|
package templatecode
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
)
const (
    // index is the default view template written into a newly
    // generated views/<name>/index.html file.
    index = `{{define "Content"}}<div>久等网络</div>{{end}}`
)
// CreateController generates a controller source file.
// project: project (module) name used in import paths
// name: file name, without extension
// path: directory the file is created in
// isCreate: optional flags — [0] also register a route, [1] also create a view
func CreateController(project, name, path string, isCreate ...bool) {
    create(project, name, path, 1, isCreate...)
}
// CreateServices generates a services source file.
// project: project (module) name used in import paths
// name: file name, without extension
// path: directory the file is created in
func CreateServices(project, name, path string) {
    create(project, name, path, 2)
}
// create generates a source file from a template.
// project: project name used in import paths
// name: file name, without extension
// path: destination directory
// fType: kind of file to generate: 1-controller 2-services
// isCreate: controller-only flags — [0] register a route, [1] create a view
func create(project, name, path string, fType int, isCreate ...bool) {
    var filePath = path + "/" + name + ".go"
    var fileType = ""
    var target = ""
    var targetChild = ""
    var tplString = ""
    switch fType {
    case 1: // controllers
        fileType = "controllers"
        target = initName(name) + "Controller"
        fmt.Println("target:-----------", target)
        targetChild = "BaseController"
        tplString = getTemplateController()
        var l = len(isCreate)
        if l >= 1 {
            if isCreate[0] {
                updateRouters(initName(name), path)
            }
            if l >= 2 {
                if isCreate[1] {
                    createView(strings.ToLower(initName(name)), path)
                }
            }
        }
    case 2: // services
        fileType = "services"
        target = initName(name) + "Services"
        targetChild = ""
        tplString = getTemplateServices()
    }
    // Fill in the template placeholders, then write the file.
    var result = replaceType(fileType, tplString)
    result = replaceTarget(target, result)
    result = replaceProject(project, result)
    result = replaceParents(targetChild, result)
    _, er := createFile(filePath, result)
    if er != nil {
        fmt.Println("创建文件失败:-----", er)
    }
}
// createView creates views/<name>/index.html with the default
// template, skipping creation when the view directory already exists.
// NOTE(review): the view root is derived by splitting path on the
// literal "controllers" — this breaks if that word appears elsewhere
// in the path; confirm paths always end in .../controllers.
func createView(name, path string) {
    var info = strings.Split(path, "controllers")
    var viewDir = info[0] + "/views/" + name
    if createDirectory(viewDir) {
        _, er := createFile(viewDir+"/index.html", index)
        if er != nil {
            fmt.Println("创建文件失败:-----", er)
        }
    }
}
// updateRouters registers the new controller in routers/routers.go
// by expanding the //next placeholder into a fresh route entry.
func updateRouters(controllerName, path string) {
    var info = strings.Split(path, "controllers")
    var filePath = info[0] + "/routers/routers.go"
    var cn, er = ioutil.ReadFile(filePath)
    if er != nil {
        fmt.Println("读取router文件失败:", er)
        return
    }
    var res = replaceRouters(controllerName, string(cn))
    // 0644 is a real permission value; the original passed
    // os.ModeAppend, which is a file-mode *flag*, not a permission,
    // and would yield bogus permissions if the file had to be created.
    er = ioutil.WriteFile(filePath, []byte(res), 0644)
    if er != nil {
        fmt.Println("更新router文件失败:", er)
        return
    }
}
func createFile(filePath, content string) (int, error) {
var f1, er = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE, 0666)
if er != nil {
fmt.Println("openfile__er打开文件失败:", er)
return 0, er
}
defer f1.Close()
return f1.WriteString(content)
}
func createDirectory(path string) bool {
var directory, err = os.Open(path)
if os.IsNotExist(err) { //判断文件夹是否存在
err = os.MkdirAll(path, 0777) //创建目录
if err != nil {
fmt.Println("创建文件夹失败!", err)
}
} else {
directory.Close()
fmt.Println("项目文件夹已存在")
return false
}
if err != nil {
return false
}
return true
}
// Patterns used by initName, compiled once at package init: the
// original recompiled both regexps on every call, which is wasted
// work for constant patterns.
var (
    initNameHeadRe       = regexp.MustCompile(`^([a-z])|(_[a-z])+`)
    initNameUnderscoreRe = regexp.MustCompile(`(_)+`)
)

// initName converts a snake_case table/file name to CamelCase: the
// leading letter and each letter following an underscore are
// upper-cased, then all underscores are stripped.
// name: table name
// return: the initialized (CamelCase) name
func initName(name string) string {
    var n = initNameHeadRe.ReplaceAllStringFunc(name, strings.ToUpper)
    return initNameUnderscoreRe.ReplaceAllString(n, "")
}
// replaceType substitutes the literal "filetype" placeholder in the
// template with the package name. The placeholder is a fixed string,
// so plain string replacement beats compiling a regexp per call.
// typeName: package name
// strModel: template content
// return: the substituted content
func replaceType(typeName, strModel string) string {
    return strings.ReplaceAll(strModel, "filetype", typeName)
}
// replaceTarget 替换template的结构体名称
// targetName: 结构体名称
// strModel: template字符串内容
// return: string-替换后的数据
func replaceTarget(targetName, strModel string) string {
var reg = regexp.MustCompile(`target`)
return reg.ReplaceAllString(strModel, initName(targetName))
}
// replaceRouters expands the //next marker in routers.go into a new
// route entry (templateRouters), then fills the entry's two
// placeholders: qazname -> lower-cased controller name,
// zaqcontroller -> <Name>Controller.
func replaceRouters(controllerName, strModel string) string {
    var reg = regexp.MustCompile(`//next`)
    var result = reg.ReplaceAllString(strModel, templateRouters)
    reg = regexp.MustCompile(`qazname`)
    var name = initName(controllerName)
    result = reg.ReplaceAllString(result, strings.ToLower(name))
    reg = regexp.MustCompile(`zaqcontroller`)
    return reg.ReplaceAllString(result, name+"Controller")
}
// replaceProject substitutes the literal "project" placeholder with
// the lower-cased project name. A fixed string needs no regexp, so
// the per-call regexp.MustCompile was dropped.
// projectName: project name
// strModel: template content
// return: the substituted content
func replaceProject(projectName, strModel string) string {
    return strings.ReplaceAll(strModel, "project", strings.ToLower(projectName))
}
// replaceChild 替换template的结构体所继承的结构体名称
// name: 结构体名称
// strModel: template字符串内容
// return: string-替换后的数据
func replaceParents(name, strModel string) string {
var reg = regexp.MustCompile(`child`)
return reg.ReplaceAllString(strModel, initName(name))
}
// getTemplateController returns the controller template string.
func getTemplateController() string {
    return templateController
}

// getTemplateServices returns the services template string.
func getTemplateServices() string {
    return templateServices
}
|
package config
import (
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/widuu/goini"
"strconv"
)
// Options holds the webhook server configuration.
type Options struct {
    Port           int    // HTTPS listen port
    TlsCertFile    string // path to the TLS certificate (PEM)
    TlsKeyFile     string // path to the TLS private key (PEM)
    SidecarCfgFile string // path to the sidecar injection config (YAML)
}
// ParseConf resolves the server options. Defaults are applied first
// and then selectively overridden from the ini file named by
// --configure / -C, so both code paths share one set of default
// values (the original duplicated them in each branch).
// A malformed port is logged and falls back to 443. Never returns a
// non-nil error in the current implementation.
func ParseConf(c *cli.Context) (*Options, error) {
    options := &Options{
        Port:           443,
        TlsCertFile:    "/etc/webhook/certs/cert.pem",
        TlsKeyFile:     "/etc/webhook/certs/key.pem",
        SidecarCfgFile: "/etc/webhook/config/sidecarconfig.yaml",
    }
    if !c.IsSet("configure") && !c.IsSet("C") {
        return options, nil
    }
    var conf *goini.Config
    if c.IsSet("configure") {
        conf = goini.SetConfig(c.String("configure"))
    } else {
        conf = goini.SetConfig(c.String("C"))
    }
    port, err := strconv.Atoi(conf.GetValue("common", "port"))
    if nil != err {
        // Keep the 443 default on a bad/missing port value.
        log.Errorf("%v", err)
    } else {
        options.Port = port
    }
    if v := conf.GetValue("common", "tlsCertFile"); "" != v {
        options.TlsCertFile = v
    }
    if v := conf.GetValue("common", "tlsKeyFile"); "" != v {
        options.TlsKeyFile = v
    }
    if v := conf.GetValue("common", "sidecarCfgFile"); "" != v {
        options.SidecarCfgFile = v
    }
    return options, nil
}
|
package main
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"github.com/ubclaunchpad/inertia/common"
"github.com/ubclaunchpad/inertia/daemon/inertiad/log"
)
// envHandler manages requests to manage environment variables:
// POST creates/updates/removes a variable, GET lists stored values.
// Any other verb now gets 405 (the original silently returned an
// empty 200 for unsupported methods).
func envHandler(w http.ResponseWriter, r *http.Request) {
    if deployment == nil {
        http.Error(w, msgNoDeployment, http.StatusPreconditionFailed)
        return
    }
    switch r.Method {
    case http.MethodPost:
        envPostHandler(w, r)
    case http.MethodGet:
        envGetHandler(w, r)
    default:
        http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
    }
}
// envPostHandler adds, updates, or removes one stored environment
// variable described by the JSON body (a common.EnvRequest).
func envPostHandler(w http.ResponseWriter, r *http.Request) {
    // Set up logger
    logger := log.NewLogger(log.LoggerOptions{
        Stdout:     os.Stdout,
        HTTPWriter: w,
    })
    // Parse request
    body, err := ioutil.ReadAll(r.Body)
    if err != nil {
        logger.WriteErr(err.Error(), http.StatusLengthRequired)
        return
    }
    defer r.Body.Close()
    var envReq common.EnvRequest
    err = json.Unmarshal(body, &envReq)
    if err != nil {
        logger.WriteErr(err.Error(), http.StatusBadRequest)
        return
    }
    if envReq.Name == "" {
        logger.WriteErr("no variable name provided", http.StatusBadRequest)
        // This return was missing: the request previously fell
        // through and stored an unnamed variable.
        return
    }
    manager, found := deployment.GetDataManager()
    if !found {
        logger.WriteErr("no environment manager found", http.StatusPreconditionFailed)
        return
    }
    // Add, update, or remove values from storage
    if envReq.Remove {
        err = manager.RemoveEnvVariable(envReq.Name)
    } else {
        err = manager.AddEnvVariable(
            envReq.Name, envReq.Value, envReq.Encrypt,
        )
    }
    if err != nil {
        logger.WriteErr(err.Error(), http.StatusInternalServerError)
        return
    }
    logger.WriteSuccess("environment variable saved - this will be applied the next time your container is started", http.StatusAccepted)
}
// envGetHandler responds with the stored environment variables
// (values are not decrypted) as a JSON object.
func envGetHandler(w http.ResponseWriter, r *http.Request) {
    // Set up logger
    logger := log.NewLogger(log.LoggerOptions{
        Stdout:     os.Stdout,
        HTTPWriter: w,
    })
    manager, found := deployment.GetDataManager()
    if !found {
        logger.WriteErr("no environment manager found", http.StatusPreconditionFailed)
        return
    }
    values, err := manager.GetEnvVariables(false)
    if err != nil {
        logger.WriteErr(err.Error(), http.StatusInternalServerError)
        // This return was missing: the handler previously wrote a
        // 200 header and a body after already reporting the error.
        return
    }
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(http.StatusOK)
    json.NewEncoder(w).Encode(values)
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
)
// main reads n (number of cities) and k (tower radius) followed by
// the n city coordinates from stdin, then prints the minimum number
// of towers needed so every city is within distance k of some tower.
func main() {
    var r = bufio.NewReader(os.Stdin)
    var n, k int
    fmt.Fscan(r, &n, &k)
    var x = make([]int, n)
    for i := 0; i < n; i++ {
        fmt.Fscan(r, &x[i])
    }
    var solution = Solve(x, k)
    fmt.Println(solution)
}
// Solve returns the minimum number of towers of radius k required to
// cover every city coordinate in x (x is sorted in place). Greedy
// sweep: place each tower at the right-most city that still covers
// the left-most uncovered city, then jump to the next uncovered city.
func Solve(x []int, k int) int {
    sort.Ints(x)
    towers := 0
    cursor := 0
    for cursor < len(x) {
        tower := findNextCityThatCanCoverMe(x, cursor, k)
        towers++
        next := findNextUncoveredCity(x, tower, k)
        if next < 0 {
            // Everything to the right is already covered.
            break
        }
        cursor = next
    }
    return towers
}

// findNextCityThatCanCoverMe returns the index of the right-most city
// whose tower (radius k) still covers city i.
func findNextCityThatCanCoverMe(x []int, i, k int) int {
    pos := i + 1
    for pos < len(x) && x[pos]-k <= x[i] {
        pos++
    }
    if pos < len(x) {
        return pos - 1
    }
    return len(x) - 1 // last city
}

// findNextUncoveredCity returns the index of the first city to the
// right of i that a tower at x[i] does not reach, or -1 when every
// remaining city is covered.
func findNextUncoveredCity(x []int, i, k int) int {
    for pos := i + 1; pos < len(x); pos++ {
        if x[pos] > x[i]+k {
            return pos
        }
    }
    return -1
}
|
// MIT License
//
// Copyright (c) 2016 C.T.Chen
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// Package ssvg is for generate simple SVG, support muti-frame animation.
package ssvg
import (
"fmt"
"io"
"math"
"os"
)
// colorTable is the palette cycled through by DefaultColors.
var colorTable = []string{
    "red",
    "yellow",
    "green",
    "blue",
    "cyan",
    "purple",
    "brown",
    "chocolate",
    "crimson",
    "darkgoldenrod",
    "darkkhaki",
    "darkslateblue",
    "darkslategrey",
    "darkred",
    "darkblue",
    "darkcyan",
    "goldenrod",
    "fuchsia",
    "deepskyblue",
    "darkorange",
    "orangered",
    "olive",
    "mediumseagreen",
    "midnightblue",
    "sandybrown",
    "lightcoral",
    "lightseagreen",
}

// DefaultColors is an easy way to choose a color for a series of
// items; colors repeat every len(colorTable) indexes.
// Generalized to accept any integer: Go's % keeps the dividend's
// sign, so a negative index previously panicked with an
// out-of-range access — it now wraps around instead.
func DefaultColors(index int) string {
    i := index % len(colorTable)
    if i < 0 {
        i += len(colorTable)
    }
    return colorTable[i]
}
// Style keeps the drawing style attributes for an element.
type Style struct {
    // Fill color; empty means "none" for shapes.
    FillColor string
    // Stroke color; empty means black for shapes.
    StrokeColor string
    // Stroke width in millimetres; 0 is treated as 1.
    StrokeWidth float64
    // Transparent level between 0~1, the opposite of opacity.
    Transparency float64
}
// Write emits the element's style attributes to w. Defaults: no
// fill, black stroke, stroke width 1; widths are scaled by the
// canvas unit.
// NOTE(review): the opacity attribute is written only when the
// computed opacity is > 0, so a fully transparent style loses its
// opacity attribute entirely — confirm that is intended.
func (style *Style) Write(w io.Writer, svg *Svg) {
    fc := style.FillColor
    sc := style.StrokeColor
    sw := style.StrokeWidth
    op := 1 - style.Transparency
    if fc == "" {
        fc = "none"
    }
    if sc == "" {
        sc = "#000000"
    }
    if sw <= 0 {
        sw = 1
    }
    if op > 0 {
        fmt.Fprintf(w, ` style="fill:`+fc+
            `;stroke:`+sc+
            `;stroke-width:%g" opacity="%g"`, sw*svg.unit, op)
    } else {
        fmt.Fprintf(w, ` style="fill:`+fc+
            `;stroke:`+sc+
            `;stroke-width:%g" `, sw*svg.unit)
    }
}
// Element is the abstract drawable: it can render itself and fold
// its extent into the canvas bounding box.
type Element interface {
    Write(w io.Writer, svg *Svg)
    Range(xmin, xmax, ymin, ymax *float64)
}

// Point is a vertex used inside polyline-like elements; for a
// point-shaped glyph use Icon instead.
type Point struct {
    X, Y float64
}

//func (this * Point) Write(w * io.Writer, unit, pr float64) {
//	s := fmt.Sprintf("<circle cx=\"%g\" cy=\"%g\" r=\"%g\" ", this.X, this.Y, pr)
//	io.WriteString(w, s)
//	Style.Write(w, svg)
//	io.WriteString(w, " /> \n")
//	break;
//}
// include grows the bounding box [*xmin, *xmax] x [*ymin, *ymax] so
// that it contains the point (x, y).
func include(xmin, xmax, ymin, ymax *float64, x, y float64) {
    *xmin, *xmax = math.Min(*xmin, x), math.Max(*xmax, x)
    *ymin, *ymax = math.Min(*ymin, y), math.Max(*ymax, y)
}
// Icon is a simple fixed-size glyph (size comes from the canvas, not
// from the data range).
type Icon struct {
    X, Y  float64
    Shape string  // "box", "circle", "cross"/"x", "plus"/"+"
    Zoom  float64 // 0 ~ 1.0; 0 means the default size
    Style
}

// _infSize marks Icon so its extent is measured after all finite
// elements have fixed the canvas range.
func (this *Icon) _infSize() {
}

// Range folds only the icon's center into the bounding box: its
// glyph size need not be accounted for because the rendered canvas
// already carries a border.
func (this *Icon) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.X, this.Y)
}
// Write renders the icon at the canvas icon size (scaled by Zoom
// when non-zero) by delegating to the primitive elements; unknown
// shapes fall through to "box".
func (this *Icon) Write(w io.Writer, svg *Svg) {
    is := svg.iconSize
    if this.Zoom != 0 {
        is *= this.Zoom
    }
    r := is * 0.5
    switch this.Shape {
    case "circle":
        p := &Circle{Cx: this.X, Cy: this.Y, R: r, Style: this.Style}
        p.Write(w, svg)
    default:
        fallthrough
    case "box":
        p := &Rect{X: this.X - r, Y: this.Y - r, W: is, H: is, Style: this.Style}
        p.Write(w, svg)
    case "x":
        fallthrough
    case "cross":
        // Two diagonal strokes.
        p := &Line{X1: this.X - r, Y1: this.Y - r, X2: this.X + r, Y2: this.Y + r, Style: this.Style}
        p.Write(w, svg)
        p = &Line{X1: this.X - r, Y1: this.Y + r, X2: this.X + r, Y2: this.Y - r, Style: this.Style}
        p.Write(w, svg)
    case "+":
        fallthrough
    case "plus":
        // One vertical and one horizontal stroke.
        p := &Line{X1: this.X, Y1: this.Y - r, X2: this.X, Y2: this.Y + r, Style: this.Style}
        p.Write(w, svg)
        p = &Line{X1: this.X - r, Y1: this.Y, X2: this.X + r, Y2: this.Y, Style: this.Style}
        p.Write(w, svg)
    }
}
// Line is a segment from (X1, Y1) to (X2, Y2) with optional
// decorations.
type Line struct {
    X1, Y1, X2, Y2 float64
    AuxLeft        bool // auxiliary guide line on the left side
    AuxRight       bool // auxiliary guide line on the right side
    Arrow          bool // arrow head at the (X2, Y2) end
    Style
}

// Range folds both end points into the bounding box.
func (this *Line) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.X1, this.Y1)
    include(xmin, xmax, ymin, ymax, this.X2, this.Y2)
}
// Write renders the line, optionally with thin auxiliary parallels
// (AuxLeft/AuxRight, offset perpendicular to the line) and an arrow
// head at the (X2, Y2) end. Decorations are drawn via recursive
// Write calls on a scratch copy with its own decoration flags
// cleared, so the recursion terminates. Degenerate (zero-length)
// lines skip decorations to avoid dividing by zero.
func (this *Line) Write(w io.Writer, svg *Svg) {
    if (this.Arrow || this.AuxLeft || this.AuxRight) &&
        (this.X1 != this.X2 || this.Y1 != this.Y2) {
        offset := svg.pixSize * 1.5
        // Perpendicular direction (90 degrees counter-clockwise).
        dx, dy := this.Y1-this.Y2, this.X2-this.X1
        a := math.Sqrt(dx*dx + dy*dy)
        dx, dy = offset*dx/a, offset*dy/a
        // m: scratch copy, stroked thinner than the main line.
        m := *this
        if m.StrokeWidth <= 0 {
            m.StrokeWidth = 0.3
        } else {
            m.StrokeWidth *= 0.3
        }
        m.AuxLeft = false
        m.AuxRight = false
        m.Arrow = false
        if this.AuxLeft {
            m.X1, m.Y1 = this.X1+dx, this.Y1+dy
            m.X2, m.Y2 = this.X2+dx, this.Y2+dy
            m.Write(w, svg)
        }
        if this.AuxRight {
            m.X1, m.Y1 = this.X1-dx, this.Y1-dy
            m.X2, m.Y2 = this.X2-dx, this.Y2-dy
            m.Write(w, svg)
        }
        if this.Arrow {
            // Arrow head: two strokes angled back from the end point,
            // drawn at full stroke width.
            m.StrokeWidth = this.StrokeWidth
            dx1, dy1 := this.X2-this.X1, this.Y2-this.Y1
            dx1, dy1 = svg.iconSize*dx1/a, svg.iconSize*dy1/a
            m.X2, m.Y2 = this.X2, this.Y2
            m.X1, m.Y1 = this.X2-dx1+dx*1, this.Y2-dy1+dy*1
            m.Write(w, svg)
            m.X1, m.Y1 = this.X2-dx1-dx*1, this.Y2-dy1-dy*1
            m.Write(w, svg)
            //m.X1, m.Y1 = this.X1+dx, this.Y1+dy
            //m.X2, m.Y2 = this.X2+dx, this.Y2+dy
            //m.Write(w, svg)
        }
    }
    fmt.Fprintf(w, "<line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\" ", this.X1, this.Y1, this.X2, this.Y2)
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// HLine is an infinite horizontal line at height Y, rendered across
// the full canvas width.
type HLine struct {
    Y float64
    Style
}

// _infSize marks HLine as unbounded so it is measured after all
// finite elements.
func (this *HLine) _infSize() {
}

// Range extends only the y-range with Y; *xmin is passed as the x
// value, which is a deliberate no-op for the x-range.
func (this *HLine) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, *xmin, this.Y)
}

// Write renders the line across the computed x extent of the canvas.
func (this *HLine) Write(w io.Writer, svg *Svg) {
    fmt.Fprintf(w, "<line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\" ", svg.xmin, this.Y, svg.xmax, this.Y)
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// VLine is an infinite vertical line at X, rendered across the full
// canvas height.
type VLine struct {
    X float64
    Style
}

// _infSize marks VLine as unbounded so it is measured after all
// finite elements.
func (this *VLine) _infSize() {
}

// Range extends only the x-range with X; *ymin is passed as the y
// value, a deliberate no-op for the y-range (mirroring HLine.Range).
// The original passed *xmin as the y value, which wrongly folded the
// x-minimum into the y-range.
func (this *VLine) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.X, *ymin)
}

// Write renders the line across the computed y extent of the canvas.
func (this *VLine) Write(w io.Writer, svg *Svg) {
    fmt.Fprintf(w, "<line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\" ", this.X, svg.ymin, this.X, svg.ymax)
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// Polygon is a closed shape through Points.
type Polygon struct {
    Points []Point
    Style
}

// Range folds every vertex into the bounding box.
func (this *Polygon) Range(xmin, xmax, ymin, ymax *float64) {
    for _, pt := range this.Points {
        include(xmin, xmax, ymin, ymax, pt.X, pt.Y)
    }
}

// Write emits a <polygon> element with the vertex list and style.
func (this *Polygon) Write(w io.Writer, svg *Svg) {
    io.WriteString(w, "<polygon points=\"")
    for _, pt := range this.Points {
        fmt.Fprintf(w, "%g,%g ", pt.X, pt.Y)
    }
    io.WriteString(w, "\" ")
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// Polyline is an open path through Points.
type Polyline struct {
    Points []Point
    Style
}

// Range folds every vertex into the bounding box.
func (this *Polyline) Range(xmin, xmax, ymin, ymax *float64) {
    for _, pt := range this.Points {
        include(xmin, xmax, ymin, ymax, pt.X, pt.Y)
    }
}

// Write emits a <polyline> element with the vertex list and style.
func (this *Polyline) Write(w io.Writer, svg *Svg) {
    io.WriteString(w, "<polyline points=\"")
    for _, pt := range this.Points {
        fmt.Fprintf(w, "%g,%g ", pt.X, pt.Y)
    }
    io.WriteString(w, "\" ")
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// Circle is a circle centered at (Cx, Cy) with radius R.
type Circle struct {
    Cx, Cy, R float64
    Style
}

// Range folds the circle's axis-aligned bounding square into the box.
func (this *Circle) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.Cx-this.R, this.Cy-this.R)
    include(xmin, xmax, ymin, ymax, this.Cx+this.R, this.Cy+this.R)
}

// Write emits a <circle> element.
func (this *Circle) Write(w io.Writer, svg *Svg) {
    fmt.Fprintf(w, "<circle cx=\"%g\" cy=\"%g\" r=\"%g\" ", this.Cx, this.Cy, this.R)
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// Rect is an axis-aligned rectangle with corner (X, Y) and size W x H.
type Rect struct {
    X, Y, W, H float64
    Style
}

// Range folds both opposite corners into the bounding box.
func (this *Rect) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.X, this.Y)
    include(xmin, xmax, ymin, ymax, this.X+this.W, this.Y+this.H)
}

// Write emits a <rect> element.
func (this *Rect) Write(w io.Writer, svg *Svg) {
    fmt.Fprintf(w, "<rect x=\"%g\" y=\"%g\" width=\"%g\" height=\"%g\" ", this.X, this.Y, this.W, this.H)
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// Ellipse is an axis-aligned ellipse centered at (Cx, Cy) with
// radii Rx and Ry.
type Ellipse struct {
    Cx, Cy, Rx, Ry float64
    Style
}

// Range folds the ellipse's bounding rectangle into the box.
func (this *Ellipse) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.Cx-this.Rx, this.Cy-this.Ry)
    include(xmin, xmax, ymin, ymax, this.Cx+this.Rx, this.Cy+this.Ry)
}

// Write emits an <ellipse> element.
func (this *Ellipse) Write(w io.Writer, svg *Svg) {
    fmt.Fprintf(w, "<ellipse cx=\"%g\" cy=\"%g\" rx=\"%g\" ry=\"%g\" ", this.Cx, this.Cy, this.Rx, this.Ry)
    this.Style.Write(w, svg)
    io.WriteString(w, " /> \n")
}
// Text is a text label anchored at (X, Y).
type Text struct {
    X, Y float64
    Text string
    Zoom float64 // font-size scale factor; 0 means 1
    Style
}

// Range folds only the anchor point into the bounding box (the
// rendered glyph extent is not accounted for).
func (this *Text) Range(xmin, xmax, ymin, ymax *float64) {
    include(xmin, xmax, ymin, ymax, this.X, this.Y)
}
// Write emits a <text> element. When the canvas is y-up the glyphs
// are un-mirrored with a local scale(1, -1). Fill/stroke defaults
// are the reverse of shapes: filled black, no stroke.
func (this *Text) Write(w io.Writer, svg *Svg) {
    if svg.YDown {
        fmt.Fprintf(w, "<text x=\"%g\" y=\"%g\" ", this.X, this.Y)
    } else {
        fmt.Fprintf(w, "<text transform=\" translate(%g,%g) scale(1, -1)\" ",
            this.X, this.Y)
    }
    fc := this.FillColor
    sc := this.StrokeColor
    sw := this.StrokeWidth
    fs := this.Zoom
    if fc == "" {
        fc = "#000000"
    }
    if sc == "" {
        sc = "none"
    }
    if fs == 0 {
        fs = 1
    }
    fmt.Fprintf(w, ` style="fill:`+fc+
        `;stroke:`+sc+
        `;stroke-width:%g`, sw*svg.unit)
    fmt.Fprintf(w, ";font-size:%gem\" >", fs*svg.unit)
    // The label is data, not a format string: the original
    // Fprintf(w, this.Text) misinterpreted any '%' in the label.
    io.WriteString(w, this.Text)
    fmt.Fprintf(w, "</text>\n")
}
// Frame is one animation frame: its elements plus display timing.
type Frame struct {
    Elements    []Element
    Duration    int // milliseconds; 0 means the document default
    KeepVisible bool
}

// The canvas range (logical coordinates) is determined automatically,
// but finite elements must be measured before unbounded ones. This
// interface marks unbounded elements so they are ranged last.
type _infSize interface {
    _infSize()
}

// isAutoSize reports whether e is an unbounded (auto-sized) element.
func isAutoSize(e Element) bool {
    _, b := e.(_infSize)
    return b
}
// Range1 folds the frame's finite elements into the bounding box.
func (this *Frame) Range1(xmin, xmax, ymin, ymax *float64) {
    for _, e := range this.Elements {
        if isAutoSize(e) {
            continue
        }
        e.Range(xmin, xmax, ymin, ymax)
    }
}

// Range2 folds the unbounded elements; call only after Range1 has
// run on every frame so the finite range they depend on is fixed.
func (this *Frame) Range2(xmin, xmax, ymin, ymax *float64) {
    for _, e := range this.Elements {
        if isAutoSize(e) {
            e.Range(xmin, xmax, ymin, ymax)
        }
    }
}

// Add appends an element to the frame.
func (this *Frame) Add(e Element) {
    this.Elements = append(this.Elements, e)
}
// Svg is a multi-frame SVG document builder.
type Svg struct {
    CanvasSize    int // pixels
    Frames        []*Frame
    iconSize      float64 // logical size of Icon glyphs, derived from the border
    pixSize       float64 // logical size of roughly one output pixel
    xmin, xmax    float64 // computed logical canvas range (x)
    ymin, ymax    float64 // computed logical canvas range (y)
    unit          float64 // logical length of about one pixel, used for stroke widths
    YDown         bool    // true: y grows downward (SVG native); false: y grows upward
    FrameDuration int     // default frame duration (milliseconds)
}
//func (this *Svg) KeepLastFrameVisible() {
//	this.CurrentFrame().KeepVisible = true
//}

// CurrentFrame returns the last frame, creating the first frame
// on demand for a fresh document.
func (this *Svg) CurrentFrame() *Frame {
    if len(this.Frames) == 0 {
        return this.NextFrame()
    }
    return this.Frames[len(this.Frames)-1]
}

// NextFrame appends and returns a new empty frame.
func (this *Svg) NextFrame() *Frame {
    f := new(Frame)
    //f.Duration = 333
    //if len(this.Frames) == 0 {
    //	f.KeepVisible = true
    //}
    this.Frames = append(this.Frames, f)
    return f
}

// Add appends an element to the current frame.
func (this *Svg) Add(e Element) {
    this.CurrentFrame().Add(e)
}
// WriteFile renders the document into filename; canvasPixelSize is
// the target length of the canvas's longer side (see Write).
// Returns the file-creation error, if any; rendering itself does
// not report errors.
func (this *Svg) WriteFile(filename string, canvasPixelSize float64) error {
    f, err := os.Create(filename)
    if err != nil {
        return err
    }
    defer f.Close()
    this.Write(f, canvasPixelSize)
    return nil
}
// Write renders the whole document to w. canvasPixelSize is the
// target length of the canvas's longer side; <= 0 uses the logical
// extent directly. A single frame is emitted as plain SVG; multiple
// frames become a SMIL animation with one <g> per frame.
func (this *Svg) Write(w io.Writer, canvasPixelSize float64) {
    fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\" ?> \n")
    fmt.Fprintf(w, "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\" \n")
    fmt.Fprintf(w, " \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\"> \n")
    // Accumulate the logical bounding box over all frames: finite
    // elements first (Range1), then unbounded ones (Range2).
    xmin := math.MaxFloat32
    ymin := math.MaxFloat32
    xmax := -math.MaxFloat32
    ymax := -math.MaxFloat32
    if len(this.Frames) == 0 {
        xmin, ymin, xmax, ymax = 0, 0, 0, 0
    } else {
        for _, f := range this.Frames {
            f.Range1(&xmin, &xmax, &ymin, &ymax)
        }
        for _, f := range this.Frames {
            f.Range2(&xmin, &xmax, &ymin, &ymax)
        }
    }
    dx := xmax - xmin
    dy := ymax - ymin
    // Guard against degenerate (zero-extent) ranges.
    if dx == 0 && dy == 0 {
        dx = 0.001
        dy = 0.001
    } else if dx == 0 && dy != 0 {
        dx = dy
    } else if dy == 0 && dx != 0 {
        dy = dx
    }
    // Pixel dimensions: the longer side gets canvasPixelSize, the
    // aspect ratio is preserved.
    var pw, ph float64
    if canvasPixelSize <= 0 {
        pw = dx
        ph = dy
    } else {
        if dx > dy {
            pw = canvasPixelSize
            ph = pw * dy / dx
        } else {
            ph = canvasPixelSize
            pw = ph * dx / dy
        }
    }
    this.xmin, this.xmax = xmin, xmax
    this.ymin, this.ymax = ymin, ymax
    // rc := &Rect{X: xmin, Y: ymin, W: dx, H: dy,
    //	Style: Style{StrokeColor: "lightgray", StrokeWidth: 0.5}}
    // Widen everything by a fixed pixel border converted to logical units.
    const border = 5.0
    lbx := border * dx / pw
    // NOTE(review): lby derives from dx/pw rather than dy/ph, so the
    // vertical border uses the horizontal scale — confirm intended.
    lby := border * dx / pw
    pw += border * 2
    ph += border * 2
    xmin -= lbx
    xmax += lbx
    ymin -= lby
    ymax += lby
    dx = xmax - xmin
    dy = ymax - ymin
    diagonal := math.Sqrt(dx*dx + dy*dy)
    // pr := diagonal / 200.0
    this.unit = 0.3 * diagonal / math.Sqrt(pw*pw+ph*ph) // about one pixel
    fmt.Fprintf(w, "<svg width=\"%dmm\" height=\"%dmm\" viewBox=\"0 0 %g %g\" \n xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> \n",
        int(math.Ceil(pw)), int(math.Ceil(ph)), dx, dy)
    defer io.WriteString(w, "</svg>\n")
    // Flip the y axis unless the caller wants SVG's native y-down.
    if !this.YDown {
        fmt.Fprintf(w, "<g transform=\"scale(1, -1) translate(%g, %g)\" > \n", -xmin, -ymin-dy)
    } else {
        fmt.Fprintf(w, "<g transform=\"translate(%g, %g) \" > \n", -xmin, -ymin)
    }
    defer io.WriteString(w, "</g>\n")
    // Background frame (disabled).
    this.iconSize = 0.3 * (lbx + lby)
    this.pixSize = 0.5 * (dx/pw + dy/ph)
    //rc.Write(w, this)
    // Single frame: plain elements, no animation markup.
    if len(this.Frames) == 1 {
        for _, e := range this.Frames[0].Elements {
            e.Write(w, this)
        }
        return
    }
    dfd := this.FrameDuration
    if dfd == 0 {
        dfd = 1000
    }
    // Animation: each frame is a hidden <g> toggled visible for its
    // duration via SMIL <set> elements.
    var begin int
    for _, f := range this.Frames {
        fd := f.Duration
        if fd == 0 {
            fd = dfd
        }
        io.WriteString(w, "<g visibility=\"hidden\">\n")
        fmt.Fprintf(w, "<set attributeName=\"visibility\" attributeType=\"CSS\" to=\"visible\" begin=\"%dms\" dur=\"%dms\" fill=\"freeze\" />\n",
            begin, fd)
        begin += fd
        if !f.KeepVisible {
            fmt.Fprintf(w, "<set attributeName=\"visibility\" attributeType=\"CSS\" to=\"hidden\" begin=\"%dms\" dur=\"1s\" fill=\"freeze\" />\n",
                begin)
        }
        for _, e := range f.Elements {
            e.Write(w, this)
        }
        io.WriteString(w, "</g>\n")
    }
}
|
package lexec
import (
"io"
"sync"
)
// Stream represents execution output stream.
type Stream string
const (
// Stdout is ID for execution stdout.
Stdout Stream = `stdout`
// Stdout is ID for execution stderr.
Stderr Stream = `stderr`
// Start is ID for execution start.
Launch Stream = `launch`
// Fininsh is ID for execution finish.
Finish Stream = `finish`
)
// StreamData represents execution output stream data.
type StreamData struct {
Stream Stream
// Data represents output that has been written into given stream.
Data []byte
}
type streamWriter struct {
output *[]StreamData
stream Stream
mutex *sync.Mutex
}
func (writer *streamWriter) Write(data []byte) (int, error) {
writer.mutex.Lock()
defer writer.mutex.Unlock()
indirected := make([]byte, len(data))
copy(indirected, data)
*writer.output = append(*writer.output, StreamData{
Stream: writer.stream,
Data: indirected,
})
return len(indirected), nil
}
func newStreamWriter(
output *[]StreamData,
mutex *sync.Mutex,
stream Stream,
) io.Writer {
return &streamWriter{
output: output,
stream: stream,
mutex: mutex,
}
}
|
package leetcode
import "testing"
// TestIsValid pins the expected results of the bracket-matching
// checker. The original only t.Log'ed the outputs and therefore
// could never fail, verifying nothing.
func TestIsValid(t *testing.T) {
    cases := []struct {
        in   string
        want bool
    }{
        {"()", true},
        {"()[]{}", true},
        {"(]", false},
        {"{[]}", true},
        {"{[", false},
        {"]", false},
    }
    for _, c := range cases {
        if got := isValid(c.in); got != c.want {
            t.Errorf("isValid(%q) = %v, want %v", c.in, got, c.want)
        }
    }
}
|
// Copyright 2020 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package storage
import (
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// S3 wraps the AWS configuration used to build a session per call.
type S3 struct {
    config *aws.Config
}
// NewS3Client creates a new s3 instance for the given static
// credentials, endpoint, and region. Path-style addressing is forced
// so custom (non-AWS) S3-compatible endpoints work.
func NewS3Client(key, secret, endpoint, region string) *S3 {
    return &S3{
        config: &aws.Config{
            Credentials:      credentials.NewStaticCredentials(key, secret, ""),
            Endpoint:         aws.String(endpoint),
            Region:           aws.String(region),
            S3ForcePathStyle: aws.Bool(true),
        },
    }
}
// CreateBucket creates the named bucket, passing any AWS error
// straight back to the caller.
func (s *S3) CreateBucket(bucket string) error {
    client := s3.New(session.New(s.config))
    _, err := client.CreateBucket(&s3.CreateBucketInput{
        Bucket: aws.String(bucket),
    })
    return err
}
// UploadFile uploads localPath to bucket at remotePath. When
// includeChecksum is set, a companion object named
// "<remotePath minus .tar.gz>-checksum.txt" containing the file's
// SHA-256 is uploaded as well.
func (s *S3) UploadFile(bucket, localPath, remotePath string, includeChecksum bool) error {
    file, err := os.Open(localPath)
    if err != nil {
        return err
    }
    defer file.Close()
    newSession := session.New(s.config)
    uploader := s3manager.NewUploader(newSession)
    // Upload the file's body to S3 bucket
    _, err = uploader.Upload(&s3manager.UploadInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(remotePath),
        Body:   file,
    })
    if err != nil {
        return err
    }
    if !includeChecksum {
        return nil
    }
    // The upload consumed the file handle: rewind before hashing,
    // otherwise the checksum is computed over zero bytes at EOF.
    if _, err := file.Seek(0, io.SeekStart); err != nil {
        return err
    }
    h := sha256.New()
    if _, err := io.Copy(h, file); err != nil {
        return err
    }
    checksumContent := fmt.Sprintf("SHA256 CheckSum: %x\n", h.Sum(nil))
    checksumPath := fmt.Sprintf("%s-checksum.txt",
        strings.TrimSuffix(remotePath, ".tar.gz"),
    )
    tmpFile, err := ioutil.TempFile(os.TempDir(), "walrus-")
    if err != nil {
        return err
    }
    // Remember to clean up the file afterwards
    defer os.Remove(tmpFile.Name())
    if _, err = tmpFile.Write([]byte(checksumContent)); err != nil {
        return err
    }
    if err := tmpFile.Close(); err != nil {
        return err
    }
    sumFile, err := os.Open(tmpFile.Name())
    if err != nil {
        return err
    }
    defer sumFile.Close()
    // Upload the checksum file's body to the S3 bucket as well.
    _, err = uploader.Upload(&s3manager.UploadInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(checksumPath),
        Body:   sumFile,
    })
    if err != nil {
        return err
    }
    return nil
}
// ListFiles returns the keys of all objects in bucket under prefix,
// following list pagination to the end.
func (s *S3) ListFiles(bucket, prefix string) ([]string, error) {
    newSession := session.New(s.config)
    s3Client := s3.New(newSession)
    result := []string{}
    err := s3Client.ListObjectsPages(&s3.ListObjectsInput{
        Bucket: aws.String(bucket),
        Prefix: aws.String(prefix),
    }, func(p *s3.ListObjectsOutput, last bool) (shouldContinue bool) {
        for _, obj := range p.Contents {
            // aws.StringValue is nil-safe; fmt.Sprintf("%s", *obj.Key)
            // was a pointless format pass and panicked on a nil Key.
            result = append(result, aws.StringValue(obj.Key))
        }
        return true
    })
    if err != nil {
        return result, err
    }
    return result, nil
}
// DeleteFile removes a single object (identified by its key) from bucket.
func (s *S3) DeleteFile(bucket, file string) error {
	client := s3.New(session.New(s.config))
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(file),
	}
	_, err := client.DeleteObject(input)
	if err != nil {
		return err
	}
	return nil
}
// CleanupOld deletes objects under path whose filename encodes a timestamp
// older than beforeDays days ago. File names are expected to look like
// "2006-01-02_15-04-05.tar.gz" or "..._-checksum.txt". It returns the
// number of objects actually deleted.
func (s *S3) CleanupOld(bucket, path string, beforeDays int) (int, error) {
	files, err := s.ListFiles(bucket, path)
	if err != nil {
		return 0, err
	}
	count := 0
	deleteBefore := time.Now().Add(time.Duration(-beforeDays*24) * time.Hour)
	for _, file := range files {
		fileName := filepath.Base(file)
		fileName = strings.TrimSuffix(fileName, ".tar.gz")
		fileName = strings.TrimSuffix(fileName, "-checksum.txt")
		fileTime, err := time.Parse("2006-01-02_15-04-05", fileName)
		if err != nil {
			// BUG FIX: a parse failure used to be ignored, leaving fileTime
			// as the zero time — which is always Before(deleteBefore) — so
			// any object with an unexpected name was deleted. Skip instead.
			continue
		}
		if fileTime.Before(deleteBefore) {
			// BUG FIX: the DeleteFile error was silently dropped and the
			// object was counted as deleted even on failure.
			if err := s.DeleteFile(bucket, file); err != nil {
				return count, err
			}
			count++
		}
	}
	return count, nil
}
|
// Copyright 2016 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitiles
import (
"fmt"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/pubsub/v1"
"go.chromium.org/luci/common/api/gitiles"
"go.chromium.org/luci/common/data/stringset"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
gitilespb "go.chromium.org/luci/common/proto/gitiles"
"go.chromium.org/luci/config/validation"
"go.chromium.org/luci/server/auth"
api "go.chromium.org/luci/scheduler/api/scheduler/v1"
"go.chromium.org/luci/scheduler/appengine/internal"
"go.chromium.org/luci/scheduler/appengine/messages"
"go.chromium.org/luci/scheduler/appengine/task"
)
// defaultMaxTriggersPerInvocation limits number of triggers emitted per one
// invocation. It is used when TaskManager.maxTriggersPerInvocation is left
// at its zero value, guarding against datastore/runtime limits when many
// refs change at once.
const defaultMaxTriggersPerInvocation = 100
// TaskManager implements task.Manager interface for tasks defined with
// GitilesTask proto message.
type TaskManager struct {
	mockGitilesClient        gitilespb.GitilesClient // Used for testing only.
	maxTriggersPerInvocation int                     // Avoid choking on DS or runtime limits; 0 means defaultMaxTriggersPerInvocation.
}
// Name is part of Manager interface. It returns the identifier under which
// this task type is registered.
func (m TaskManager) Name() string {
	return "gitiles"
}
// ProtoMessageType is part of Manager interface. The typed-nil return is
// intentional: callers only need the concrete message type, not a value.
func (m TaskManager) ProtoMessageType() proto.Message {
	return (*messages.GitilesTask)(nil)
}
// Traits is part of Manager interface.
func (m TaskManager) Traits() task.Traits {
	return task.Traits{
		Multistage: false, // we don't use task.StatusRunning state
	}
}
// ValidateProtoMessage is part of Manager interface. It checks that the
// config names an absolute repo URL and that every watched ref is absolute
// ("refs/..."), with at most one glob, and only in trailing "/*" position.
func (m TaskManager) ValidateProtoMessage(c *validation.Context, msg proto.Message) {
	cfg, ok := msg.(*messages.GitilesTask)
	if !ok {
		c.Errorf("wrong type %T, expecting *messages.GitilesTask", msg)
		return
	}
	// Validate 'repo' field: required, parseable, absolute.
	c.Enter("repo")
	switch u, err := url.Parse(cfg.Repo); {
	case cfg.Repo == "":
		c.Errorf("field 'repository' is required")
	case err != nil:
		c.Errorf("invalid URL %q: %s", cfg.Repo, err)
	case !u.IsAbs():
		c.Errorf("not an absolute url: %q", cfg.Repo)
	}
	c.Exit()
	// Validate each watched ref.
	c.Enter("refs")
	for _, ref := range cfg.Refs {
		if !strings.HasPrefix(ref, "refs/") {
			c.Errorf("ref must start with 'refs/' not %q", ref)
		}
		if cnt := strings.Count(ref, "*"); cnt > 1 || (cnt == 1 && !strings.HasSuffix(ref, "/*")) {
			c.Errorf("only trailing (e.g. refs/blah/*) globs are supported, not %q", ref)
		}
	}
	c.Exit()
}
// LaunchTask is part of Manager interface.
//
// One invocation polls gitiles for the tips of all watched refs, diffs them
// against the persisted per-job state, emits one trigger per changed ref
// (capped by maxTriggersPerInvocation), then persists the updated state.
// The invocation always completes in a single call (Traits.Multistage is
// false).
func (m TaskManager) LaunchTask(c context.Context, ctl task.Controller) error {
	cfg := ctl.Task().(*messages.GitilesTask)
	ctl.DebugLog("Repo: %s, Refs: %s", cfg.Repo, cfg.Refs)
	u, err := url.Parse(cfg.Repo)
	if err != nil {
		return err
	}
	watchedRefs := watchedRefs{}
	watchedRefs.init(cfg.GetRefs())
	// Load the previously saved ref heads and fetch the live tips in
	// parallel; both are needed before diffing.
	var wg sync.WaitGroup
	var heads map[string]string
	var headsErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		heads, headsErr = loadState(c, ctl.JobID(), u)
	}()
	var refs map[string]string
	var refsErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		refs, refsErr = m.getRefsTips(c, ctl, cfg.Repo, watchedRefs)
	}()
	wg.Wait()
	if headsErr != nil {
		ctl.DebugLog("Failed to fetch heads - %s", headsErr)
		return fmt.Errorf("failed to fetch heads: %v", headsErr)
	}
	if refsErr != nil {
		ctl.DebugLog("Failed to fetch refs - %s", refsErr)
		return fmt.Errorf("failed to fetch refs: %v", refsErr)
	}
	refsChanged := 0
	// Delete all previously known refs which are either no longer watched or no
	// longer exist in repo.
	for ref := range heads {
		switch {
		case !watchedRefs.hasRef(ref):
			ctl.DebugLog("Ref %s is no longer watched", ref)
			delete(heads, ref)
			refsChanged++
		case refs[ref] == "":
			ctl.DebugLog("Ref %s deleted", ref)
			delete(heads, ref)
			refsChanged++
		}
	}
	// For determinism, sort keys of current refs.
	sortedRefs := make([]string, 0, len(refs))
	for ref := range refs {
		sortedRefs = append(sortedRefs, ref)
	}
	sort.Strings(sortedRefs)
	emittedTriggers := 0
	maxTriggersPerInvocation := m.maxTriggersPerInvocation
	if maxTriggersPerInvocation == 0 {
		maxTriggersPerInvocation = defaultMaxTriggersPerInvocation
	}
	// Note, that current `refs` contain only watched refs (see getRefsTips).
	for _, ref := range sortedRefs {
		newHead := refs[ref]
		oldHead, existed := heads[ref]
		switch {
		case !existed:
			ctl.DebugLog("Ref %s is new: %s", ref, newHead)
		case oldHead != newHead:
			ctl.DebugLog("Ref %s updated: %s => %s", ref, oldHead, newHead)
		default:
			// No change.
			continue
		}
		heads[ref] = newHead
		refsChanged++
		emittedTriggers++
		// TODO(tandrii): actually look at commits between current and previously
		// known tips of each ref.
		// In current (v1) engine, all triggers emitted around the same time will
		// result in just 1 invocation of each triggered job. Therefore,
		// passing just HEAD's revision is good enough.
		// For the same reason, only 1 of the refs will actually be processed if
		// several refs changed at the same time.
		ctl.EmitTrigger(c, &internal.Trigger{
			Id:    fmt.Sprintf("%s/+/%s@%s", cfg.Repo, ref, newHead),
			Title: newHead,
			Url:   fmt.Sprintf("%s/+/%s", cfg.Repo, newHead),
			Payload: &internal.Trigger_Gitiles{
				Gitiles: &api.GitilesTrigger{Repo: cfg.Repo, Ref: ref, Revision: newHead},
			},
		})
		// Safeguard against too many changes such as the first run after
		// config change to watch many more refs than before.
		if emittedTriggers >= maxTriggersPerInvocation {
			ctl.DebugLog("Emitted %d triggers, postponing the rest", emittedTriggers)
			break
		}
	}
	if refsChanged == 0 {
		ctl.DebugLog("No changes detected")
	} else {
		ctl.DebugLog("%d refs changed", refsChanged)
		// Force save to ensure triggers are actually emitted.
		if err := ctl.Save(c); err != nil {
			// At this point, triggers have not been sent, so bail now and don't save
			// the refs' heads newest values.
			return err
		}
		if err := saveState(c, ctl.JobID(), u, heads); err != nil {
			return err
		}
		ctl.DebugLog("Saved %d known refs", len(heads))
	}
	ctl.State().Status = task.StatusSucceeded
	return nil
}
// AbortTask is part of Manager interface. Nothing to clean up: invocations
// finish within a single LaunchTask call.
func (m TaskManager) AbortTask(c context.Context, ctl task.Controller) error {
	return nil
}
// HandleNotification is part of Manager interface. Gitiles tasks are driven
// by polling, not PubSub, so this is intentionally unimplemented.
func (m TaskManager) HandleNotification(c context.Context, ctl task.Controller, msg *pubsub.PubsubMessage) error {
	return errors.New("not implemented")
}
// HandleTimer is part of Manager interface. No timers are used by this task
// type, so this is intentionally unimplemented.
func (m TaskManager) HandleTimer(c context.Context, ctl task.Controller, name string, payload []byte) error {
	return errors.New("not implemented")
}
// getRefsTips returns the tip revision for each watched ref.
//
// Refs are grouped by namespace and each namespace is fetched with one Refs
// RPC, issued in parallel. Results are filtered down to only watched refs.
// If any RPC fails, a combined error is returned and no partial result is
// exposed.
func (m TaskManager) getRefsTips(c context.Context, ctl task.Controller, repoURL string, watched watchedRefs) (map[string]string, error) {
	host, project, err := gitiles.ParseRepoURL(repoURL)
	if err != nil {
		return nil, errors.Annotate(err, "invalid repo URL %q", repoURL).Err()
	}
	g, err := m.getGitilesClient(c, ctl, host)
	if err != nil {
		return nil, err
	}
	// Query gitiles for each namespace in parallel.
	var wg sync.WaitGroup
	var lock sync.Mutex
	errs := []error{}
	allTips := map[string]string{}
	// Group all refs by their namespace to reduce # of RPCs.
	for _, wrs := range watched.namespaces {
		wg.Add(1)
		go func(wrs *watchedRefNamespace) {
			defer wg.Done()
			res, err := g.Refs(c, &gitilespb.RefsRequest{
				Project:  project,
				RefsPath: wrs.namespace,
			})
			// lock guards both errs and allTips.
			lock.Lock()
			defer lock.Unlock()
			if err != nil {
				// BUG FIX: the format string had three verbs but only two
				// arguments; the project argument was missing and the error
				// was swallowed into a "%!q(MISSING)" artifact.
				ctl.DebugLog("failed to fetch %q namespace tips for %q: %s", wrs.namespace, project, err)
				errs = append(errs, err)
				return
			}
			for ref, tip := range res.Revisions {
				if watched.hasRef(ref) {
					allTips[ref] = tip
				}
			}
		}(wrs)
	}
	wg.Wait()
	if len(errs) > 0 {
		return nil, errors.NewMultiError(errs...)
	}
	return allTips, nil
}
// getGitilesClient returns a gitiles client for host: the injected mock in
// tests, otherwise a REST client backed by an OAuth-scoped HTTP client
// obtained from the controller (with a 1 minute timeout).
func (m TaskManager) getGitilesClient(c context.Context, ctl task.Controller, host string) (gitilespb.GitilesClient, error) {
	if m.mockGitilesClient != nil {
		// Used for testing only.
		logging.Infof(c, "using mockGitilesClient")
		return m.mockGitilesClient, nil
	}
	httpClient, err := ctl.GetClient(c, time.Minute, auth.WithScopes(gitiles.OAuthScope))
	if err != nil {
		return nil, err
	}
	return gitiles.NewRESTClient(httpClient, host, true)
}
// watchedRefNamespace records which direct children of one ref namespace
// are watched: either all of them (glob) or an explicit set.
type watchedRefNamespace struct {
	namespace    string // no trailing "/".
	allChildren  bool   // if true, someChildren is ignored.
	someChildren stringset.Set
}
// hasSuffix reports whether the given final ref component is watched within
// this namespace. It must be called with a concrete ref component, never
// with the "*" glob itself.
func (w watchedRefNamespace) hasSuffix(suffix string) bool {
	if suffix == "*" {
		panic(fmt.Errorf("watchedRefNamespace membership should only be checked for refs, not ref glob %s", suffix))
	}
	if w.allChildren {
		return true
	}
	if w.someChildren == nil {
		return false
	}
	return w.someChildren.Has(suffix)
}
// addSuffix marks one final ref component (or the "*" glob) as watched.
// Once the glob is added, individual suffixes are no longer tracked.
func (w *watchedRefNamespace) addSuffix(suffix string) {
	if w.allChildren {
		// Already watching everything; nothing more to record.
		return
	}
	if suffix == "*" {
		w.allChildren = true
		w.someChildren = nil
		return
	}
	if w.someChildren == nil {
		w.someChildren = stringset.New(1)
	}
	w.someChildren.Add(suffix)
}
// watchedRefs indexes the configured refs by namespace for fast membership
// checks and namespace-grouped RPCs.
type watchedRefs struct {
	namespaces map[string]*watchedRefNamespace
}
// init builds the namespace index from the raw refs config list.
func (w *watchedRefs) init(refsConfig []string) {
	w.namespaces = map[string]*watchedRefNamespace{}
	for _, ref := range refsConfig {
		ns, suffix := splitRef(ref)
		wrn, ok := w.namespaces[ns]
		if !ok {
			wrn = &watchedRefNamespace{namespace: ns}
			w.namespaces[ns] = wrn
		}
		wrn.addSuffix(suffix)
	}
}
// hasRef reports whether the given concrete ref matches the watched set.
func (w *watchedRefs) hasRef(ref string) bool {
	ns, suffix := splitRef(ref)
	wrn, exists := w.namespaces[ns]
	return exists && wrn.hasSuffix(suffix)
}
// splitRef splits a ref into its namespace (everything before the last "/")
// and its final component. A string with no slash past position 0 is
// returned whole, with an empty suffix.
func splitRef(s string) (string, string) {
	i := strings.LastIndex(s, "/")
	if i <= 0 {
		return s, ""
	}
	return s[:i], s[i+1:]
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/beta/compute_beta_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta"
)
// HttpHealthCheckServer implements the gRPC interface for HttpHealthCheck.
// It is stateless; each RPC builds its own client from the request's
// service account file.
type HttpHealthCheckServer struct{}
// ProtoToHttpHealthCheck converts a HttpHealthCheck resource from its proto
// representation, mapping each field through the nil-preserving dcl helpers.
func ProtoToHttpHealthCheck(p *betapb.ComputeBetaHttpHealthCheck) *beta.HttpHealthCheck {
	return &beta.HttpHealthCheck{
		CheckIntervalSec:   dcl.Int64OrNil(p.CheckIntervalSec),
		Description:        dcl.StringOrNil(p.Description),
		HealthyThreshold:   dcl.Int64OrNil(p.HealthyThreshold),
		Host:               dcl.StringOrNil(p.Host),
		Name:               dcl.StringOrNil(p.Name),
		Port:               dcl.Int64OrNil(p.Port),
		RequestPath:        dcl.StringOrNil(p.RequestPath),
		TimeoutSec:         dcl.Int64OrNil(p.TimeoutSec),
		UnhealthyThreshold: dcl.Int64OrNil(p.UnhealthyThreshold),
		CreationTimestamp:  dcl.StringOrNil(p.CreationTimestamp),
		Project:            dcl.StringOrNil(p.Project),
		SelfLink:           dcl.StringOrNil(p.SelfLink),
	}
}
// HttpHealthCheckToProto converts a HttpHealthCheck resource to its proto
// representation, mapping nil pointers to proto zero values.
func HttpHealthCheckToProto(resource *beta.HttpHealthCheck) *betapb.ComputeBetaHttpHealthCheck {
	return &betapb.ComputeBetaHttpHealthCheck{
		CheckIntervalSec:   dcl.ValueOrEmptyInt64(resource.CheckIntervalSec),
		Description:        dcl.ValueOrEmptyString(resource.Description),
		HealthyThreshold:   dcl.ValueOrEmptyInt64(resource.HealthyThreshold),
		Host:               dcl.ValueOrEmptyString(resource.Host),
		Name:               dcl.ValueOrEmptyString(resource.Name),
		Port:               dcl.ValueOrEmptyInt64(resource.Port),
		RequestPath:        dcl.ValueOrEmptyString(resource.RequestPath),
		TimeoutSec:         dcl.ValueOrEmptyInt64(resource.TimeoutSec),
		UnhealthyThreshold: dcl.ValueOrEmptyInt64(resource.UnhealthyThreshold),
		CreationTimestamp:  dcl.ValueOrEmptyString(resource.CreationTimestamp),
		Project:            dcl.ValueOrEmptyString(resource.Project),
		SelfLink:           dcl.ValueOrEmptyString(resource.SelfLink),
	}
}
// applyHttpHealthCheck converts the request resource, applies it via the
// given client, and converts the result back to proto form.
func (s *HttpHealthCheckServer) applyHttpHealthCheck(ctx context.Context, c *beta.Client, request *betapb.ApplyComputeBetaHttpHealthCheckRequest) (*betapb.ComputeBetaHttpHealthCheck, error) {
	res, err := c.ApplyHttpHealthCheck(ctx, ProtoToHttpHealthCheck(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return HttpHealthCheckToProto(res), nil
}
// ApplyComputeBetaHttpHealthCheck handles the gRPC request by passing it to
// the underlying HttpHealthCheck Apply() method.
func (s *HttpHealthCheckServer) ApplyComputeBetaHttpHealthCheck(ctx context.Context, request *betapb.ApplyComputeBetaHttpHealthCheckRequest) (*betapb.ComputeBetaHttpHealthCheck, error) {
	cl, err := createConfigHttpHealthCheck(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	// Delegate the actual apply to the shared helper.
	return s.applyHttpHealthCheck(ctx, cl, request)
}
// DeleteComputeBetaHttpHealthCheck handles the gRPC request by passing it
// to the underlying HttpHealthCheck Delete() method. An empty proto is
// returned together with the delete error (nil on success).
func (s *HttpHealthCheckServer) DeleteComputeBetaHttpHealthCheck(ctx context.Context, request *betapb.DeleteComputeBetaHttpHealthCheckRequest) (*emptypb.Empty, error) {
	cl, err := createConfigHttpHealthCheck(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resource := ProtoToHttpHealthCheck(request.GetResource())
	return &emptypb.Empty{}, cl.DeleteHttpHealthCheck(ctx, resource)
}
// ListComputeBetaHttpHealthCheck handles the gRPC request by passing it to
// the underlying HttpHealthCheckList() method and converting each item to
// its proto representation.
func (s *HttpHealthCheckServer) ListComputeBetaHttpHealthCheck(ctx context.Context, request *betapb.ListComputeBetaHttpHealthCheckRequest) (*betapb.ListComputeBetaHttpHealthCheckResponse, error) {
	cl, err := createConfigHttpHealthCheck(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListHttpHealthCheck(ctx, request.Project)
	if err != nil {
		return nil, err
	}
	// Pre-size the slice: the item count is already known.
	protos := make([]*betapb.ComputeBetaHttpHealthCheck, 0, len(resources.Items))
	for _, r := range resources.Items {
		protos = append(protos, HttpHealthCheckToProto(r))
	}
	return &betapb.ListComputeBetaHttpHealthCheckResponse{Items: protos}, nil
}
// createConfigHttpHealthCheck builds a beta compute client authenticated
// with the given service account credentials file. The context parameter is
// currently unused; the error return is always nil.
func createConfigHttpHealthCheck(ctx context.Context, service_account_file string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return beta.NewClient(conf), nil
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"fmt"
"net"
"os"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/tidb-binlog/node"
"github.com/pingcap/tipb/go-binlog"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
var (
	// testMaxRecvMsgSize is the grpc server's MaxRecvMsgSize used by the
	// mock pump; binlogs larger than this are rejected by the server.
	testMaxRecvMsgSize = 1024
	// testRetryTime is how many WriteBinlog attempts the erroring mock pump
	// fails before it finally succeeds.
	testRetryTime = 5
)
// testCase bundles parallel slices driving one selector scenario: the i-th
// binlog is selected after applying the i-th availability flip to the pump
// named by the i-th node ID, and must yield the i-th expected pump.
type testCase struct {
	binlogs     []*binlog.Binlog
	choosePumps []*PumpStatus
	setAvliable []bool
	setNodeID   []string
}
// TestSelector runs the selector scenario once per supported strategy.
func TestSelector(t *testing.T) {
	for _, strategy := range []string{Hash, Range} {
		testSelector(t, strategy)
	}
}
// testSelector exercises one selector strategy end to end: scripted pump
// selection for prewrite/commit binlog pairs under availability flips, then
// a loop verifying that a commit binlog follows its prewrite's pump even
// when availability or the strategy changes in between.
func testSelector(t *testing.T, strategy string) {
	pumpsClient := &PumpsClient{
		Pumps:              NewPumpInfos(),
		Selector:           NewSelector(strategy),
		BinlogWriteTimeout: DefaultBinlogWriteTimeout,
	}
	// Register three offline pumps with stub grpc clients.
	pumps := []*PumpStatus{{}, {}, {}}
	for i, pump := range pumps {
		pump.NodeID = fmt.Sprintf("pump%d", i)
		pump.State = node.Offline
		// set pump client to avoid create grpc client.
		pump.Client = binlog.NewPumpClient(nil)
	}
	for _, pump := range pumps {
		pumpsClient.addPump(pump, false)
	}
	pumpsClient.Selector.SetPumps(copyPumps(pumpsClient.Pumps.AvaliablePumps))
	// Scripted scenario: three prewrite/commit pairs (start ts 1, 3, 5).
	tCase := &testCase{}
	tCase.binlogs = []*binlog.Binlog{
		{
			Tp:      binlog.BinlogType_Prewrite,
			StartTs: 1,
		}, {
			Tp:       binlog.BinlogType_Commit,
			StartTs:  1,
			CommitTs: 2,
		}, {
			Tp:      binlog.BinlogType_Prewrite,
			StartTs: 3,
		}, {
			Tp:       binlog.BinlogType_Commit,
			StartTs:  3,
			CommitTs: 4,
		}, {
			Tp:      binlog.BinlogType_Prewrite,
			StartTs: 5,
		}, {
			Tp:       binlog.BinlogType_Commit,
			StartTs:  5,
			CommitTs: 6,
		},
	}
	tCase.setNodeID = []string{"pump0", "", "pump0", "pump1", "", "pump2"}
	tCase.setAvliable = []bool{true, false, false, true, false, true}
	tCase.choosePumps = []*PumpStatus{pumpsClient.Pumps.Pumps["pump0"], pumpsClient.Pumps.Pumps["pump0"], nil,
		nil, pumpsClient.Pumps.Pumps["pump1"], pumpsClient.Pumps.Pumps["pump1"]}
	for i, nodeID := range tCase.setNodeID {
		if nodeID != "" {
			pumpsClient.setPumpAvailable(pumpsClient.Pumps.Pumps[nodeID], tCase.setAvliable[i])
		}
		pump := pumpsClient.Selector.Select(tCase.binlogs[i], 0)
		pumpsClient.Selector.Feedback(tCase.binlogs[i].StartTs, tCase.binlogs[i].Tp, pump)
		require.Equal(t, pump, tCase.choosePumps[i])
	}
	// Commit must stick to the prewrite's pump regardless of availability
	// flips and strategy switches in between.
	for j := 0; j < 10; j++ {
		prewriteBinlog := &binlog.Binlog{
			Tp:      binlog.BinlogType_Prewrite,
			StartTs: int64(j),
		}
		commitBinlog := &binlog.Binlog{
			Tp:      binlog.BinlogType_Commit,
			StartTs: int64(j),
		}
		pump1 := pumpsClient.Selector.Select(prewriteBinlog, 0)
		if j%2 == 0 {
			pump1 = pumpsClient.Selector.Select(prewriteBinlog, 1)
		}
		pumpsClient.Selector.Feedback(prewriteBinlog.StartTs, prewriteBinlog.Tp, pump1)
		pumpsClient.setPumpAvailable(pump1, false)
		pump2 := pumpsClient.Selector.Select(commitBinlog, 0)
		pumpsClient.Selector.Feedback(commitBinlog.StartTs, commitBinlog.Tp, pump2)
		// prewrite binlog and commit binlog with same start ts should choose same pump
		require.Equal(t, pump1.NodeID, pump2.NodeID)
		pumpsClient.setPumpAvailable(pump1, true)
		// after change strategy, prewrite binlog and commit binlog will choose same pump
		pump1 = pumpsClient.Selector.Select(prewriteBinlog, 0)
		pumpsClient.Selector.Feedback(prewriteBinlog.StartTs, prewriteBinlog.Tp, pump1)
		if strategy == Range {
			err := pumpsClient.SetSelectStrategy(Hash)
			require.NoError(t, err)
		} else {
			err := pumpsClient.SetSelectStrategy(Range)
			require.NoError(t, err)
		}
		pump2 = pumpsClient.Selector.Select(commitBinlog, 0)
		require.Equal(t, pump1.NodeID, pump2.NodeID)
		// set back
		err := pumpsClient.SetSelectStrategy(strategy)
		require.NoError(t, err)
	}
}
// TestWriteBinlog runs a mock pump over both unix-socket and TCP transports
// and checks: oversized binlogs are rejected, small binlogs succeed after
// retries, the client-less pump is demoted to the unavailable list, and a
// commit binlog does not surface a write error even when the pump is down.
func TestWriteBinlog(t *testing.T) {
	pumpServerConfig := []struct {
		addr       string
		serverMode string
	}{
		{
			"/tmp/mock-pump.sock",
			"unix",
		}, {
			"127.0.0.1:15049",
			"tcp",
		},
	}
	// make test faster
	RetryInterval = 100 * time.Millisecond
	CommitBinlogTimeout = time.Second
	for _, cfg := range pumpServerConfig {
		pumpServer, err := createMockPumpServer(cfg.addr, cfg.serverMode, true)
		require.NoError(t, err)
		// Custom dialer so the same code path covers unix and tcp.
		opt := grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout(cfg.serverMode, addr, timeout)
		})
		clientCon, err := grpc.Dial(cfg.addr, opt, grpc.WithTransportCredentials(insecure.NewCredentials()))
		require.NoError(t, err)
		require.NotNil(t, clientCon)
		pumpClient := mockPumpsClient(binlog.NewPumpClient(clientCon), true)
		// test binlog size bigger than grpc's MaxRecvMsgSize
		blog := &binlog.Binlog{
			Tp:            binlog.BinlogType_Prewrite,
			PrewriteValue: make([]byte, testMaxRecvMsgSize+1),
		}
		err = pumpClient.WriteBinlog(blog)
		require.Error(t, err)
		for i := 0; i < 10; i++ {
			// test binlog size smaller than grpc's MaxRecvMsgSize
			blog = &binlog.Binlog{
				Tp:            binlog.BinlogType_Prewrite,
				PrewriteValue: make([]byte, 1),
			}
			err = pumpClient.WriteBinlog(blog)
			require.NoError(t, err)
		}
		// after write some binlog, the pump without grpc client will move to unavailable list in pump client.
		require.Len(t, pumpClient.Pumps.UnAvaliablePumps, 1)
		// test write commit binlog, will not return error although write binlog failed.
		preWriteBinlog := &binlog.Binlog{
			Tp:            binlog.BinlogType_Prewrite,
			StartTs:       123,
			PrewriteValue: make([]byte, 1),
		}
		commitBinlog := &binlog.Binlog{
			Tp:            binlog.BinlogType_Commit,
			StartTs:       123,
			CommitTs:      123,
			PrewriteValue: make([]byte, 1),
		}
		err = pumpClient.WriteBinlog(preWriteBinlog)
		require.NoError(t, err)
		// test when pump is down
		pumpServer.Close()
		// write commit binlog failed will not return error
		err = pumpClient.WriteBinlog(commitBinlog)
		require.NoError(t, err)
		err = pumpClient.WriteBinlog(blog)
		require.Error(t, err)
	}
}
// mockPumpServer is a fake pump grpc server used by the client tests. When
// withError is set, WriteBinlog fails until testRetryTime attempts have
// been made.
type mockPumpServer struct {
	mode      string       // "unix" or "tcp", decides listener cleanup in Close.
	addr      string       // listen address (socket path or host:port).
	server    *grpc.Server
	withError bool         // fail WriteBinlog until enough retries happen.
	retryTime int          // number of failed attempts so far.
}
// WriteBinlog implements PumpServer interface. In error mode it fails the
// first testRetryTime-1 attempts and succeeds on the last, then resets its
// attempt counter for the next sequence.
func (p *mockPumpServer) WriteBinlog(ctx context.Context, req *binlog.WriteBinlogReq) (*binlog.WriteBinlogResp, error) {
	resp := &binlog.WriteBinlogResp{}
	if !p.withError {
		return resp, nil
	}
	if p.retryTime++; p.retryTime < testRetryTime {
		return resp, errors.New("fake error")
	}
	// Only the last retry succeeds; start over for the next call sequence.
	p.retryTime = 0
	return resp, nil
}
// PullBinlogs implements PumpServer interface. It is a no-op stub: these
// tests only exercise WriteBinlog.
func (p *mockPumpServer) PullBinlogs(req *binlog.PullBinlogReq, srv binlog.Pump_PullBinlogsServer) error {
	return nil
}
// Close stops the grpc server and, for unix transport, removes the socket
// file left behind by the listener.
func (p *mockPumpServer) Close() {
	p.server.Stop()
	if p.mode != "unix" {
		return
	}
	os.Remove(p.addr)
}
// createMockPumpServer starts a mock pump grpc server listening on addr
// using the given network mode ("unix" or "tcp"). A stale unix socket from
// a previous run is removed first. The server's MaxRecvMsgSize is capped at
// testMaxRecvMsgSize so oversized binlogs are rejected.
func createMockPumpServer(addr string, mode string, withError bool) (*mockPumpServer, error) {
	if mode == "unix" {
		os.Remove(addr)
	}
	listener, err := net.Listen(mode, addr)
	if err != nil {
		return nil, err
	}
	pump := &mockPumpServer{
		mode:      mode,
		addr:      addr,
		server:    grpc.NewServer(grpc.MaxRecvMsgSize(testMaxRecvMsgSize)),
		withError: withError,
	}
	binlog.RegisterPumpServer(pump.server, pump)
	go pump.server.Serve(listener)
	return pump, nil
}
// mockPumpsClient creates a PumpsClient for tests: one healthy pump backed
// by the given grpc client, plus (when withBadPump is set) a second pump
// with no grpc client to exercise failure handling.
func mockPumpsClient(client binlog.PumpClient, withBadPump bool) *PumpsClient {
	infos := NewPumpInfos()
	// A healthy pump with a working grpc client.
	good := &PumpStatus{
		Status: node.Status{
			NodeID: "pump-1",
			State:  node.Online,
		},
		Client: client,
	}
	infos.Pumps[good.NodeID] = good
	infos.AvaliablePumps[good.NodeID] = good
	selectorPumps := []*PumpStatus{good}
	if withBadPump {
		// A pump without a grpc client; writes through it will fail.
		bad := &PumpStatus{
			Status: node.Status{
				NodeID: "pump-2",
				State:  node.Online,
			},
		}
		infos.Pumps[bad.NodeID] = bad
		infos.AvaliablePumps[bad.NodeID] = bad
		selectorPumps = append(selectorPumps, bad)
	}
	cli := &PumpsClient{
		ClusterID:          1,
		Pumps:              infos,
		Selector:           NewSelector(Range),
		BinlogWriteTimeout: time.Second,
	}
	cli.Selector.SetPumps(selectorPumps)
	return cli
}
|
package app
import (
"context"
"net/http"
"time"
)
// App wraps a configurable HTTP server so callers can start it with Run and
// stop it gracefully with Shutdown.
type App struct {
	httpServer *http.Server
}
// Run builds the HTTP server on the given port with fixed timeouts and
// header limits, then blocks in ListenAndServe until the server stops,
// returning its error.
func (app *App) Run(port string, handler http.Handler) error {
	srv := &http.Server{
		Addr:           ":" + port,
		Handler:        handler,
		MaxHeaderBytes: 1 << 20, // 1 Mb
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
	}
	app.httpServer = srv
	return srv.ListenAndServe()
}
// Shutdown gracefully stops the server started by Run, honoring ctx for the
// shutdown deadline. Calling it before Run panics on the nil server.
func (app *App) Shutdown(ctx context.Context) error {
	return app.httpServer.Shutdown(ctx)
}
|
package config
import (
"os"
"github.com/spf13/viper"
)
// Init wires up process configuration: it falls back to the STEAM-prefixed
// Google credentials variable when the canonical one is unset, enables
// viper's STEAM_-prefixed env lookup, and seeds defaults for RabbitMQ and
// general service settings. Call once at startup.
func Init() {
	// Google
	if os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", os.Getenv("STEAM_GOOGLE_APPLICATION_CREDENTIALS"))
	}
	//
	viper.AutomaticEnv()
	viper.SetEnvPrefix("STEAM")
	// Rabbit
	viper.SetDefault("RABBIT_USER", "guest")
	viper.SetDefault("RABBIT_PASS", "guest")
	// Other
	viper.SetDefault("PORT", "8081")
	viper.SetDefault("ENV", "local")
	viper.SetDefault("MEMCACHE_DSN", "memcache:11211")
	viper.SetDefault("PATH", "/root")
	viper.SetDefault("MYSQL_DSN", "root@tcp(localhost:3306)/steam?parseTime=true")
	viper.SetDefault("DOMAIN", "https://gamedb.online")
	viper.SetDefault("SHORT_NAME", "GameDB")
}
|
package xml
import (
"testing"
)
// fragmentXml is a small well-formed XML snippet (attributes plus a nested
// child) used as fixture input.
const fragmentXml = "<root k='v' kk='vv'><child>text</child></root>"
// BenchmarkType measures the cost of calling depth() on a zero-valued desc.
// NOTE(review): desc and depth() are defined elsewhere in this package.
func BenchmarkType(b *testing.B) {
	d := desc(0)
	for i := 0; i < b.N; i++ {
		d.depth()
	}
}
|
package ardupilotmega
/*
Generated using mavgen - https://github.com/ArduPilot/pymavlink/
Copyright 2020 queue-b <https://github.com/queue-b>
Permission is hereby granted, free of charge, to any person obtaining a copy
of the generated software (the "Generated Software"), to deal
in the Generated Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Generated Software, and to permit persons to whom the Generated
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Generated Software.
THE GENERATED SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE GENERATED SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE GENERATED SOFTWARE.
*/
import (
"bytes"
"encoding/binary"
"fmt"
"text/tabwriter"
"github.com/queue-b/go-mavlink2"
"github.com/queue-b/go-mavlink2/util"
)
/*AHRS Status of DCM attitude estimator.

Field order matters: Read/Write encode this struct with binary.Read/Write
in declaration order, LittleEndian, with HasExtensionFieldValues occupying
the trailing extra byte. */
type AHRS struct {
	/*Omegaix X gyro drift estimate. */
	Omegaix float32
	/*Omegaiy Y gyro drift estimate. */
	Omegaiy float32
	/*Omegaiz Z gyro drift estimate. */
	Omegaiz float32
	/*AccelWeight Average accel_weight. */
	AccelWeight float32
	/*RenormVal Average renormalisation value. */
	RenormVal float32
	/*ErrorRp Average error_roll_pitch value. */
	ErrorRp float32
	/*ErrorYaw Average error_yaw value. */
	ErrorYaw float32
	/*HasExtensionFieldValues indicates if this message has any extensions and */
	HasExtensionFieldValues bool
}
// String renders a human-readable, tab-aligned dump of all AHRS fields,
// prefixed with the dialect and message name.
func (m *AHRS) String() string {
	var buffer bytes.Buffer
	writer := tabwriter.NewWriter(&buffer, 0, 0, 2, ' ', 0)
	format := "Name:\t%v/%v\n"
	// Output field values based on the decoded message type
	format += "Omegaix:\t%v [rad/s]\n"
	format += "Omegaiy:\t%v [rad/s]\n"
	format += "Omegaiz:\t%v [rad/s]\n"
	format += "AccelWeight:\t%v \n"
	format += "RenormVal:\t%v \n"
	format += "ErrorRp:\t%v \n"
	format += "ErrorYaw:\t%v \n"
	fmt.Fprintf(
		writer,
		format,
		m.GetDialect(),
		m.GetMessageName(),
		m.Omegaix,
		m.Omegaiy,
		m.Omegaiz,
		m.AccelWeight,
		m.RenormVal,
		m.ErrorRp,
		m.ErrorYaw,
	)
	writer.Flush()
	// buffer.String() is the idiomatic form of string(buffer.Bytes())
	// (staticcheck S1030); the dead `format := ""` initializer is gone too.
	return buffer.String()
}
// GetVersion gets the MAVLink version of the Message contents: 2 when any
// extension field carries a value, otherwise 1.
func (m *AHRS) GetVersion() int {
	if !m.HasExtensionFieldValues {
		return 1
	}
	return 2
}
// GetDialect gets the name of the dialect that defines the Message.
func (m *AHRS) GetDialect() string {
	return "ardupilotmega"
}
// GetMessageName gets the name of the Message.
func (m *AHRS) GetMessageName() string {
	return "AHRS"
}
// GetID gets the MAVLink message ID of the Message (163 for AHRS).
func (m *AHRS) GetID() uint32 {
	return 163
}
// HasExtensionFields returns true if the message definition contained
// extensions; false otherwise. AHRS defines none.
func (m *AHRS) HasExtensionFields() bool {
	return false
}
// getV1Length returns the fixed V1 payload length in bytes (7 float32
// fields = 28 bytes; the trailing extension-flag byte is excluded).
func (m *AHRS) getV1Length() int {
	return 28
}
// getIOSlice returns a zeroed scratch buffer sized for the full payload
// plus one extra byte backing the HasExtensionFieldValues bool.
func (m *AHRS) getIOSlice() []byte {
	return make([]byte, 28+1)
}
// Read sets the field values of the message from the raw message payload.
// Only frame versions 1 and 2 are accepted; decoding is little-endian in
// struct field order.
func (m *AHRS) Read(frame mavlink2.Frame) (err error) {
	version := frame.GetVersion()
	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}
	// Don't attempt to Read V2 messages from V1 frames
	if m.GetID() > 255 && version < 2 {
		err = mavlink2.ErrDecodeV2MessageV1Frame
		return
	}
	// binary.Read can panic; swallow the panic and return a sane error
	defer func() {
		if r := recover(); r != nil {
			err = mavlink2.ErrPrivateField
		}
	}()
	// Get a slice of bytes long enough for the all the AHRS fields
	// binary.Read requires enough bytes in the reader to read all fields, even if
	// the fields are just zero values. This also simplifies handling MAVLink2
	// extensions and trailing zero truncation.
	ioSlice := m.getIOSlice()
	copy(ioSlice, frame.GetMessageBytes())
	// The extra trailing byte backs the HasExtensionFieldValues bool; set it
	// when a V2 frame can actually carry extension values.
	if version == 2 && m.HasExtensionFields() {
		ioSlice[len(ioSlice)-1] = 1
	}
	reader := bytes.NewReader(ioSlice)
	err = binary.Read(reader, binary.LittleEndian, m)
	return
}
// Write encodes the field values of the message to a byte array using
// little-endian field order. Only versions 1 and 2 are accepted; V1 output
// has a fixed length, V2 output is zero-truncated.
func (m *AHRS) Write(version int) (output []byte, err error) {
	var buffer bytes.Buffer
	// Ensure only Version 1 or Version 2 were specified
	if version != 1 && version != 2 {
		err = mavlink2.ErrUnsupportedVersion
		return
	}
	// Don't attempt to Write V2 messages to V1 bodies
	if m.GetID() > 255 && version < 2 {
		err = mavlink2.ErrEncodeV2MessageV1Frame
		return
	}
	err = binary.Write(&buffer, binary.LittleEndian, *m)
	if err != nil {
		return
	}
	output = buffer.Bytes()
	// V1 uses fixed message lengths and does not include any extension fields
	// Truncate the byte slice to the correct length
	// This also removes the trailing extra byte written for HasExtensionFieldValues
	if version == 1 {
		output = output[:m.getV1Length()]
	}
	// V2 uses variable message lengths and includes extension fields
	// The variable length is caused by truncating any trailing zeroes from
	// the end of the message before it is added to a frame
	if version == 2 {
		// Set HasExtensionFieldValues to zero so that it doesn't interfere with V2 truncation
		output[len(output)-1] = 0
		output = util.TruncateV2(buffer.Bytes())
	}
	return
}
|
package sol
// containsDuplicate reports whether any value occurs more than once in
// nums. It scans once, remembering seen values in a set.
func containsDuplicate(nums []int) bool {
	seen := make(map[int]bool, len(nums))
	for _, n := range nums {
		if seen[n] {
			return true
		}
		seen[n] = true
	}
	return false
}
|
package log
import (
"errors"
"io"
"os"
"testing"
"github.com/stretchr/testify/require"
api "github.com/tkhoa2711/proglog/api/v1"
)
// makeSegment creates a temporary directory and a new segment starting at
// baseOffset. The caller is responsible for removing dir.
func makeSegment(baseOffset uint64) (s *segment, dir string, err error) {
	dir, err = os.MkdirTemp("", "segment-test")
	if err != nil {
		return nil, "", err
	}
	c := Config{}
	c.Segment.MaxIndexBytes = entryWidth * 3
	// Pass the caller-supplied baseOffset; the original hard-coded 16 here,
	// silently ignoring the parameter.
	s, err = newSegment(dir, baseOffset, c)
	if err != nil {
		return nil, "", err
	}
	return s, dir, nil
}
// makeSegmentWithSomeData creates a segment via makeSegment and appends the
// given record to it three times.
func makeSegmentWithSomeData(baseOffset uint64, record *api.Record) (s *segment, dir string, err error) {
	s, dir, err = makeSegment(baseOffset)
	if err != nil {
		return nil, dir, err
	}
	for i := 0; i < 3; i++ {
		if _, err = s.Append(record); err != nil {
			return nil, dir, err
		}
	}
	return s, dir, nil
}
func TestNewSegment(t *testing.T) {
dir, _ := os.MkdirTemp("", "segment-test")
defer os.RemoveAll(dir)
c := Config{}
c.Segment.MaxIndexBytes = entryWidth * 3
s, err := newSegment(dir, 16, c)
require.NoError(t, err)
require.Equal(t, uint64(16), s.baseOffset)
require.Equal(t, uint64(16), s.nextOffset)
}
// TestNewSegmentRehydrateFromExistingState checks that re-opening a segment
// directory rebuilds store and index state from the existing files.
func TestNewSegmentRehydrateFromExistingState(t *testing.T) {
	t.Skip("FIXME")
	baseOffset := uint64(16)
	record := &api.Record{Value: []byte("Hello World!")}
	s, dir, err := makeSegmentWithSomeData(baseOffset, record)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c := Config{}
	c.Segment.MaxStoreBytes = uint64(len(record.Value) * 3)
	c.Segment.MaxIndexBytes = entryWidth * 3
	// Open a second segment over the same directory.
	reopened, err := newSegment(dir, baseOffset, c)
	require.NoError(t, err)
	require.Equal(t, s.index.size, reopened.index.size)
	require.Equal(t, s.store.size, reopened.store.size)
}
// TestSegmentAppend verifies appends return consecutive offsets starting at
// the segment's base offset.
func TestSegmentAppend(t *testing.T) {
	baseOffset := uint64(16)
	s, dir, err := makeSegment(baseOffset)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	record := &api.Record{Value: []byte("Hello World!")}
	for i := uint64(0); i < 3; i++ {
		got, err := s.Append(record)
		require.NoError(t, err)
		require.Equal(t, baseOffset+i, got)
	}
}
// TestSegmentReadAfterAppend verifies every appended record can be read back
// by its absolute offset.
func TestSegmentReadAfterAppend(t *testing.T) {
	baseOffset := uint64(16)
	record := &api.Record{Value: []byte("Hello World!")}
	s, dir, err := makeSegmentWithSomeData(baseOffset, record)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	for off := baseOffset; off < baseOffset+3; off++ {
		got, err := s.Read(off)
		require.NoError(t, err)
		require.Equal(t, record.Value, got.Value)
	}
}
// TestSegmentAppendOverLimit verifies that appending past the configured
// index capacity fails with io.EOF.
func TestSegmentAppendOverLimit(t *testing.T) {
	record := &api.Record{Value: []byte("Hello World!")}
	s, dir, err := makeSegmentWithSomeData(uint64(16), record)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	// The index only holds three entries; a fourth append must fail.
	_, err = s.Append(record)
	require.Equal(t, io.EOF, err)
}
// TestSegmentClose verifies a fresh segment closes cleanly.
func TestSegmentClose(t *testing.T) {
	s, dir, err := makeSegment(uint64(16))
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	require.NoError(t, s.Close())
}
// TestSegmentRemove verifies Remove deletes both the store and index files.
func TestSegmentRemove(t *testing.T) {
	baseOffset := uint64(16)
	s, dir, err := makeSegment(baseOffset)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	err = s.Remove()
	require.NoError(t, err)
	// Assert the files are gone. The original wrapped require.NoError in an
	// `if !errors.Is(err, os.ErrNotExist)` guard, which passed a nil error to
	// require.NoError when the file still existed — so the check could never
	// fail. Assert os.ErrNotExist directly instead.
	_, err = os.Stat(s.index.Name())
	require.True(t, errors.Is(err, os.ErrNotExist), "index file should be removed, got err=%v", err)
	_, err = os.Stat(s.store.Name())
	require.True(t, errors.Is(err, os.ErrNotExist), "store file should be removed, got err=%v", err)
}
|
package main
import (
"fmt"
"strings"
)
// main demonstrates flipCase on two sample strings.
func main() {
	for _, sample := range []string{"Hello World", "HaHaHa"} {
		fmt.Println(flipCase(sample))
	}
}
// flipCase returns s with the case of every letter inverted: lowercase
// letters become uppercase and vice versa. Caseless characters (digits,
// punctuation, whitespace) are unchanged.
//
// The original indexed the string byte by byte, which corrupts multi-byte
// UTF-8 characters (string(s[i]) turns a continuation byte into an unrelated
// rune); iterating runes handles them correctly. A strings.Builder replaces
// the quadratic `+=` concatenation.
func flipCase(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	for _, r := range s {
		c := string(r)
		if strings.ToLower(c) == c { // rune is lowercase or has no case
			b.WriteString(strings.ToUpper(c))
		} else {
			b.WriteString(strings.ToLower(c))
		}
	}
	return b.String()
}
package http
import (
"net/http"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/upframe/api"
)
// tokensGet issues a signed JWT when the supplied "password" form value
// matches the configured API password. It returns 401 with no body on a
// mismatch and 200 with the signed token string on success.
func tokensGet(w http.ResponseWriter, r *http.Request, c *api.Config) (int, interface{}, error) {
	password := r.FormValue("password")
	// NOTE(review): plain string comparison of a shared secret; consider
	// crypto/subtle.ConstantTimeCompare and a hashed credential.
	if password != api.Password {
		return http.StatusUnauthorized, nil, nil
	}
	// Token (and cookie) expire 24 hours from now.
	expireToken := time.Now().Add(time.Hour * 24).Unix()
	// We'll manually assign the claims but in production you'd insert values from a database
	claims := jwt.StandardClaims{
		ExpiresAt: expireToken,
		Issuer:    "api.upframe.co",
	}
	// Create the token using your claims
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	// Sign the token with the secret. The original discarded this error and
	// could return an empty token with status 200.
	signedToken, err := token.SignedString(api.JWTSecret)
	if err != nil {
		return http.StatusInternalServerError, nil, err
	}
	return http.StatusOK, signedToken, nil
}
|
package roman_to_integer
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test_romanToInt checks romanToInt against a table of known conversions.
func Test_romanToInt(t *testing.T) {
	for input, want := range map[string]int{
		"III":     3,
		"IV":      4,
		"IX":      9,
		"LVIII":   58,
		"MCMXCIV": 1994,
	} {
		assert.Equal(t, want, romanToInt(input))
	}
}
|
package client
import (
"context"
"errors"
"github.com/wish/ctl/pkg/client/filter"
"github.com/wish/ctl/pkg/client/types"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetCronJob returns a single cron job from the given context/namespace,
// after applying the client's transforms and label filters.
func (c *Client) GetCronJob(contextStr, namespace string, name string, options GetOptions) (*types.CronJobDiscovery, error) {
	iface, err := c.getContextInterface(contextStr)
	if err != nil {
		return nil, err
	}
	// REVIEW: In the future it will be useful to have a function to convert client.GetOptions -> metav1.GetOptions
	cj, err := iface.BatchV1beta1().CronJobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	discovery := types.CronJobDiscovery{contextStr, *cj}
	c.Transform(&discovery)
	if !filter.MatchLabel(discovery, options.LabelMatch) {
		return nil, errors.New("found object does not satisfy filters")
	}
	return &discovery, nil
}
// FindCronJobs simultaneously searches for multiple cron jobs and returns all results
// whose name appears in names, across the (label-filtered) contexts.
func (c *Client) FindCronJobs(contexts []string, namespace string, names []string, options ListOptions) ([]types.CronJobDiscovery, error) {
	if len(contexts) == 0 {
		contexts = c.GetFilteredContexts(options.LabelMatch)
	} else {
		contexts = c.FilterContexts(contexts, options.LabelMatch)
	}
	// Build a set of the requested names for O(1) membership checks.
	wanted := make(map[string]struct{}, len(names))
	for _, n := range names {
		wanted[n] = struct{}{}
	}
	all, err := c.ListCronJobsOverContexts(contexts, namespace, options)
	if err != nil {
		return nil, err
	}
	var matches []types.CronJobDiscovery
	for _, job := range all {
		if _, ok := wanted[job.Name]; ok {
			matches = append(matches, job)
		}
	}
	return matches, nil
}
|
package services
import (
"log"
"time"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/entity"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/repository"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/request"
"github.com/mashingan/smapping"
)
// TransactionServices defines the business-logic operations available for
// transactions.
// NOTE(review): "Trasaction" in two method names is a typo for
// "Transaction", but renaming would break implementers and callers.
type TransactionServices interface {
	// CreateTransaction maps the request onto a new transaction and persists it.
	CreateTransaction(req request.RequestTransaction) (entity.Transaction, error)
	// GetTransactionByEventAndParticipantAndStatusPayment reports whether a
	// matching transaction exists.
	GetTransactionByEventAndParticipantAndStatusPayment(trx entity.Transaction) bool
	GetTransactionByEventID(req request.RequestTransaction) ([]entity.Transaction, error)
	GetPendingTrasaction() ([]entity.Transaction, error)
	GetParticipantPendingTrasaction(req request.RequestParticipantTransaction) ([]entity.Transaction, error)
	UpdateTransaction(req request.RequestTransactionUpdate) (entity.Transaction, error)
}
// transactionServices is the repository-backed implementation of
// TransactionServices.
type transactionServices struct {
	transactionRepository repository.TransactionRepository
}
// NewTransactionServices constructs a transaction service backed by the
// given repository.
func NewTransactionServices(transactionRepository repository.TransactionRepository) *transactionServices {
	return &transactionServices{transactionRepository: transactionRepository}
}
// CreateTransaction maps the incoming request onto a Transaction entity,
// stamps its creation time and persists it via the repository.
func (s *transactionServices) CreateTransaction(req request.RequestTransaction) (entity.Transaction, error) {
	var trx entity.Transaction
	// Return the mapping error instead of log.Fatalf, which terminated the
	// whole process on a single bad request.
	if err := smapping.FillStruct(&trx, smapping.MapFields(&req)); err != nil {
		log.Printf("Failed to map %v", err)
		return trx, err
	}
	trx.CreatedAt = time.Now()
	createTrx, err := s.transactionRepository.InsertTransaction(trx)
	if err != nil {
		return trx, err
	}
	return createTrx, nil
}
// GetTransactionByEventAndParticipantAndStatusPayment reports whether the
// repository lookup for a matching transaction succeeds.
func (s *transactionServices) GetTransactionByEventAndParticipantAndStatusPayment(trx entity.Transaction) bool {
	if err := s.transactionRepository.GetTransactionByEventAndParticipantAndStatusPayment(trx); err != nil {
		return false
	}
	return true
}
// GetTransactionByEventID returns all transactions for the event named in
// the request.
func (s *transactionServices) GetTransactionByEventID(req request.RequestTransaction) ([]entity.Transaction, error) {
	filter := entity.Transaction{}
	filter.EventId = int(req.EventId)
	// The repository result and error are forwarded unchanged.
	return s.transactionRepository.GetTransactionByEventID(filter)
}
// GetPendingTrasaction returns all transactions whose payment status is
// still empty (pending).
func (s *transactionServices) GetPendingTrasaction() ([]entity.Transaction, error) {
	var trx entity.Transaction
	trx.StatusPayment = ""
	result, err := s.transactionRepository.GetTransactionByStatusPayment(trx)
	// The original had the condition inverted (`if err == nil { return ... }`
	// followed by `return result, nil`), which silently swallowed repository
	// errors; propagate them instead.
	if err != nil {
		return result, err
	}
	return result, nil
}
// GetParticipantPendingTrasaction returns all pending (empty payment status)
// transactions for the participant named in the request.
func (s *transactionServices) GetParticipantPendingTrasaction(req request.RequestParticipantTransaction) ([]entity.Transaction, error) {
	var trx entity.Transaction
	trx.StatusPayment = ""
	trx.ParticipantId = int(req.ParticipantId)
	result, err := s.transactionRepository.GetTransactionByStatusPayment(trx)
	// The original returned `result, nil` on error, swallowing repository
	// failures; propagate the error instead.
	if err != nil {
		return result, err
	}
	return result, nil
}
// UpdateTransaction maps the update request onto a Transaction entity,
// stamps its update time and persists the change via the repository.
func (s *transactionServices) UpdateTransaction(req request.RequestTransactionUpdate) (entity.Transaction, error) {
	var trx entity.Transaction
	// Return the mapping error instead of log.Fatalf, which terminated the
	// whole process on a single bad request.
	if err := smapping.FillStruct(&trx, smapping.MapFields(&req)); err != nil {
		log.Printf("Failed to map %v", err)
		return trx, err
	}
	trx.UpdatedAt = time.Now()
	update, err := s.transactionRepository.UpdateTransaction(trx)
	if err != nil {
		return trx, err
	}
	return update, nil
}
|
package config
import (
"io/ioutil"
"gopkg.in/yaml.v3"
)
// configModel caches the most recently parsed configuration.
// NOTE(review): package-level mutable state shared by all NewConfig calls.
var configModel *Config
// NewConfig gets the configuration based on the environment passed. It reads
// config/tier/<env>.yaml and unmarshals it into the package-level model.
func NewConfig(env string) (IConfig, error) {
	configFile := "config/tier/" + env + ".yaml"
	raw, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, err
	}
	if err = yaml.Unmarshal(raw, &configModel); err != nil {
		return nil, err
	}
	return &IConfigModel{model: configModel}, nil
}
// Get implements the interface function for IConfig and returns the parsed
// configuration model.
func (ic *IConfigModel) Get() *Config {
	return ic.model
}
|
// Copyright (C) 2015 Nicolas Lamirault <nicolas.lamirault@gmail.com>
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gitlab
import (
"strings"
)
// Repository represents the repository content from webhook
type Repository struct {
	Name        string `json:"name"`
	URL         string `json:"url"`
	Description string `json:"description"`
	Home        string `json:"home"`
	GitHTTPURL  string `json:"git_http_url"`
	GitSSHURL   string `json:"git_ssh_url"`
}

// Commit represents commit information from the webhook
type Commit struct {
	ID        string `json:"id"`
	Message   string `json:"message"`
	Timestamp string `json:"timestamp"`
	URL       string `json:"url"`
	Author    Author `json:"author"`
}

// Author represents author information from the webhook
type Author struct {
	Name  string `json:"name"`
	Email string `json:"email"`
}

// User represents the user information from the webhook
type User struct {
	Name      string `json:"name"`
	Username  string `json:"username"`
	AvatarURL string `json:"avatar_url"`
}

// Attributes represents the object attributes from the webhook
// (the issue-style payload: see IssueWebhook).
type Attributes struct {
	ID          string `json:"id"`
	Title       string `json:"title"`
	AssigneeID  int    `json:"assignee_id"`
	AuthorID    int    `json:"author_id"`
	ProjectID   int    `json:"project_id"`
	Created     string `json:"created_at"`
	Updated     string `json:"updated_at"`
	Position    int    `json:"position"`
	BranchName  string `json:"branch_name"`
	Description string `json:"description"`
	MilestoneID int    `json:"milestone_id"`
	State       string `json:"state"`
	IID         int    `json:"iid"`
	URL         string `json:"url"`
	Action      string `json:"action"`
}

// PushWebhook represents push information from the webhook
type PushWebhook struct {
	Before            string     `json:"before"`
	After             string     `json:"after"`
	Ref               string     `json:"ref"`
	Username          string     `json:"user_name"`
	UserID            int        `json:"user_id"`
	ProjectID         int        `json:"project_id"`
	Repository        Repository `json:"repository"`
	Commits           []Commit   `json:"commits"`
	TotalCommitsCount int        `json:"total_commits_count"`
}

// TagWebhook represents tag information from the webhook.
// NOTE(review): field-for-field identical to PushWebhook; kept separate to
// mirror the distinct GitLab event kinds.
type TagWebhook struct {
	Before            string     `json:"before"`
	After             string     `json:"after"`
	Ref               string     `json:"ref"`
	Username          string     `json:"user_name"`
	UserID            int        `json:"user_id"`
	ProjectID         int        `json:"project_id"`
	Repository        Repository `json:"repository"`
	Commits           []Commit   `json:"commits"`
	TotalCommitsCount int        `json:"total_commits_count"`
}

// IssueWebhook represents issue information from the webhook
type IssueWebhook struct {
	Kind       string     `json:"object_kind"`
	User       User       `json:"user"`
	Attributes Attributes `json:"object_attributes"`
}
// ExtractTagFromRef extracts the tag name, i.e. the final slash-separated
// component of a ref such as "refs/tags/v1.0". A ref without a slash is
// returned unchanged.
func ExtractTagFromRef(ref string) string {
	if i := strings.LastIndex(ref, "/"); i >= 0 {
		return ref[i+1:]
	}
	return ref
}
|
package main
import (
"testing"
)
// Test_findRow checks binary-space-partition row decoding over a table of
// boarding-pass prefixes.
func Test_findRow(t *testing.T) {
	type args struct {
		code string
		min  int
		max  int
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{"F should return 0 for 0-1", args{"F", 0, 1}, 0},
		{"B should return 1 for 0-1", args{"B", 0, 1}, 1},
		{"FB should return 1 for 0-3", args{"FB", 0, 3}, 1},
		{"BFB should return 5 for 0-7", args{"BFB", 0, 7}, 5},
		// The case name previously said 43 while the asserted value is 44;
		// FBFBBFF is binary 0101100 (F=0, B=1) = 44, so 44 is correct.
		{"FBFBBFF should return 44 for 0-127", args{"FBFBBFF", 0, 127}, 44},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := findRow(tt.args.code, tt.args.min, tt.args.max); got != tt.want {
				t.Errorf("findRow(%v, %v, %v) = %v, want %v", tt.args.code, tt.args.min, tt.args.max, got, tt.want)
			}
		})
	}
}
// Test_findColumn checks binary-space-partition column decoding over a table
// of boarding-pass suffixes.
func Test_findColumn(t *testing.T) {
	type args struct {
		code string
		min  int
		max  int
	}
	cases := []struct {
		name string
		args args
		want int
	}{
		{"L should return 0 for 0-1", args{"L", 0, 1}, 0},
		{"R should return 1 for 0-1", args{"R", 0, 1}, 1},
		{"LR should return 1 for 0-3", args{"LR", 0, 3}, 1},
		{"RLR should return 5 for 0-7", args{"RLR", 0, 7}, 5},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := findColumn(tc.args.code, tc.args.min, tc.args.max); got != tc.want {
				t.Errorf("findColumn() = %v, want %v", got, tc.want)
			}
		})
	}
}
func Test_computeID(t *testing.T) {
code := "FBFBBFFRLR"
want := 357
if got := computeID(code); got != want {
t.Errorf("ID of FBFBBFFRLR is %v, expected 357", got)
}
} |
package boshio_test
import (
"net/http"
"net/http/httptest"
"regexp"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"fmt"
"testing"
)
// TestBoshio wires the Ginkgo suite into the standard `go test` runner.
func TestBoshio(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Boshio Suite")
}
var (
	// boshioServer is rebuilt for every spec by the BeforeEach below.
	boshioServer *server
)
// server wraps an httptest.Server plus swappable handlers for each boshio
// endpoint exercised by the suite.
type server struct {
	// NOTE(review): RedirectHandler is never assigned or registered in this
	// file — confirm whether it is still needed.
	RedirectHandler         http.HandlerFunc
	TarballHandler          http.HandlerFunc
	LightAPIHandler         http.HandlerFunc
	HeavyAPIHandler         http.HandlerFunc
	HeavyAndLightAPIHandler http.HandlerFunc
	mux                     *http.ServeMux
	s                       *httptest.Server
}
// Start registers the handler routes on the receiver's mux and starts the
// underlying httptest server.
func (s *server) Start() {
	// Register the receiver's own handlers. The original reached through the
	// package-level boshioServer global, which only behaved correctly because
	// Start was always invoked on that same global.
	s.mux.HandleFunc("/path/to/light-different-stemcell.tgz", s.TarballHandler)
	s.mux.HandleFunc("/path/to/heavy-different-stemcell.tgz", s.TarballHandler)
	s.mux.HandleFunc("/api/v1/stemcells/some-light-stemcell", s.LightAPIHandler)
	s.mux.HandleFunc("/api/v1/stemcells/some-heavy-stemcell", s.HeavyAPIHandler)
	s.mux.HandleFunc("/api/v1/stemcells/some-light-and-heavy-stemcell", s.HeavyAndLightAPIHandler)
	s.s.Start()
}
// Stop shuts down the underlying httptest server and closes its listener.
func (s *server) Stop() {
	s.s.Close()
}
// URL returns the base URL of the running test server, with a trailing slash.
func (s *server) URL() string {
	return fmt.Sprintf("http://%s/", s.s.Listener.Addr())
}
// Rebuild boshioServer (unstarted, with the default handlers) before every
// spec.
var _ = BeforeEach(func() {
	router := http.NewServeMux()
	testServer := httptest.NewUnstartedServer(router)
	boshioServer = &server{
		mux:                     router,
		TarballHandler:          tarballHandler,
		LightAPIHandler:         lightAPIHandler,
		HeavyAPIHandler:         heavyAPIHandler,
		HeavyAndLightAPIHandler: heavyAndLightAPIHandler,
		s:                       testServer,
	}
})
// Shut the test server down after every spec.
var _ = AfterEach(func() {
	boshioServer.Stop()
})
// tarballHandler serves a fake 100-byte stemcell tarball. HEAD requests get
// only a Content-Length header; GETs are answered as 206 partial content
// using the request's Range header.
func tarballHandler(w http.ResponseWriter, req *http.Request) {
	if req.Method == "HEAD" {
		w.Header().Add("Content-Length", "100")
		return
	}
	w.WriteHeader(http.StatusPartialContent)
	ex := regexp.MustCompile(`bytes=(\d+)-(\d+)`)
	matches := ex.FindStringSubmatch(req.Header.Get("Range"))
	// Guard against a missing or malformed Range header; the original
	// indexed matches[1] unconditionally and panicked when
	// FindStringSubmatch returned nil.
	if len(matches) != 3 {
		Fail(fmt.Sprintf("missing or malformed Range header: %q", req.Header.Get("Range")))
		return
	}
	start, err := strconv.Atoi(matches[1])
	if err != nil {
		Fail(err.Error())
	}
	end, err := strconv.Atoi(matches[2])
	if err != nil {
		Fail(err.Error())
	}
	content := []byte("this string is definitely not long enough to be 100 bytes but we get it there with a little bit of..")
	w.Write(content[start : end+1])
}
// lightAPIHandler answers with a stemcell listing containing only a "light"
// variant whose URL points back at this test server's tarball route.
func lightAPIHandler(w http.ResponseWriter, req *http.Request) {
	w.Write([]byte(fmt.Sprintf(`[{
"name": "a stemcell",
"version": "some version",
"light": {
"url": "%spath/to/light-different-stemcell.tgz",
"size": 100,
"md5": "qqqq",
"sha1": "2222",
"sha256": "4444"
}
}]`, boshioServer.URL())))
}
// heavyAPIHandler answers with a stemcell listing containing only a
// "regular" (heavy) variant pointing at this test server's tarball route.
func heavyAPIHandler(w http.ResponseWriter, req *http.Request) {
	w.Write([]byte(fmt.Sprintf(`[{
"regular": {
"url": "%spath/to/heavy-different-stemcell.tgz",
"size": 2000,
"md5": "zzzz",
"sha1": "asdf",
"sha256": "qwerty"
}
}]`, boshioServer.URL())))
}
// heavyAndLightAPIHandler answers with a stemcell listing that carries both
// a "regular" (heavy) and a "light" variant.
func heavyAndLightAPIHandler(w http.ResponseWriter, req *http.Request) {
	w.Write([]byte(fmt.Sprintf(`[{
"regular": {
"url": "%spath/to/heavy-different-stemcell.tgz",
"size": 2000,
"md5": "zzzz",
"sha1": "asdf",
"sha256": "qwerty"
},
"light": {
"url": "%spath/to/light-different-stemcell.tgz",
"size": 100,
"md5": "qqqq",
"sha1": "2222",
"sha256": "4444"
}
}]`, boshioServer.URL(), boshioServer.URL())))
}
func serverPath(path string) string {
return fmt.Sprintf("%s%s", boshioServer.URL(), path)
}
|
package main
import (
"fmt"
"html/template"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
)
// lsmodSpaceRegexp collapses runs of whitespace; compiled once at package
// scope instead of on every call.
var lsmodSpaceRegexp = regexp.MustCompile(`\s+`)

// lsmodLineToKernelModule parses one header-stripped lsmod output line of
// the form "name size used-by-count [comma-separated-users]" into a
// KernelModule, assigning index as its Id.
//
// Cleanups vs. the original: the empty `if elementIndex == 0 {}` dead code
// is removed, the redundant `break` statements are dropped (Go switch cases
// do not fall through), and the regexp is hoisted out of the hot path.
func lsmodLineToKernelModule(index int, line string) KernelModule {
	line = lsmodSpaceRegexp.ReplaceAllString(line, " ")
	fields := strings.Split(line, " ")
	var km KernelModule
	for i, field := range fields {
		switch i {
		case 0:
			km.Name = field
		case 2:
			// Column 2 is the "used by" reference count; a parse error
			// leaves the zero value, matching the original behavior.
			km.UsedByCount, _ = strconv.ParseInt(field, 10, 64)
		case 3:
			km.UsedBySlice = strings.Split(field, ",")
		}
	}
	km.Id = index
	return km
}
// runLsmod executes lsmod via bash and returns its output split into lines,
// with the header line removed. Execution errors are ignored and yield an
// empty result.
func runLsmod() []string {
	out, _ := exec.Command("bash", "-c", "lsmod").Output()
	lines := strings.Split(string(out), "\n")
	return lines[1:]
}
// readLsmod parses lsmod output into KernelModules. An optional first
// argument caps how many modules are returned; by default all are parsed.
func readLsmod(maxModulesOptional ...int) []KernelModule {
	maxModules := math.MaxInt32
	if len(maxModulesOptional) > 0 {
		maxModules = maxModulesOptional[0]
	}
	lines := runLsmod()
	var modules []KernelModule
	// The final element after splitting on "\n" is the empty trailing line;
	// drop it. Ids are 1-based.
	for i, line := range lines[:len(lines)-1] {
		if i >= maxModules {
			break
		}
		modules = append(modules, lsmodLineToKernelModule(i+1, line))
	}
	return modules
}
// KernelModule is one parsed row of lsmod output.
type KernelModule struct {
	Name        string   // module name (column 0)
	UsedByCount int64    // "used by" reference count (column 2)
	UsedBySlice []string // names of modules using this one (column 3)
	Id          int      // 1-based position in the lsmod listing
}
// get_nodes_string renders the modules as a vis.js DataSet of nodes and, as
// a side effect, appends each module to the package-level nodes slice.
func get_nodes_string(modules []KernelModule) string {
	var b strings.Builder
	b.WriteString("\nvar nodes = new vis.DataSet([")
	for _, m := range modules {
		b.WriteString("\n\t{id: " + strconv.Itoa(m.Id) + ", label: '" + m.Name + "'},")
		nodes = append(nodes, Node{m.Id, m.Name})
	}
	b.WriteString("\n]);")
	return b.String()
}
// get_edges_string renders the module dependency edges as a vis.js DataSet
// and, as a side effect, appends each edge to the package-level edges slice.
// Requires set_maps to have populated name_to_id first.
func get_edges_string(modules []KernelModule) string {
	var b strings.Builder
	b.WriteString("\nvar edges = new vis.DataSet([")
	edgesSet := map[string]string{}
	for _, m := range modules {
		for _, user := range m.UsedBySlice {
			edgesSet[user] = m.Name
			b.WriteString("\n\t{from: " + strconv.Itoa(name_to_id[user]) + ", to: " + strconv.Itoa(name_to_id[m.Name]) + ", arrows:'to'},")
			edges = append(edges, Edge{name_to_id[user], name_to_id[m.Name]})
		}
	}
	b.WriteString("\n]);")
	return b.String()
}
// create_html wraps the generated nodes/edges JavaScript in a complete
// standalone HTML page that loads vis.js assets from ./vis/.
// NOTE(review): the <style> block below contains a stray extra "}" after the
// #mynetwork rule — browsers tolerate it but it is invalid CSS; confirm and
// remove it in the template text.
func create_html(nodes_and_edges string) string {
	var html string = `
<!doctype html>
<html>
<head>
<title>lsmod graph</title>
<script type="text/javascript" src="./vis/vis.js"></script>
<link href="./vis/vis-network.min.css" rel="stylesheet" type="text/css" />
<style type="text/css">
#mynetwork {
width: 70%;
height: 800px;
margin: auto;
border: 3px solid green;
padding: 10px;
}
}
</style>
</head>
<body>
<p>
lsmod:
</p>
<div id="mynetwork"></div>
<script type="text/javascript">
// create an array with nodes
`
	html += nodes_and_edges
	html += `
// create a network
var container = document.getElementById('mynetwork');
var data = {
nodes: nodes,
edges: edges
};
var options = {
};
var network = new vis.Network(container, data, options);
</script>
</body>
</html>
`
	return html
}
func create_html_file(fileName string, htmlString string) {
f, err := os.Create(fileName)
if err != nil {
fmt.Println(err)
}
defer f.Close()
f.WriteString(htmlString)
f.Sync()
}
// id_to_name and name_to_id are lookup tables between module Ids and names,
// populated by set_maps.
var id_to_name = make(map[int]string)
var name_to_id = make(map[string]int)

// Node is one graph vertex passed to the layout.html template.
type Node struct {
	Id    int
	Label string
}

// MyData bundles the graph data rendered by http_server's template.
type MyData struct {
	Nodes []Node
	Edges []Edge
}

// Edge is one directed graph edge (From uses To).
type Edge struct {
	From int
	To   int
	//Arrows string
}

// nodes and edges accumulate the graph as get_nodes_string and
// get_edges_string run; http_server serves them.
var nodes []Node
var edges []Edge
// set_maps populates the package-level id<->name lookup tables from the
// parsed modules. It must run before get_nodes_string/get_edges_string so
// that name_to_id lookups resolve.
func set_maps(modules []KernelModule) {
	for i := range modules {
		id_to_name[modules[i].Id] = modules[i].Name
		name_to_id[modules[i].Name] = modules[i].Id
	}
}
// http_server1 is an alternative HTTP server that injects the pre-rendered
// nodes/edges JavaScript into layout.html as a raw string.
// NOTE(review): unused — main calls http_server instead; the Edges template
// field is never populated, and the ListenAndServe error is discarded.
func http_server1(nodes_and_edges string, port int) {
	tmpl := template.Must(template.ParseFiles("./layout.html"))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Only NodesAndEdges is filled; Edges stays its zero value.
		data :=
			struct {
				NodesAndEdges string
				Edges         string
			}{
				NodesAndEdges: nodes_and_edges,
			}
		tmpl.Execute(w, data)
	})
	port_str := ":" + strconv.Itoa(port)
	http.ListenAndServe(port_str, nil)
}
// http_server serves layout.html rendered with the in-memory nodes/edges
// data, plus the static vis.js assets under /vis/. It blocks until the
// listener fails.
func http_server(port int) {
	fs := http.FileServer(http.Dir("vis"))
	http.Handle("/vis/", http.StripPrefix("/vis/", fs))
	tmpl := template.Must(template.ParseFiles("./layout.html"))
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		data := MyData{
			Nodes: nodes,
			Edges: edges,
		}
		// NOTE(review): tmpl.Execute's error is discarded; consider logging it.
		tmpl.Execute(w, data)
	})
	port_str := ":" + strconv.Itoa(port)
	err := http.ListenAndServe(port_str, nil)
	fmt.Println(err)
}
// main builds the module graph from the first 20 lsmod entries, writes a
// standalone lsmod.html, then serves the same data on :8080.
func main() {
	modules := readLsmod(20)
	// set_maps must run before the get_*_string calls so that name_to_id
	// lookups resolve when edges are generated.
	set_maps(modules)
	nodes_and_edges := get_nodes_string(modules)
	nodes_and_edges += get_edges_string(modules)
	htmlString := create_html(nodes_and_edges)
	create_html_file("lsmod.html", htmlString)
	http_server(8080)
}
|
package caching
import "errors"
var (
	// ErrCacheUnavailable signals that the underlying cache backend cannot
	// currently be reached.
	ErrCacheUnavailable = errors.New("cache unavailable")
)

// Cache is a minimal string key/value cache abstraction.
type Cache interface {
	// IsOk reports whether the cache backend is currently usable.
	IsOk() bool
	Get(key string) (string, error)
	Set(key string, value string) error
	Del(key string) error
}
|
package service
import (
"demo/grpc_test/proto/helloworld"
"golang.org/x/net/context"
"log"
)
// GreeterServer implements the helloworld Greeter gRPC service.
type GreeterServer struct {
}
// SayHello logs the incoming request and replies with a greeting built from
// the request's name.
// NOTE(review): there is no space between "Hello" and the name; confirm the
// greeting format is intentional before changing the string.
func (g *GreeterServer) SayHello(ctx context.Context, req *helloworld.HelloRequest) (*helloworld.HelloReply, error) {
	log.Println(req)
	reply := &helloworld.HelloReply{Message: "Hello" + req.Name}
	return reply, nil
}
|
package main
// trap computes how much rain water the elevation map can hold, using the
// two-pointer technique: advance whichever side has the lower wall, tracking
// the running maximum on each side; any bar below its side's maximum
// accumulates the difference. O(n) time, O(1) space.
func trap(height []int) int {
	left, right := 0, len(height)-1
	leftMax, rightMax := 0, 0
	water := 0
	for left < right {
		if height[left] < height[right] {
			if height[left] >= leftMax {
				leftMax = height[left]
			} else {
				water += leftMax - height[left]
			}
			left++
			continue
		}
		if height[right] >= rightMax {
			rightMax = height[right]
		} else {
			water += rightMax - height[right]
		}
		right--
	}
	return water
}
// main is intentionally empty; trap above is the unit under exercise.
func main() {
}
|
package main
import (
"fmt"
"github.com/go-redis/redis"
)
// rdb is the global Redis client, initialized by initClient.
var rdb *redis.Client
// initClient initializes the global Redis client and verifies the
// connection with a PING.
func initClient() (err error) {
	// redis.Options carries the connection configuration.
	rdb = redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "", // no password set
		DB:       1,  // NOTE(review): selects DB 1, not DB 0 as the original "use default DB" comment claimed
	})
	_, err = rdb.Ping().Result()
	if err != nil {
		return err
	}
	return nil
}
// redisExample2 demonstrates sorted-set operations against the global
// client: ZADD, ZINCRBY, ZREVRANGE WITHSCORES and ZRANGEBYSCORE WITHSCORES.
// Failures are printed and abort the demo early.
func redisExample2() {
	zsetKey := "language_rank"
	languages := []redis.Z{
		redis.Z{Score: 90.0, Member: "Golang"},
		redis.Z{Score: 98.0, Member: "Java"},
		redis.Z{Score: 95.0, Member: "Python"},
		redis.Z{Score: 97.0, Member: "JavaScript"},
		redis.Z{Score: 99.0, Member: "C/C++"},
	}
	// ZADD: insert all members with their scores.
	num, err := rdb.ZAdd(zsetKey, languages...).Result()
	if err != nil {
		fmt.Printf("zadd failed, err:%v\n", err)
		return
	}
	fmt.Printf("zadd %d succ.\n", num)
	// Increase Golang's score by 10.
	newScore, err := rdb.ZIncrBy(zsetKey, 10.0, "Golang").Result()
	if err != nil {
		fmt.Printf("zincrby failed, err:%v\n", err)
		return
	}
	fmt.Printf("Golang's score is %f now.\n", newScore)
	// Fetch the three highest-scoring members.
	ret, err := rdb.ZRevRangeWithScores(zsetKey, 0, 2).Result()
	if err != nil {
		fmt.Printf("zrevrange failed, err:%v\n", err)
		return
	}
	for _, z := range ret {
		fmt.Println(z.Member, z.Score)
	}
	// Fetch the members scoring between 95 and 100.
	op := redis.ZRangeBy{
		Min: "95",
		Max: "100",
	}
	ret, err = rdb.ZRangeByScoreWithScores(zsetKey, op).Result()
	if err != nil {
		fmt.Printf("zrangebyscore failed, err:%v\n", err)
		return
	}
	for _, z := range ret {
		fmt.Println(z.Member, z.Score)
	}
}
// main initializes the Redis connection and reports success or failure (the
// runtime log strings are Chinese: "初始化失败" = init failed,
// "初始化...成功" = init succeeded).
// NOTE(review): redisExample2 is defined above but never invoked here.
func main() {
	err := initClient()
	if err != nil {
		fmt.Println("Redis初始化失败,", err)
	} else {
		fmt.Println("初始化Redis连接成功")
	}
}
package main
import (
"fmt"
"strings"
"github.com/fiorix/wsdl2go/soap"
)
// Namespace is the target XML namespace of the RATP wsiv service;
// auto-generated from WSDL.
var Namespace = "http://wsiv.ratp.fr"
// NewWsivPortType creates and initializes a WsivPortType backed by the given
// SOAP client.
func NewWsivPortType(cli *soap.Client) WsivPortType {
	return &wsivPortType{cli}
}
// WsivPortType was auto-generated from WSDL
// and defines the interface for the remote service. Useful for testing.
type WsivPortType interface {
	// GetDirections was auto-generated from WSDL.
	GetDirections(parameters *GetDirections) (*GetDirectionsResponse, error)
	// GetGeoPoints was auto-generated from WSDL.
	GetGeoPoints(parameters *GetGeoPoints) (*GetGeoPointsResponse, error)
	// GetLines was auto-generated from WSDL.
	GetLines(parameters *GetLines) (*GetLinesResponse, error)
	// GetMission was auto-generated from WSDL.
	GetMission(parameters *GetMission) (*GetMissionResponse, error)
	// GetMissionsFirstLast was auto-generated from WSDL.
	GetMissionsFirstLast(parameters *GetMissionsFirstLast) (*GetMissionsFirstLastResponse, error)
	// GetMissionsFrequency was auto-generated from WSDL.
	GetMissionsFrequency(parameters *GetMissionsFrequency) (*GetMissionsFrequencyResponse, error)
	// GetMissionsNext was auto-generated from WSDL.
	GetMissionsNext(parameters *GetMissionsNext) (*GetMissionsNextResponse, error)
	// GetPerturbations was auto-generated from WSDL.
	GetPerturbations(parameters *GetPerturbations) (*GetPerturbationsResponse, error)
	// GetStations was auto-generated from WSDL.
	GetStations(parameters *GetStations) (*GetStationsResponse, error)
	// GetVersion was auto-generated from WSDL.
	GetVersion() (*GetVersionResponse, error)
}
// Direction was auto-generated from WSDL.
// NOTE(review): the Line field's xml tag is namespace-qualified while most
// sibling fields are not — this mirrors the generated WSDL bindings; do not
// "normalize" without re-checking the service schema.
type Direction struct {
	Line *Line `xml:"http://wsiv.ratp.fr/xsd line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
	Name *string `xml:"http://wsiv.ratp.fr/xsd name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	Sens *string `xml:"http://wsiv.ratp.fr/xsd sens,omitempty" json:"sens,omitempty" yaml:"sens,omitempty"`
	StationsEndLine []*Station `xml:"http://wsiv.ratp.fr/xsd stationsEndLine,omitempty" json:"stationsEndLine,omitempty" yaml:"stationsEndLine,omitempty"`
}

// GeoPoint was auto-generated from WSDL.
type GeoPoint struct {
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	NameSuffix *string `xml:"nameSuffix,omitempty" json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"`
	Type *string `xml:"type,omitempty" json:"type,omitempty" yaml:"type,omitempty"`
	X *float64 `xml:"x,omitempty" json:"x,omitempty" yaml:"x,omitempty"`
	Y *float64 `xml:"y,omitempty" json:"y,omitempty" yaml:"y,omitempty"`
}

// Itinerary was auto-generated from WSDL.
type Itinerary struct {
	DateEnd *string `xml:"dateEnd,omitempty" json:"dateEnd,omitempty" yaml:"dateEnd,omitempty"`
	DateStart *string `xml:"dateStart,omitempty" json:"dateStart,omitempty" yaml:"dateStart,omitempty"`
	DurationsTransit []*int `xml:"durationsTransit,omitempty" json:"durationsTransit,omitempty" yaml:"durationsTransit,omitempty"`
	GeoPointEnd *GeoPoint `xml:"geoPointEnd,omitempty" json:"geoPointEnd,omitempty" yaml:"geoPointEnd,omitempty"`
	GeoPointStart *GeoPoint `xml:"geoPointStart,omitempty" json:"geoPointStart,omitempty" yaml:"geoPointStart,omitempty"`
	Missions []*Mission `xml:"missions,omitempty" json:"missions,omitempty" yaml:"missions,omitempty"`
	Tarif *Tarif `xml:"tarif,omitempty" json:"tarif,omitempty" yaml:"tarif,omitempty"`
}

// Line was auto-generated from WSDL.
// ComputedCode is a locally added (non-WSDL) field; see GetComputedCode.
type Line struct {
	Code *string `xml:"code,omitempty" json:"code,omitempty" yaml:"code,omitempty"`
	CodeStif *string `xml:"codeStif,omitempty" json:"codeStif,omitempty" yaml:"codeStif,omitempty"`
	Id *string `xml:"http://wsiv.ratp.fr/xsd id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Image *string `xml:"image,omitempty" json:"image,omitempty" yaml:"image,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	Realm *string `xml:"realm,omitempty" json:"realm,omitempty" yaml:"realm,omitempty"`
	Reseau *Reseau `xml:"reseau,omitempty" json:"reseau,omitempty" yaml:"reseau,omitempty"`
	ComputedCode string `xml:"computedCode,omitempty" json:"computedCode,omitempty"`
}
// GetComputedCode returns the line code prefixed by a letter identifying the
// network: "M" plus the uppercased code for metro, "R" for rer, "B" for
// tram, and the bare code for anything else.
// NOTE(review): "tram" mapping to prefix "B" looks suspicious — confirm
// against the service's conventions.
func (line *Line) GetComputedCode() string {
	// All fields of this auto-generated struct are optional pointers; the
	// original dereferenced line.Reseau.Code and line.Code unconditionally
	// and panicked when any of them was nil.
	if line == nil || line.Code == nil {
		return ""
	}
	if line.Reseau == nil || line.Reseau.Code == nil {
		return *line.Code
	}
	switch *line.Reseau.Code {
	case "metro":
		return "M" + strings.ToUpper(*line.Code)
	case "rer":
		return "R" + *line.Code
	case "tram":
		return "B" + *line.Code
	default:
		return *line.Code
	}
}
// Mission was auto-generated from WSDL.
// The parallel Stations* slices are index-aligned with Stations.
type Mission struct {
	Code *string `xml:"code,omitempty" json:"code,omitempty" yaml:"code,omitempty"`
	Direction *Direction `xml:"direction,omitempty" json:"direction,omitempty" yaml:"direction,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Line *Line `xml:"line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
	Perturbations []*Perturbation `xml:"perturbations,omitempty" json:"perturbations,omitempty" yaml:"perturbations,omitempty"`
	StationEndLine *Station `xml:"stationEndLine,omitempty" json:"stationEndLine,omitempty" yaml:"stationEndLine,omitempty"`
	Stations []*Station `xml:"stations,omitempty" json:"stations,omitempty" yaml:"stations,omitempty"`
	StationsDates []*string `xml:"stationsDates,omitempty" json:"stationsDates,omitempty" yaml:"stationsDates,omitempty"`
	StationsMessages []*string `xml:"stationsMessages,omitempty" json:"stationsMessages,omitempty" yaml:"stationsMessages,omitempty"`
	StationsPlatforms []*string `xml:"stationsPlatforms,omitempty" json:"stationsPlatforms,omitempty" yaml:"stationsPlatforms,omitempty"`
	StationsStops []*bool `xml:"stationsStops,omitempty" json:"stationsStops,omitempty" yaml:"stationsStops,omitempty"`
}

// Perturbation was auto-generated from WSDL.
type Perturbation struct {
	Cause *PerturbationCause `xml:"cause,omitempty" json:"cause,omitempty" yaml:"cause,omitempty"`
	Consequence *PerturbationConsequence `xml:"consequence,omitempty" json:"consequence,omitempty" yaml:"consequence,omitempty"`
	DateEnd *string `xml:"dateEnd,omitempty" json:"dateEnd,omitempty" yaml:"dateEnd,omitempty"`
	DateStart *string `xml:"dateStart,omitempty" json:"dateStart,omitempty" yaml:"dateStart,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Incidents []*PerturbationIncident `xml:"incidents,omitempty" json:"incidents,omitempty" yaml:"incidents,omitempty"`
	Level *string `xml:"level,omitempty" json:"level,omitempty" yaml:"level,omitempty"`
	Line *Line `xml:"http://wsiv.ratp.fr/xsd line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
	Media *string `xml:"media,omitempty" json:"media,omitempty" yaml:"media,omitempty"`
	Message *PerturbationMessage `xml:"message,omitempty" json:"message,omitempty" yaml:"message,omitempty"`
	Source *string `xml:"source,omitempty" json:"source,omitempty" yaml:"source,omitempty"`
	Station *Station `xml:"station,omitempty" json:"station,omitempty" yaml:"station,omitempty"`
	Status *string `xml:"status,omitempty" json:"status,omitempty" yaml:"status,omitempty"`
	TimeEnd *string `xml:"timeEnd,omitempty" json:"timeEnd,omitempty" yaml:"timeEnd,omitempty"`
	TimeStart *string `xml:"timeStart,omitempty" json:"timeStart,omitempty" yaml:"timeStart,omitempty"`
	Title *string `xml:"title,omitempty" json:"title,omitempty" yaml:"title,omitempty"`
}

// PerturbationCause was auto-generated from WSDL.
type PerturbationCause struct {
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	ParentId *string `xml:"parentId,omitempty" json:"parentId,omitempty" yaml:"parentId,omitempty"`
	ParentName *string `xml:"parentName,omitempty" json:"parentName,omitempty" yaml:"parentName,omitempty"`
}

// PerturbationConsequence was auto-generated from WSDL.
type PerturbationConsequence struct {
	Code *string `xml:"code,omitempty" json:"code,omitempty" yaml:"code,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Level *string `xml:"level,omitempty" json:"level,omitempty" yaml:"level,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
}

// PerturbationIncident was auto-generated from WSDL.
type PerturbationIncident struct {
	Date *string `xml:"date,omitempty" json:"date,omitempty" yaml:"date,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	IncidentLines []*PerturbationIncidentLine `xml:"incidentLines,omitempty" json:"incidentLines,omitempty" yaml:"incidentLines,omitempty"`
	MessageGlobal *PerturbationMessage `xml:"messageGlobal,omitempty" json:"messageGlobal,omitempty" yaml:"messageGlobal,omitempty"`
	Status *string `xml:"status,omitempty" json:"status,omitempty" yaml:"status,omitempty"`
}
// PerturbationIncidentLine was auto-generated from WSDL.
type PerturbationIncidentLine struct {
Consequence *PerturbationConsequence `xml:"consequence,omitempty" json:"consequence,omitempty" yaml:"consequence,omitempty"`
Line *Line `xml:"line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
MessageLarge *PerturbationMessage `xml:"messageLarge,omitempty" json:"messageLarge,omitempty" yaml:"messageLarge,omitempty"`
MessageMedium *PerturbationMessage `xml:"messageMedium,omitempty" json:"messageMedium,omitempty" yaml:"messageMedium,omitempty"`
MessageShort *PerturbationMessage `xml:"messageShort,omitempty" json:"messageShort,omitempty" yaml:"messageShort,omitempty"`
Stations []*Station `xml:"stations,omitempty" json:"stations,omitempty" yaml:"stations,omitempty"`
}
// PerturbationMessage was auto-generated from WSDL.
type PerturbationMessage struct {
MediaSpecific *bool `xml:"mediaSpecific,omitempty" json:"mediaSpecific,omitempty" yaml:"mediaSpecific,omitempty"`
Text *string `xml:"text,omitempty" json:"text,omitempty" yaml:"text,omitempty"`
Type *string `xml:"type,omitempty" json:"type,omitempty" yaml:"type,omitempty"`
Updated *bool `xml:"updated,omitempty" json:"updated,omitempty" yaml:"updated,omitempty"`
}
// Reseau was auto-generated from WSDL.
// It identifies a transport network (code, id, display image and name).
type Reseau struct {
	Code *string `xml:"http://wsiv.ratp.fr/xsd code,omitempty" json:"code,omitempty" yaml:"code,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Image *string `xml:"image,omitempty" json:"image,omitempty" yaml:"image,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
}
// Station was auto-generated from WSDL.
// It describes one stop on a line; the A/R suffixes distinguish the two
// travel directions ("aller"/"retour") for geo points and next-station ids.
type Station struct {
	Direction *Direction `xml:"http://wsiv.ratp.fr/xsd direction,omitempty" json:"direction,omitempty" yaml:"direction,omitempty"`
	GeoPointA *GeoPoint `xml:"geoPointA,omitempty" json:"geoPointA,omitempty" yaml:"geoPointA,omitempty"`
	GeoPointR *GeoPoint `xml:"geoPointR,omitempty" json:"geoPointR,omitempty" yaml:"geoPointR,omitempty"`
	Id *string `xml:"http://wsiv.ratp.fr/xsd id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	IdsNextA []*string `xml:"idsNextA,omitempty" json:"idsNextA,omitempty" yaml:"idsNextA,omitempty"`
	IdsNextR []*string `xml:"idsNextR,omitempty" json:"idsNextR,omitempty" yaml:"idsNextR,omitempty"`
	Line *Line `xml:"http://wsiv.ratp.fr/xsd line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
	Name *string `xml:"http://wsiv.ratp.fr/xsd name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	StationArea *StationArea `xml:"stationArea,omitempty" json:"stationArea,omitempty" yaml:"stationArea,omitempty"`
}
// StationAcces was auto-generated from WSDL.
// It describes one physical access (entrance) of a station, with its
// opening window and coordinates.
type StationAcces struct {
	Address *string `xml:"address,omitempty" json:"address,omitempty" yaml:"address,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Index *string `xml:"index,omitempty" json:"index,omitempty" yaml:"index,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	TimeDaysLabel *string `xml:"timeDaysLabel,omitempty" json:"timeDaysLabel,omitempty" yaml:"timeDaysLabel,omitempty"`
	TimeDaysStatus *string `xml:"timeDaysStatus,omitempty" json:"timeDaysStatus,omitempty" yaml:"timeDaysStatus,omitempty"`
	TimeEnd *string `xml:"timeEnd,omitempty" json:"timeEnd,omitempty" yaml:"timeEnd,omitempty"`
	TimeStart *string `xml:"timeStart,omitempty" json:"timeStart,omitempty" yaml:"timeStart,omitempty"`
	X *float64 `xml:"x,omitempty" json:"x,omitempty" yaml:"x,omitempty"`
	Y *float64 `xml:"y,omitempty" json:"y,omitempty" yaml:"y,omitempty"`
}
// StationArea was auto-generated from WSDL.
// It groups the stations, accesses and fare information of one station area.
type StationArea struct {
	Access []*StationAcces `xml:"access,omitempty" json:"access,omitempty" yaml:"access,omitempty"`
	Id *string `xml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"`
	Name *string `xml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"`
	Stations []*Station `xml:"stations,omitempty" json:"stations,omitempty" yaml:"stations,omitempty"`
	TarifsToParis []*Tarif `xml:"tarifsToParis,omitempty" json:"tarifsToParis,omitempty" yaml:"tarifsToParis,omitempty"`
	ZoneCarteOrange *string `xml:"zoneCarteOrange,omitempty" json:"zoneCarteOrange,omitempty" yaml:"zoneCarteOrange,omitempty"`
}
// Tarif was auto-generated from WSDL.
// It is one fare (full and half price) via a given line or network.
type Tarif struct {
	DemiTarif *float64 `xml:"demiTarif,omitempty" json:"demiTarif,omitempty" yaml:"demiTarif,omitempty"`
	PleinTarif *float64 `xml:"pleinTarif,omitempty" json:"pleinTarif,omitempty" yaml:"pleinTarif,omitempty"`
	ViaLine *Line `xml:"viaLine,omitempty" json:"viaLine,omitempty" yaml:"viaLine,omitempty"`
	ViaReseau *Reseau `xml:"viaReseau,omitempty" json:"viaReseau,omitempty" yaml:"viaReseau,omitempty"`
}
// The Wr* types below are the service's result wrappers: each carries the
// actual payload plus the echoed request arguments and any disambiguation
// candidates ("ambiguous*" fields) when the request matched several entities.
// WrDirections was auto-generated from WSDL.
type WrDirections struct {
	AmbiguityMessage *string `xml:"ambiguityMessage,omitempty" json:"ambiguityMessage,omitempty" yaml:"ambiguityMessage,omitempty"`
	AmbiguousLines []*Line `xml:"ambiguousLines,omitempty" json:"ambiguousLines,omitempty" yaml:"ambiguousLines,omitempty"`
	ArgumentLine *Line `xml:"argumentLine,omitempty" json:"argumentLine,omitempty" yaml:"argumentLine,omitempty"`
	Directions []*Direction `xml:"directions,omitempty" json:"directions,omitempty" yaml:"directions,omitempty"`
}
// WrItineraries was auto-generated from WSDL.
type WrItineraries struct {
	AmbiguityMessage *string `xml:"ambiguityMessage,omitempty" json:"ambiguityMessage,omitempty" yaml:"ambiguityMessage,omitempty"`
	AmbiguousGeoPointsEnd []*GeoPoint `xml:"ambiguousGeoPointsEnd,omitempty" json:"ambiguousGeoPointsEnd,omitempty" yaml:"ambiguousGeoPointsEnd,omitempty"`
	AmbiguousGeoPointsStart []*GeoPoint `xml:"ambiguousGeoPointsStart,omitempty" json:"ambiguousGeoPointsStart,omitempty" yaml:"ambiguousGeoPointsStart,omitempty"`
	ArgumentDate *string `xml:"argumentDate,omitempty" json:"argumentDate,omitempty" yaml:"argumentDate,omitempty"`
	Itineraries []*Itinerary `xml:"itineraries,omitempty" json:"itineraries,omitempty" yaml:"itineraries,omitempty"`
}
// WrMission was auto-generated from WSDL.
type WrMission struct {
	AmbiguityMessage *string `xml:"ambiguityMessage,omitempty" json:"ambiguityMessage,omitempty" yaml:"ambiguityMessage,omitempty"`
	AmbiguousLines []*Line `xml:"ambiguousLines,omitempty" json:"ambiguousLines,omitempty" yaml:"ambiguousLines,omitempty"`
	ArgumentDate *string `xml:"argumentDate,omitempty" json:"argumentDate,omitempty" yaml:"argumentDate,omitempty"`
	ArgumentLine *Line `xml:"argumentLine,omitempty" json:"argumentLine,omitempty" yaml:"argumentLine,omitempty"`
	Mission *Mission `xml:"mission,omitempty" json:"mission,omitempty" yaml:"mission,omitempty"`
}
// WrMissions was auto-generated from WSDL.
type WrMissions struct {
	AmbiguityMessage *string `xml:"ambiguityMessage,omitempty" json:"ambiguityMessage,omitempty" yaml:"ambiguityMessage,omitempty"`
	AmbiguousDirections []*Direction `xml:"ambiguousDirections,omitempty" json:"ambiguousDirections,omitempty" yaml:"ambiguousDirections,omitempty"`
	AmbiguousLines []*Line `xml:"ambiguousLines,omitempty" json:"ambiguousLines,omitempty" yaml:"ambiguousLines,omitempty"`
	AmbiguousStations []*Station `xml:"ambiguousStations,omitempty" json:"ambiguousStations,omitempty" yaml:"ambiguousStations,omitempty"`
	ArgumentDate *string `xml:"argumentDate,omitempty" json:"argumentDate,omitempty" yaml:"argumentDate,omitempty"`
	ArgumentDirection *Direction `xml:"argumentDirection,omitempty" json:"argumentDirection,omitempty" yaml:"argumentDirection,omitempty"`
	ArgumentLine *Line `xml:"argumentLine,omitempty" json:"argumentLine,omitempty" yaml:"argumentLine,omitempty"`
	ArgumentStation *Station `xml:"argumentStation,omitempty" json:"argumentStation,omitempty" yaml:"argumentStation,omitempty"`
	Missions []*Mission `xml:"missions,omitempty" json:"missions,omitempty" yaml:"missions,omitempty"`
	Perturbations []*Perturbation `xml:"perturbations,omitempty" json:"perturbations,omitempty" yaml:"perturbations,omitempty"`
}
// WrPerturbations was auto-generated from WSDL.
type WrPerturbations struct {
	ArgumentMedia *string `xml:"argumentMedia,omitempty" json:"argumentMedia,omitempty" yaml:"argumentMedia,omitempty"`
	ArgumentSource *string `xml:"argumentSource,omitempty" json:"argumentSource,omitempty" yaml:"argumentSource,omitempty"`
	Perturbations []*Perturbation `xml:"perturbations,omitempty" json:"perturbations,omitempty" yaml:"perturbations,omitempty"`
}
// WrStations was auto-generated from WSDL.
type WrStations struct {
	AmbiguityMessage *string `xml:"ambiguityMessage,omitempty" json:"ambiguityMessage,omitempty" yaml:"ambiguityMessage,omitempty"`
	AmbiguousGeoPoints []*GeoPoint `xml:"ambiguousGeoPoints,omitempty" json:"ambiguousGeoPoints,omitempty" yaml:"ambiguousGeoPoints,omitempty"`
	AmbiguousLines []*Line `xml:"ambiguousLines,omitempty" json:"ambiguousLines,omitempty" yaml:"ambiguousLines,omitempty"`
	ArgumentDirection *Direction `xml:"argumentDirection,omitempty" json:"argumentDirection,omitempty" yaml:"argumentDirection,omitempty"`
	ArgumentGeoPoint *GeoPoint `xml:"argumentGeoPoint,omitempty" json:"argumentGeoPoint,omitempty" yaml:"argumentGeoPoint,omitempty"`
	ArgumentLine *Line `xml:"argumentLine,omitempty" json:"argumentLine,omitempty" yaml:"argumentLine,omitempty"`
	Distances []*int `xml:"distances,omitempty" json:"distances,omitempty" yaml:"distances,omitempty"`
	Stations []*Station `xml:"stations,omitempty" json:"stations,omitempty" yaml:"stations,omitempty"`
}
// GetDirections was auto-generated from WSDL.
// Request payload: the line whose directions are wanted. The element is
// qualified with the service namespace URL.
type GetDirections struct {
	Line *Line `xml:"http://wsiv.ratp.fr line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
}
// GetDirectionsResponse was auto-generated from WSDL.
type GetDirectionsResponse struct {
	Return *WrDirections `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetGeoPoints was auto-generated from WSDL.
// Request payload: a geo point to search around and a result limit.
type GetGeoPoints struct {
	Gp *GeoPoint `xml:"gp,omitempty" json:"gp,omitempty" yaml:"gp,omitempty"`
	Limit *int `xml:"limit,omitempty" json:"limit,omitempty" yaml:"limit,omitempty"`
}
// GetGeoPointsResponse was auto-generated from WSDL.
type GetGeoPointsResponse struct {
	Return []*GeoPoint `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetLines was auto-generated from WSDL.
// Request payload: a line filter.
// Fix: the xml tag previously used the literal prefix "tns:", which
// encoding/xml does not resolve as a namespace prefix; every sibling
// request type (e.g. GetDirections) qualifies the element with the full
// namespace URL instead, so do the same here for consistency.
type GetLines struct {
	Line *Line `xml:"http://wsiv.ratp.fr line,omitempty" json:"line,omitempty" yaml:"line,omitempty"`
}
// GetLinesResponse was auto-generated from WSDL.
type GetLinesResponse struct {
	Return []*Line `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetMission was auto-generated from WSDL.
// Request payload for a single mission lookup with display options.
type GetMission struct {
	Mission *Mission `xml:"mission,omitempty" json:"mission,omitempty" yaml:"mission,omitempty"`
	Date *string `xml:"date,omitempty" json:"date,omitempty" yaml:"date,omitempty"`
	StationAll *bool `xml:"stationAll,omitempty" json:"stationAll,omitempty" yaml:"stationAll,omitempty"`
	StationSortAlpha *bool `xml:"stationSortAlpha,omitempty" json:"stationSortAlpha,omitempty" yaml:"stationSortAlpha,omitempty"`
}
// GetMissionResponse was auto-generated from WSDL.
type GetMissionResponse struct {
	Return *WrMission `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetMissionsFirstLast was auto-generated from WSDL.
// Request payload: first/last missions at a station for a direction/date.
type GetMissionsFirstLast struct {
	Station *Station `xml:"http://wsiv.ratp.fr station,omitempty" json:"station,omitempty" yaml:"station,omitempty"`
	Direction *Direction `xml:"http://wsiv.ratp.fr direction,omitempty" json:"direction,omitempty" yaml:"direction,omitempty"`
	Date *string `xml:"http://wsiv.ratp.fr date,omitempty" json:"date,omitempty" yaml:"date,omitempty"`
}
// GetMissionsFirstLastResponse was auto-generated from WSDL.
type GetMissionsFirstLastResponse struct {
	Return *WrMissions `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetMissionsFrequency was auto-generated from WSDL.
// Request payload: mission frequency between stations over date ranges.
type GetMissionsFrequency struct {
	Station *Station `xml:"http://wsiv.ratp.fr station,omitempty" json:"station,omitempty" yaml:"station,omitempty"`
	Direction *Direction `xml:"http://wsiv.ratp.fr direction,omitempty" json:"direction,omitempty" yaml:"direction,omitempty"`
	StationEndLine *Station `xml:"http://wsiv.ratp.fr stationEndLine,omitempty" json:"stationEndLine,omitempty" yaml:"stationEndLine,omitempty"`
	StationEnd *Station `xml:"http://wsiv.ratp.fr stationEnd,omitempty" json:"stationEnd,omitempty" yaml:"stationEnd,omitempty"`
	DatesStart []*string `xml:"http://wsiv.ratp.fr datesStart,omitempty" json:"datesStart,omitempty" yaml:"datesStart,omitempty"`
	DatesEnd []*string `xml:"http://wsiv.ratp.fr datesEnd,omitempty" json:"datesEnd,omitempty" yaml:"datesEnd,omitempty"`
}
// GetMissionsFrequencyResponse was auto-generated from WSDL.
type GetMissionsFrequencyResponse struct {
	Return *WrMissions `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetMissionsNext was auto-generated from WSDL.
// Request payload: the next missions at a station after a start date.
type GetMissionsNext struct {
	Station *Station `xml:"http://wsiv.ratp.fr station,omitempty" json:"station,omitempty" yaml:"station,omitempty"`
	Direction *Direction `xml:"http://wsiv.ratp.fr direction,omitempty" json:"direction,omitempty" yaml:"direction,omitempty"`
	DateStart *string `xml:"http://wsiv.ratp.fr dateStart,omitempty" json:"dateStart,omitempty" yaml:"dateStart,omitempty"`
	Limit *int `xml:"limit,omitempty" json:"limit,omitempty" yaml:"limit,omitempty"`
}
// GetMissionsNextResponse was auto-generated from WSDL.
type GetMissionsNextResponse struct {
	Return *WrMissions `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetPerturbations was auto-generated from WSDL.
// Request payload: a perturbation filter and an output-format flag.
type GetPerturbations struct {
	Perturbation *Perturbation `xml:"http://wsiv.ratp.fr perturbation,omitempty" json:"perturbation,omitempty" yaml:"perturbation,omitempty"`
	IsXmlText *bool `xml:"http://wsiv.ratp.fr isXmlText,omitempty" json:"isXmlText,omitempty" yaml:"isXmlText,omitempty"`
}
// GetPerturbationsResponse was auto-generated from WSDL.
type GetPerturbationsResponse struct {
	Return *WrPerturbations `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetStations was auto-generated from WSDL.
// Request payload: station search by example and/or geo point.
type GetStations struct {
	Station *Station `xml:"http://wsiv.ratp.fr station,omitempty" json:"station,omitempty" yaml:"station,omitempty"`
	Gp *GeoPoint `xml:"http://wsiv.ratp.fr gp,omitempty" json:"gp,omitempty" yaml:"gp,omitempty"`
	Distances []*int `xml:"distances,omitempty" json:"distances,omitempty" yaml:"distances,omitempty"`
	Limit *int `xml:"limit,omitempty" json:"limit,omitempty" yaml:"limit,omitempty"`
	SortAlpha *bool `xml:"sortAlpha,omitempty" json:"sortAlpha,omitempty" yaml:"sortAlpha,omitempty"`
}
// GetStationsResponse was auto-generated from WSDL.
type GetStationsResponse struct {
	Return *WrStations `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// GetVersionResponse was auto-generated from WSDL.
type GetVersionResponse struct {
	Return *string `xml:"return,omitempty" json:"return,omitempty" yaml:"return,omitempty"`
}
// The Operation* types below are the SOAP envelope bodies: each wraps
// one request or response payload under the operation's element name
// (requests use the "ns:" prefixed element from the WSDL binding).
// Operation wrapper for GetDirections.
// OperationGetDirectionsRequest was auto-generated from WSDL.
type OperationGetDirectionsRequest struct {
	Parameters *GetDirections `xml:"ns:getDirections,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetDirections.
// OperationGetDirectionsResponse was auto-generated from WSDL.
type OperationGetDirectionsResponse struct {
	Parameters *GetDirectionsResponse `xml:"getDirectionsResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetGeoPoints.
// OperationGetGeoPointsRequest was auto-generated from WSDL.
type OperationGetGeoPointsRequest struct {
	Parameters *GetGeoPoints `xml:"ns:getGeoPoints,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetGeoPoints.
// OperationGetGeoPointsResponse was auto-generated from WSDL.
type OperationGetGeoPointsResponse struct {
	Parameters *GetGeoPointsResponse `xml:"getGeoPointsResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetLines.
// OperationGetLinesRequest was auto-generated from WSDL.
type OperationGetLinesRequest struct {
	Parameters *GetLines `xml:"ns:getLines,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetLines.
// OperationGetLinesResponse was auto-generated from WSDL.
type OperationGetLinesResponse struct {
	Parameters *GetLinesResponse `xml:"getLinesResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMission.
// OperationGetMissionRequest was auto-generated from WSDL.
type OperationGetMissionRequest struct {
	Parameters *GetMission `xml:"ns:getMission,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMission.
// OperationGetMissionResponse was auto-generated from WSDL.
type OperationGetMissionResponse struct {
	Parameters *GetMissionResponse `xml:"getMissionResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMissionsFirstLast.
// OperationGetMissionsFirstLastRequest was auto-generated from
// WSDL.
type OperationGetMissionsFirstLastRequest struct {
	Parameters *GetMissionsFirstLast `xml:"ns:getMissionsFirstLast,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMissionsFirstLast.
// OperationGetMissionsFirstLastResponse was auto-generated from
// WSDL.
type OperationGetMissionsFirstLastResponse struct {
	Parameters *GetMissionsFirstLastResponse `xml:"getMissionsFirstLastResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMissionsFrequency.
// OperationGetMissionsFrequencyRequest was auto-generated from
// WSDL.
type OperationGetMissionsFrequencyRequest struct {
	Parameters *GetMissionsFrequency `xml:"ns:getMissionsFrequency,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMissionsFrequency.
// OperationGetMissionsFrequencyResponse was auto-generated from
// WSDL.
type OperationGetMissionsFrequencyResponse struct {
	Parameters *GetMissionsFrequencyResponse `xml:"getMissionsFrequencyResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMissionsNext.
// OperationGetMissionsNextRequest was auto-generated from WSDL.
type OperationGetMissionsNextRequest struct {
	Parameters *GetMissionsNext `xml:"ns:getMissionsNext,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetMissionsNext.
// OperationGetMissionsNextResponse was auto-generated from WSDL.
type OperationGetMissionsNextResponse struct {
	Parameters *GetMissionsNextResponse `xml:"getMissionsNextResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetPerturbations.
// OperationGetPerturbationsRequest was auto-generated from WSDL.
type OperationGetPerturbationsRequest struct {
	Parameters *GetPerturbations `xml:"ns:getPerturbations,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetPerturbations.
// OperationGetPerturbationsResponse was auto-generated from WSDL.
type OperationGetPerturbationsResponse struct {
	Parameters *GetPerturbationsResponse `xml:"getPerturbationsResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetStations.
// OperationGetStationsRequest was auto-generated from WSDL.
type OperationGetStationsRequest struct {
	Parameters *GetStations `xml:"ns:getStations,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetStations.
// OperationGetStationsResponse was auto-generated from WSDL.
type OperationGetStationsResponse struct {
	Parameters *GetStationsResponse `xml:"getStationsResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetVersion.
// OperationGetVersionRequest was auto-generated from WSDL.
// NOTE(review): unlike the other operations, there is no dedicated
// GetVersion request type — the generator reuses GetVersionResponse as
// the (empty) request payload. Confirm against the WSDL before changing.
type OperationGetVersionRequest struct {
	Parameters *GetVersionResponse `xml:"ns:getVersion,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// Operation wrapper for GetVersion.
// OperationGetVersionResponse was auto-generated from WSDL.
type OperationGetVersionResponse struct {
	Parameters *GetVersionResponse `xml:"getVersionResponse,omitempty" json:"parameters,omitempty" yaml:"parameters,omitempty"`
}
// wsivPortType implements the WsivPortType interface.
// All its methods round-trip one SOAP operation through cli.
type wsivPortType struct {
	cli *soap.Client
}
// GetDirections was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getDirections" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetDirections(parameters *GetDirections) (*GetDirectionsResponse, error) {
	req := OperationGetDirectionsRequest{parameters}
	var resp OperationGetDirectionsResponse
	if err := p.cli.RoundTripWithAction("getDirections", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetGeoPoints was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getGeoPoints" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetGeoPoints(parameters *GetGeoPoints) (*GetGeoPointsResponse, error) {
	req := OperationGetGeoPointsRequest{parameters}
	var resp OperationGetGeoPointsResponse
	if err := p.cli.RoundTripWithAction("getGeoPoints", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetLines was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getLines" SOAP call, and unwraps the response payload.
// Fix: removed a leftover debug fmt.Println that dumped the full
// response envelope to stdout on every call; none of the sibling
// methods print, so this was clearly an accidental leftover.
func (p *wsivPortType) GetLines(parameters *GetLines) (*GetLinesResponse, error) {
	α := OperationGetLinesRequest{
		parameters,
	}
	γ := OperationGetLinesResponse{}
	if err := p.cli.RoundTripWithAction("getLines", α, &γ); err != nil {
		return nil, err
	}
	return γ.Parameters, nil
}
// GetMission was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getMission" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetMission(parameters *GetMission) (*GetMissionResponse, error) {
	req := OperationGetMissionRequest{parameters}
	var resp OperationGetMissionResponse
	if err := p.cli.RoundTripWithAction("getMission", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetMissionsFirstLast was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getMissionsFirstLast" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetMissionsFirstLast(parameters *GetMissionsFirstLast) (*GetMissionsFirstLastResponse, error) {
	req := OperationGetMissionsFirstLastRequest{parameters}
	var resp OperationGetMissionsFirstLastResponse
	if err := p.cli.RoundTripWithAction("getMissionsFirstLast", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetMissionsFrequency was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getMissionsFrequency" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetMissionsFrequency(parameters *GetMissionsFrequency) (*GetMissionsFrequencyResponse, error) {
	req := OperationGetMissionsFrequencyRequest{parameters}
	var resp OperationGetMissionsFrequencyResponse
	if err := p.cli.RoundTripWithAction("getMissionsFrequency", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetMissionsNext was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getMissionsNext" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetMissionsNext(parameters *GetMissionsNext) (*GetMissionsNextResponse, error) {
	req := OperationGetMissionsNextRequest{parameters}
	var resp OperationGetMissionsNextResponse
	if err := p.cli.RoundTripWithAction("getMissionsNext", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetPerturbations was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getPerturbations" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetPerturbations(parameters *GetPerturbations) (*GetPerturbationsResponse, error) {
	req := OperationGetPerturbationsRequest{parameters}
	var resp OperationGetPerturbationsResponse
	if err := p.cli.RoundTripWithAction("getPerturbations", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetStations was auto-generated from WSDL.
// It wraps parameters in the operation envelope, performs the
// "getStations" SOAP call, and unwraps the response payload.
func (p *wsivPortType) GetStations(parameters *GetStations) (*GetStationsResponse, error) {
	req := OperationGetStationsRequest{parameters}
	var resp OperationGetStationsResponse
	if err := p.cli.RoundTripWithAction("getStations", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
// GetVersion was auto-generated from WSDL.
// It performs the parameterless "getVersion" SOAP call and unwraps the
// response payload.
func (p *wsivPortType) GetVersion() (*GetVersionResponse, error) {
	// The generated request wrapper carries an (empty) GetVersionResponse
	// as its payload type — presumably a generator quirk; confirm against
	// the WSDL.
	req := OperationGetVersionRequest{&GetVersionResponse{}}
	var resp OperationGetVersionResponse
	if err := p.cli.RoundTripWithAction("getVersion", req, &resp); err != nil {
		return nil, err
	}
	return resp.Parameters, nil
}
|
package itemCat
import (
"github.com/tidwall/gjson"
"io/ioutil"
"net/http"
"strconv"
)
// ItemCat fetches items from MercadoLibre's public API by category and
// exposes helpers over the most recently loaded response.
// (The previous comment said "ArtCat" — this type is ItemCat.)
type ItemCat struct {
	// data holds the raw JSON body of the last successful LoadItems call,
	// or "" after a failed load.
	data string
}
// LoadItems loads page one of the items in the given MercadoLibre
// category and stores the raw JSON response in a.data.
// Returns 0 on success and 1 on failure (network error or non-200
// status), in which case a.data is reset to the empty string.
func (a *ItemCat) LoadItems(idCat string) int {
	resp, err := http.Get("https://api.mercadolibre.com/sites/MLA/search?category=" + idCat)
	if err != nil {
		a.data = ""
		return 1
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection on every call.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil || resp.StatusCode != 200 {
		a.data = ""
		return 1
	}
	a.data = string(body)
	return 0
}
// GetData returns the raw JSON response stored by the last load, or ""
// when the last load failed or none has been performed.
func (a ItemCat) GetData() string {
	return a.data
}
// GetPrices returns the prices of the previously loaded items as
// float32 values. The second result is 0 on success and 1 when no items
// are loaded or a price cannot be parsed (in which case the prices
// collected so far are returned alongside the error flag).
func (a ItemCat) GetPrices() ([]float32, int) {
	result := gjson.Get(a.data, "results.#.price")
	// Fix: call Array() once instead of twice, and pre-size the output
	// to the actual item count instead of a hard-coded guess of 5.
	prices := result.Array()
	if !result.Exists() || len(prices) == 0 {
		return nil, 1
	}
	res := make([]float32, 0, len(prices))
	for _, price := range prices {
		parsed, err := strconv.ParseFloat(price.String(), 32)
		if err != nil {
			return res, 1
		}
		res = append(res, float32(parsed))
	}
	return res, 0
}
|
package port
import (
"context"
"time"
)
// BayarSetoranInport is the inbound port (use-case entry point) for the
// "bayar setoran" (pay deposit) flow.
type BayarSetoranInport interface {
	Execute(ctx context.Context, req BayarSetoranRequest) (*BayarSetoranResponse, error)
}
// BayarSetoranRequest carries the input of BayarSetoranInport.Execute.
type BayarSetoranRequest struct {
	// TagihanID identifies the bill being paid.
	TagihanID string
	// TanggalHariIni is today's date, injected by the caller and
	// excluded from JSON serialization.
	TanggalHariIni time.Time `json:"-"`
}
// BayarSetoranResponse is the (currently empty) result of the flow.
type BayarSetoranResponse struct {
}
|
package main
import (
"fmt"
"log"
"os"
"path/filepath"
"github.com/alecthomas/kingpin"
)
// defaultCountFile is the count-file location relative to $HOME, used
// when no --file flag is given.
const defaultCountFile = ".config/ct/count"
// eLog writes error messages to stderr without any log prefix.
var eLog = log.New(os.Stderr, "", 0)
// Command-line definition: a default "up" command with an optional step,
// and a "reset" command with an optional target number.
var (
	rotate = kingpin.Flag("rotate", "Number of rotation.").Short('r').Int()
	countFilePath = kingpin.Flag("file", "File to save number.").Short('f').String()
	up = kingpin.Command("up", "Count up the number.").Default()
	upStep = setStep(up)
	reset = kingpin.Command("reset", "Reset the stored number.")
	resetNum = reset.Arg("number", "Reset number.").Default("0").Int()
)
// setStep registers the optional "step" argument (default 1) on the
// given command and returns a pointer to its parsed value.
func setStep(c *kingpin.CmdClause) *int {
	arg := c.Arg("step", "Counting step.").Default("1")
	return arg.Int()
}
func openCountFile(path string) (*os.File, bool, error) {
dir := filepath.Dir(path)
if _, err := os.Stat(dir); err != nil {
err = os.MkdirAll(dir, 0777)
if err != nil {
return nil, false, err
}
}
var f *os.File
var err error
var isNew bool
if _, err = os.Stat(path); err == nil {
f, err = os.OpenFile(path, os.O_RDWR, 0666)
} else {
f, err = os.Create(path)
isNew = true
}
return f, isNew, err
}
// cmdMain runs the counter tool and returns the process exit code.
// It reads the stored number, applies the parsed sub-command ("up" or
// "reset"), optionally rotates back to 1, writes the result back to the
// count file and prints it.
func cmdMain() int {
	cmd := kingpin.Parse()
	path := filepath.Join(os.Getenv("HOME"), defaultCountFile)
	if *countFilePath != "" {
		path = *countFilePath
	}
	// "new" renamed to isNew: it shadowed the builtin new().
	f, isNew, err := openCountFile(path)
	if err != nil {
		eLog.Printf("error open file '%s': %s", path, err)
		return 1
	}
	defer f.Close()
	var num int
	if isNew {
		num = 0
	} else {
		_, err = fmt.Fscanf(f, "%d", &num)
		if err != nil {
			eLog.Printf("error read file '%s': %s", path, err)
			return 1
		}
	}
	switch cmd {
	case "up":
		num += *upStep
	case "reset":
		num = *resetNum
	}
	if *rotate > 0 && num > *rotate {
		num = 1
	}
	// Fix: truncate before rewriting. The new number may have fewer
	// digits than the stored one (e.g. "100" followed by a reset to 0),
	// and without the truncate the stale trailing bytes stayed in the
	// file, corrupting the next read.
	if err = f.Truncate(0); err != nil {
		eLog.Printf("error write file '%s': %s", path, err)
		return 1
	}
	if _, err = f.Seek(0, 0); err != nil {
		eLog.Printf("error write file '%s': %s", path, err)
		return 1
	}
	if _, err = fmt.Fprintf(f, "%d\n", num); err != nil {
		eLog.Printf("error write file '%s': %s", path, err)
		return 1
	}
	fmt.Println(num)
	return 0
}
// main delegates all work to cmdMain so that cmdMain's deferred cleanup
// runs before os.Exit (os.Exit skips deferred calls in its own frame).
func main() {
	os.Exit(cmdMain())
}
|
package cosmos
import "testing"
// TestCosmosCollection verifies that Database.Collection and
// Database.Collections derive the correct resource type, resource link
// and request path for the underlying client.
func TestCosmosCollection(t *testing.T) {
	client := getDummyClient()
	db := client.Database("dbtest")
	coll := db.Collection("colltest")
	if coll.client.rType != "colls" {
		t.Errorf("%+v", coll.client)
	}
	if coll.client.rLink != "dbs/dbtest/colls/colltest" {
		t.Errorf("%+v", coll.client)
	}
	if coll.client.path != "dbs/dbtest/colls/colltest" {
		t.Errorf("%+v", coll.client)
	}
	colls := db.Collections()
	// Fix: this assertion previously re-checked coll.client.rType (a
	// copy-paste slip — the error message already printed colls), so the
	// Collections client's rType was never actually tested.
	if colls.client.rType != "colls" {
		t.Errorf("Wrong rType %s", colls.client.rType)
	}
	if colls.client.rLink != "dbs/dbtest" {
		t.Errorf("Wrong rLink %s", colls.client.rLink)
	}
	if colls.client.path != "dbs/dbtest/colls" {
		t.Errorf("Wrong path %s", colls.client.path)
	}
}
|
package main
import (
"math/rand"
"net/http"
"os"
"strings"
"time"
"github.com/nektro/mantle/pkg/db"
"github.com/nektro/mantle/pkg/handler"
"github.com/nektro/mantle/pkg/idata"
"github.com/nektro/mantle/pkg/ws"
"github.com/nektro/go-util/util"
etc "github.com/nektro/go.etc"
"github.com/nektro/go.etc/translations"
"github.com/spf13/pflag"
_ "github.com/nektro/mantle/statik"
)
// Version takes in version string from build_all.sh.
// It defaults to "vMASTER" for builds made outside the release script.
var Version = "vMASTER"
// main boots the mantle server: it seeds math/rand, loads configuration
// and OAuth2 handling via go.etc, initializes the database and
// translations, installs a graceful-shutdown hook, registers the full
// HTTP route tree, and finally starts the web server.
func main() {
	rand.Seed(time.Now().UnixNano())
	idata.Version = etc.FixBareVersion(Version)
	util.Log("Welcome to " + idata.Name + " " + idata.Version + ".")
	//
	pflag.IntVar(&idata.Config.Port, "port", 8000, "The port to bind the web server to.")
	etc.AppID = strings.ToLower(idata.Name)
	etc.PreInit()
	//
	etc.Init("mantle", &idata.Config, "./verify", handler.SaveOAuth2InfoCb)
	//
	// database initialization
	db.Init()
	translations.Fetch()
	translations.Init()
	//
	// setup graceful stop
	util.RunOnClose(func() {
		util.Log("Gracefully shutting down...")
		util.Log("Saving database to disk")
		db.Close()
		util.Log("Closing all remaining active WebSocket connections")
		ws.Close()
		util.Log("Done")
		os.Exit(0)
	})
	//
	// create http service
	// Route tree: each sPaths node maps HTTP methods to handlers and
	// nests sub-paths under its key; see fRegister for the traversal.
	fRegister("/", sPaths{
		GET: handler.InviteGet,
		Sub: map[string]sPaths{
			"invite": sPaths{POS: handler.InvitePost},
			"verify": sPaths{GET: handler.Verify},
			"ws": sPaths{GET: handler.Websocket},
			"chat": sPaths{
				Sub: map[string]sPaths{
					"": sPaths{GET: handler.Chat},
				},
			},
			"api": sPaths{
				Sub: map[string]sPaths{
					"about": sPaths{GET: handler.ApiAbout},
					"update_property": sPaths{PUT: handler.ApiPropertyUpdate},
					"etc": sPaths{
						Sub: map[string]sPaths{
							"role_colors.css": sPaths{GET: handler.EtcRoleColorCSS},
							"badges": sPaths{
								Sub: map[string]sPaths{
									"members_online.svg": sPaths{GET: handler.EtcBadgeMembersOnline},
									"members_total.svg": sPaths{GET: handler.EtcBadgeMembersTotal},
								},
							},
						},
					},
					"users": sPaths{
						Sub: map[string]sPaths{
							"@me": sPaths{GET: handler.UsersMe},
							"online": sPaths{GET: handler.UsersOnline},
							"{uuid:[0-9A-Z]+}": sPaths{
								GET: handler.UsersRead,
								PUT: handler.UserUpdate,
							},
						},
					},
					"channels": sPaths{
						Sub: map[string]sPaths{
							"@me": sPaths{GET: handler.ChannelsMe},
							"create": sPaths{POS: handler.ChannelCreate},
							"{uuid:[0-9A-Z]+}": sPaths{
								GET: handler.ChannelRead,
								PUT: handler.ChannelUpdate,
								DEL: handler.ChannelDelete,
								Sub: map[string]sPaths{
									"messages": sPaths{
										GET: handler.ChannelMessagesRead,
										DEL: handler.ChannelMessagesDelete,
									},
								},
							},
						},
					},
					"roles": sPaths{
						Sub: map[string]sPaths{
							"@me": sPaths{GET: handler.RolesMe},
							"create": sPaths{POS: handler.RolesCreate},
							"{uuid:[0-9A-Z]+}": sPaths{
								PUT: handler.RoleUpdate,
								DEL: handler.RoleDelete,
							},
						},
					},
					"invites": sPaths{
						Sub: map[string]sPaths{
							"@me": sPaths{GET: handler.InvitesMe},
							"create": sPaths{POS: handler.InvitesCreate},
							"{uuid:[0-9A-Z]+}": sPaths{
								PUT: handler.InviteUpdate,
								DEL: handler.InviteDelete,
							},
						},
					},
				},
			},
		},
	})
	//
	// start server
	etc.StartServer(idata.Config.Port)
}
// sPaths describes the routing for one URL path: at most one handler per
// HTTP method, plus child path segments. Nil handlers mean the method is
// not served at this path (iregister skips them).
type sPaths struct {
	GET http.HandlerFunc // handler for GET requests; nil if unsupported
	POS http.HandlerFunc // handler for POST requests; nil if unsupported
	PUT http.HandlerFunc // handler for PUT requests; nil if unsupported
	DEL http.HandlerFunc // handler for DELETE requests; nil if unsupported
	Sub map[string]sPaths // child routes keyed by path segment (may be empty-string for a trailing slash)
}
// iregister registers handler h for HTTP method m on path p. A nil
// handler is silently skipped so sPaths entries may leave methods unset.
func iregister(m, p string, h http.HandlerFunc) {
	if h != nil {
		etc.Router.Methods(m).Path(p).HandlerFunc(h)
	}
}
// fRegister recursively registers the sPaths tree rooted at p under path s,
// wiring each non-nil method handler and then descending into Sub entries.
func fRegister(s string, p sPaths) {
	// An empty Sub key produces "…//…"; collapse the doubled leading slash.
	if len(s) >= 2 && s[0] == '/' && s[1] == '/' {
		s = s[1:]
	}
	// Table-driven registration of the four supported methods.
	handlers := []struct {
		method  string
		handler http.HandlerFunc
	}{
		{http.MethodGet, p.GET},
		{http.MethodPost, p.POS},
		{http.MethodPut, p.PUT},
		{http.MethodDelete, p.DEL},
	}
	for _, reg := range handlers {
		iregister(reg.method, s, reg.handler)
	}
	// Ranging over a nil map is a no-op, so no nil check is needed.
	for segment, child := range p.Sub {
		fRegister(s+"/"+segment, child)
	}
}
|
package main
import "chapter4/context"
// main delegates all startup work to the context package's initializer.
func main() {
	context.Initialize()
}
|
package main
import (
"bufio"
"fmt"
"os"
"strings"
"time"
"github.com/araddon/dateparse"
"github.com/bwmarrin/discordgo"
)
// global variables
var tokensFile = "tokens.txt"
var discordToken string
var commandPrefix string
// remindmes = list of structs w/ author, time message will execute (post-converted), remind message, goroutine
// could also do a map of author -> list of remindmes
// remindmes = append(remindmes, <thing to add>)
// right now the work flow is going to look like this:
// on receive message:
// check for command
// check for formatting of command
// spin up background goroutine
// - goroutine should take in:
// author
// time request
// desired message
// add goroutine to master list
// send remindme confirmation message
// react to remindme confirmation message with options
// link to message is:
// https://discordapp.com/channels/<guildID>/<channelID>/<messageID>
// guildID == "" for PMs -> "@me" instead of guildID
// on reaction:
// check content for remindme confirmation update + author = the bot
// check reactor for original message author
// handle reaction
// goroutine will schedule job based on params passed in
// check to make sure message still exists -> then remindmes?
// could use standardized date / time and sleep by subtracting current time from it
// that way could resume saved and restored jobs on startup
// here are some potential options:
// delete remindme (will delete the confirmation and original message)
// delete command (just delete original message command)
// setupTokens reads the Discord bot token from the first line of fileName
// and stores it in the package-level discordToken variable. Any open or
// read failure panics via errCheck.
func setupTokens(fileName string) {
	file, err := os.Open(fileName)
	errCheck("Error opening \""+fileName+"\" - the file does not exist", err)
	defer file.Close()
	scanner := bufio.NewScanner(file)
	// Only the first line is consumed; it must contain the bot token.
	if scanner.Scan() {
		discordToken = scanner.Text()
	}
	errCheck("Error reading \""+fileName+"\" - the file cannot be read", scanner.Err())
}
// errCheck prints msg together with err and panics when err is non-nil.
// It is a no-op for a nil error, so callers can use it inline after any
// fallible call.
func errCheck(msg string, err error) {
	if err != nil {
		fmt.Printf("%s: %+v\n", msg, err)
		panic(err)
	}
}
// messageHandler fires on every incoming Discord message. When the message
// starts with commandPrefix it extracts an optional quoted reminder text
// plus the remaining time description, and attempts to parse the time.
// Scheduling the actual reminder is still TODO (see notes below).
func messageHandler(discord *discordgo.Session, message *discordgo.MessageCreate) {
	if strings.HasPrefix(message.Content, commandPrefix) {
		fmt.Printf("GuildID: %s ChannelID: %s Timestamp: %s Username: %s MessageID: %s Content: %s Current Time: %s\n", message.GuildID, message.ChannelID, message.Timestamp, message.Author.Username, message.ID, message.Content, time.Now().UTC().String())
		// var time string
		var reminder string
		var timestring string
		parameters := strings.TrimSpace(message.Content[len(commandPrefix):])
		if strings.Contains(parameters, "\"") {
			if strings.IndexByte(parameters, '"') == strings.LastIndexByte(parameters, '"') {
				// Only one quote present: malformed command, bail out.
				// TODO: send update saying that only one " exists
				return
			}
			// Reminder text is everything from the first to the last quote, inclusive.
			reminder = parameters[strings.IndexByte(parameters, '"') : strings.LastIndexByte(parameters, '"')+1]
			// BUGFIX: use Print, not Printf — reminder is user input and may
			// contain % verbs that would corrupt the output (go vet: printf).
			fmt.Print(reminder)
		}
		if len(reminder) > 0 {
			// BUGFIX: strings.Trim interprets its second argument as a SET of
			// characters to strip, not a substring. Remove the quoted reminder
			// text itself from the parameters instead.
			parameters = strings.Replace(parameters, reminder, "", 1)
		}
		timestring = strings.TrimSpace(parameters)
		fmt.Print("\nTimestamp: " + timestring)
		if len(timestring) > 0 {
			parsedtime, err := dateparse.ParseAny(timestring)
			if err == nil {
				// BUGFIX: the success branch previously ran on err != nil,
				// printing the zero time on parse failure and discarding
				// successful parses.
				fmt.Print("\nParsed Timestring: " + parsedtime.String())
			} else {
				// check for other forms of parsed time
				// if not successful throw error
				// timenums := regexp.MustCompile("\\d+").FindStringSubmatch(timestring)
				// numberInt, err := strconv.Atoi(timenums)
				// TODO: send error message abt not including a number
			}
		}
		// (original author's planning notes retained below)
		// spin up goroutine with sleep using extracted parameters
		// on goroutine completion send message id through channel
		// write function for extracting all of this stuff
		// write function for determining time to sleep
		// have goroutine go to sleep for time
		// on startup loop through loaded json of messages and extract all of the data
		// only if the message exists tho
		// save a list of every message -
		// loop through every message and clear them out if they are done
		// on shutdown + startup
		// delete messages.txt file
		// spin up a goroutine for each message
		// goroutine checks for if remindme is still active or not
		// if so appends to message.txt
		// SORT THE LIST OF MESSAGES BY COMPLETION DATE
		// UPON COMPLETION OF A GOROUTINE - maybe kill a message?
	}
}
// func extractDigitIndices(input string) (int, int) {
// for i := 0; i < len(input); i++ {
// if unicode.IsNumber(input[i]) {
// for j := i; j < len(input); j++ {
// if !unicode.IsNumber(rune(input[j])) {
// return i, j
// }
// }
// }
// }
// return -1, -1
// }
// sortDuration maps a human time-unit word to its length in seconds.
// Matching is case-insensitive and prefix-based ("sec", "seconds" and "s"
// all yield 1). "mi…" means minutes and "mo…" months, so a bare "m" is
// ambiguous and yields -1, as does any unrecognized unit.
func sortDuration(unit string) int {
	u := strings.ToLower(unit)
	switch {
	case strings.HasPrefix(u, "s"):
		return 1 // seconds
	case strings.HasPrefix(u, "mi"):
		return 60 // minutes
	case strings.HasPrefix(u, "h"):
		return 3600 // hours
	case strings.HasPrefix(u, "d"):
		return 86400 // days
	case strings.HasPrefix(u, "w"):
		return 604800 // weeks
	case strings.HasPrefix(u, "mo"):
		return 2592000 // months (30 days)
	case strings.HasPrefix(u, "y"):
		return 31104000 // years (360 days)
	default:
		return -1
	}
}
// remindMe will eventually parse a reminder command into (count, unit,
// text) and schedule it. NOTE(review): currently a no-op — the body is
// commented-out scaffolding and nothing in the visible file calls it yet.
func remindMe(message *discordgo.Message) {
	// parameters := regexp.MustCompile("\\s+").Split(message.Content, 4)
	// time, err := strconv.Atoi(parameters[1])
	// multiplier := sortDuration(parameters[2])
}
// main wires up the Discord bot: loads the token, creates the session,
// registers the message handler, opens the gateway connection, and then
// blocks forever so events keep arriving.
func main() {
	setupTokens(tokensFile)
	session, err := discordgo.New("Bot " + discordToken)
	errCheck("error creating discord session", err)
	// user, err := discord.User(s"@me")
	errCheck("error retrieving user from discord", err)
	// commandPrefix = "<@" + user.ID + ">"
	commandPrefix = "rm!"
	fmt.Printf("Command Prefix: %s\n", commandPrefix)
	session.AddHandler(messageHandler)
	errCheck("Error opening connection to Discord", session.Open())
	defer session.Close()
	// Block forever; the bot runs entirely from event callbacks.
	select {}
}
|
package main
import (
"fmt"
"html/template"
"log"
"net/http"
//"github.com/feng/future/agfun/data"
"github.com/feng/future/agfun/control"
)
// main registers all HTTP routes (pages plus static assets served from
// ./template) and starts the server on port 8000.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("URL**************", r.URL.Path)
	})
	http.HandleFunc("/index", control.Agfun)
	http.HandleFunc("/login", control.Login)
	http.HandleFunc("/test", test)
	// Static asset routes: css/js/images are all served from ./template.
	assets := http.FileServer(http.Dir("./template"))
	for _, prefix := range []string{"/css/", "/js/", "/images/"} {
		http.Handle(prefix, assets)
	}
	if err := http.ListenAndServe(":8000", nil); err != nil {
		log.Fatal(err)
	}
}
// test renders the static rendered-index.html template. Parse and execute
// failures are reported to stdout; the handler never panics.
func test(w http.ResponseWriter, r *http.Request) {
	//sess := globalSessions.SessionStart(w,r)
	//sess.Set("username", "feng")
	t, err := template.ParseFiles("./template/rendered-index.html")
	if err != nil {
		fmt.Println(err)
		return
	}
	// BUGFIX: the Execute error was silently discarded; report it so a
	// half-written response is at least visible in the logs.
	if err := t.Execute(w, nil); err != nil {
		fmt.Println(err)
	}
}
|
package vsphere
import (
"context"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vmware/govmomi/vim25/mo"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/openshift/installer/pkg/destroy/providers"
installertypes "github.com/openshift/installer/pkg/types"
)
// ClusterUninstaller holds the various options for the cluster we want to delete.
type ClusterUninstaller struct {
	// ClusterID is the cluster identifier from the install metadata.
	ClusterID string
	// InfraID is the infrastructure name used to locate the cluster's
	// folders, VMs, tags and storage policy during teardown.
	InfraID string
	// terraformPlatform is copied from metadata.VSphere.TerraformPlatform.
	terraformPlatform string
	// Logger receives progress and debug output for the destroy run.
	Logger logrus.FieldLogger
	// client performs all vSphere API operations for the uninstall.
	client API
}
// New returns a vSphere destroyer built from ClusterMetadata, connecting
// to vCenter with the credentials recorded in the metadata.
func New(logger logrus.FieldLogger, metadata *installertypes.ClusterMetadata) (providers.Destroyer, error) {
	creds := metadata.VSphere
	client, err := NewClient(creds.VCenter, creds.Username, creds.Password)
	if err != nil {
		return nil, err
	}
	return newWithClient(logger, metadata, client), nil
}
// newWithClient builds a ClusterUninstaller around an already-connected
// API client; split out from New for testability.
func newWithClient(logger logrus.FieldLogger, metadata *installertypes.ClusterMetadata, client API) *ClusterUninstaller {
	uninstaller := ClusterUninstaller{
		ClusterID:         metadata.ClusterID,
		InfraID:           metadata.InfraID,
		terraformPlatform: metadata.VSphere.TerraformPlatform,
		Logger:            logger,
		client:            client,
	}
	return &uninstaller
}
// deleteFolder removes the cluster's folders once they are empty. A folder
// that still contains children aborts the pass with an error so the poller
// retries after the VM-deletion stage has caught up.
func (o *ClusterUninstaller) deleteFolder(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()
	o.Logger.Debug("Delete Folder")
	folders, err := o.client.ListFolders(ctx, o.InfraID)
	if err != nil {
		return err
	}
	if len(folders) == 0 {
		o.Logger.Debug("All folders deleted")
		return nil
	}
	for _, folderMo := range folders {
		logger := o.Logger.WithField("Folder", folderMo.Name)
		if count := len(folderMo.ChildEntity); count > 0 {
			// Non-empty folder: report what is left and bail out.
			children := make([]string, 0, count)
			for _, child := range folderMo.ChildEntity {
				children = append(children, fmt.Sprintf("%s:%s", child.Type, child.Value))
			}
			logger.Errorf("Folder should be empty but contains %d objects: %s. The installer will retry removing \"virtualmachine\" objects, but any other type will need to be removed manually before the deprovision can proceed", count, strings.Join(children, ", "))
			return errors.Errorf("Expected Folder %s to be empty", folderMo.Name)
		}
		if err := o.client.DeleteFolder(ctx, folderMo); err != nil {
			logger.Debug(err)
			return err
		}
		logger.Info("Destroyed")
	}
	return nil
}
// deleteStoragePolicy removes the cluster's generated storage policy
// ("openshift-storage-policy-<InfraID>"), bounded by a 30-minute timeout.
func (o *ClusterUninstaller) deleteStoragePolicy(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
	defer cancel()
	name := fmt.Sprintf("openshift-storage-policy-%s", o.InfraID)
	logger := o.Logger.WithField("StoragePolicy", name)
	logger.Debug("Delete")
	if err := o.client.DeleteStoragePolicy(ctx, name); err != nil {
		logger.Debug(err)
		return err
	}
	logger.Info("Destroyed")
	return nil
}
// deleteTag removes the tag named after the cluster's InfraID, bounded by
// a 30-minute timeout.
func (o *ClusterUninstaller) deleteTag(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
	defer cancel()
	logger := o.Logger.WithField("Tag", o.InfraID)
	logger.Debug("Delete")
	if err := o.client.DeleteTag(ctx, o.InfraID); err != nil {
		logger.Debug(err)
		return err
	}
	logger.Info("Deleted")
	return nil
}
// deleteTagCategory removes the "openshift-<InfraID>" tag category.
func (o *ClusterUninstaller) deleteTagCategory(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()
	categoryID := "openshift-" + o.InfraID
	tcLogger := o.Logger.WithField("TagCategory", categoryID)
	tcLogger.Debug("Delete")
	err := o.client.DeleteTagCategory(ctx, categoryID)
	if err != nil {
		// CONSISTENCY: every sibling delete helper logs the error at debug
		// level and returns it; the error is surfaced (and wrapped) in Run,
		// so logging it again at error level here double-reported it.
		tcLogger.Debug(err)
		return err
	}
	tcLogger.Info("Deleted")
	return nil
}
// stopVirtualMachine powers off a single virtual machine.
func (o *ClusterUninstaller) stopVirtualMachine(ctx context.Context, vmMO mo.VirtualMachine) error {
	logger := o.Logger.WithField("VirtualMachine", vmMO.Name)
	if err := o.client.StopVirtualMachine(ctx, vmMO); err != nil {
		logger.Debug(err)
		return err
	}
	logger.Debug("Powered off")
	return nil
}
// stopVirtualMachines powers off every cluster VM that is not already
// powered off, collecting per-VM failures into one aggregate error.
func (o *ClusterUninstaller) stopVirtualMachines(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
	defer cancel()
	o.Logger.Debug("Power Off Virtual Machines")
	vms, err := o.client.ListVirtualMachines(ctx, o.InfraID)
	if err != nil {
		o.Logger.Debug(err)
		return err
	}
	var failures []error
	for _, vm := range vms {
		if isPoweredOff(vm) {
			continue
		}
		if stopErr := o.stopVirtualMachine(ctx, vm); stopErr != nil {
			failures = append(failures, stopErr)
		}
	}
	return utilerrors.NewAggregate(failures)
}
// deleteVirtualMachine destroys a single virtual machine.
func (o *ClusterUninstaller) deleteVirtualMachine(ctx context.Context, vmMO mo.VirtualMachine) error {
	logger := o.Logger.WithField("VirtualMachine", vmMO.Name)
	if err := o.client.DeleteVirtualMachine(ctx, vmMO); err != nil {
		logger.Debug(err)
		return err
	}
	logger.Info("Destroyed")
	return nil
}
// deleteVirtualMachines destroys every cluster VM, collecting per-VM
// failures into one aggregate error.
func (o *ClusterUninstaller) deleteVirtualMachines(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
	defer cancel()
	o.Logger.Debug("Delete Virtual Machines")
	vms, err := o.client.ListVirtualMachines(ctx, o.InfraID)
	if err != nil {
		o.Logger.Debug(err)
		return err
	}
	var failures []error
	for _, vm := range vms {
		if deleteErr := o.deleteVirtualMachine(ctx, vm); deleteErr != nil {
			failures = append(failures, deleteErr)
		}
	}
	return utilerrors.NewAggregate(failures)
}
// destroyCluster runs the teardown in ordered stages. Every step within a
// stage is attempted even if an earlier step in that stage failed, but a
// failed stage stops the run: it returns (false, nil) so the surrounding
// poller retries the whole sequence later. (true, nil) means all done.
func (o *ClusterUninstaller) destroyCluster(ctx context.Context) (bool, error) {
	type step struct {
		name    string
		execute func(context.Context) error
	}
	stages := [][]step{
		{{name: "Stop virtual machines", execute: o.stopVirtualMachines}},
		{{name: "Virtual Machines", execute: o.deleteVirtualMachines}},
		{{name: "Folder", execute: o.deleteFolder}},
		{
			{name: "Storage Policy", execute: o.deleteStoragePolicy},
			{name: "Tag", execute: o.deleteTag},
			{name: "Tag Category", execute: o.deleteTagCategory},
		},
	}
	for _, stage := range stages {
		failed := false
		for _, s := range stage {
			if err := s.execute(ctx); err != nil {
				o.Logger.Debugf("%s: %v", s.name, err)
				failed = true
			}
		}
		if failed {
			return false, nil
		}
	}
	return true, nil
}
// Run is the entrypoint to start the uninstall process. It polls
// destroyCluster every 10 seconds (starting immediately) until it reports
// completion. vSphere has no quota concept, so the returned quota is nil.
func (o *ClusterUninstaller) Run() (*installertypes.ClusterQuota, error) {
	defer o.client.Logout()
	pollErr := wait.PollUntilContextCancel(context.Background(), 10*time.Second, true, o.destroyCluster)
	if pollErr != nil {
		return nil, errors.Wrap(pollErr, "failed to destroy cluster")
	}
	return nil, nil
}
|
package util
// DJBHash returns a non-negative 31-bit DJB2-style hash of str
// (h = h*33 + c over the string's runes, seeded with 5381).
// NOTE(review): arithmetic runs in platform int width before the final
// 31-bit mask, so results for long strings differ from canonical 32-bit
// djb2 — preserved here because existing hashed values depend on it.
func DJBHash(str string) int32 {
	h := 5381
	for _, r := range str {
		// h + (h << 5) == h*33; identical arithmetic to the original.
		h = h*33 + int(r)
	}
	return int32(h & 0x7FFFFFFF)
}
|
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_multicluster_test
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/kubevela/pkg/controller/reconciler"
workflowv1alpha1 "github.com/kubevela/workflow/api/v1alpha1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/authentication/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/kubevela/pkg/util/rand"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
kubevelatypes "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
)
// initializeContext returns the hub-cluster context plus a derived context
// whose requests are routed to the worker cluster.
func initializeContext() (hubCtx context.Context, workerCtx context.Context) {
	hub := context.Background()
	return hub, multicluster.ContextWithClusterName(hub, WorkerClusterName)
}
// initializeContextAndNamespace builds hub and worker contexts and creates
// the same randomly-named test namespace ("test-mc-<rand>") on both clusters.
func initializeContextAndNamespace() (hubCtx context.Context, workerCtx context.Context, namespace string) {
	hubCtx, workerCtx = initializeContext()
	namespace = "test-mc-" + rand.RandomString(4)
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
	// DeepCopy so each Create receives its own object.
	for _, ctx := range []context.Context{hubCtx, workerCtx} {
		Expect(k8sClient.Create(ctx, ns.DeepCopy())).Should(Succeed())
	}
	return hubCtx, workerCtx, namespace
}
// cleanUpNamespace deletes the given test namespace from the hub cluster
// and then from the worker cluster, asserting each step succeeds.
func cleanUpNamespace(hubCtx context.Context, workerCtx context.Context, namespace string) {
	for _, ctx := range []context.Context{hubCtx, workerCtx} {
		ns := &corev1.Namespace{}
		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: namespace}, ns)).Should(Succeed())
		Expect(k8sClient.Delete(ctx, ns)).Should(Succeed())
	}
}
var _ = Describe("Test multicluster scenario", func() {
Context("Test vela cluster command", func() {
It("Test join cluster by X509 kubeconfig, rename it and detach it.", func() {
const oldClusterName = "test-worker-cluster"
const newClusterName = "test-cluster-worker"
_, err := execCommand("cluster", "list")
Expect(err).Should(Succeed())
_, err = execCommand("cluster", "join", "/tmp/worker.kubeconfig", "--name", oldClusterName)
Expect(err).Should(Succeed())
_, err = execCommand("cluster", "join", "/tmp/worker.kubeconfig", "--name", oldClusterName, "-y")
Expect(err).Should(Succeed())
out, err := execCommand("cluster", "list")
Expect(err).Should(Succeed())
Expect(out).Should(ContainSubstring(oldClusterName))
_, err = execCommand("cluster", "rename", oldClusterName, newClusterName)
Expect(err).Should(Succeed())
out, err = execCommand("cluster", "list")
Expect(err).Should(Succeed())
Expect(out).Should(ContainSubstring(newClusterName))
_, err = execCommand("cluster", "detach", newClusterName)
Expect(err).Should(Succeed())
out, err = execCommand("cluster", "list")
Expect(err).Should(Succeed())
Expect(out).ShouldNot(ContainSubstring(newClusterName))
})
It("Test manage labels for cluster", func() {
_, err := execCommand("cluster", "labels", "add", WorkerClusterName, "purpose=test,creator=e2e")
Expect(err).Should(Succeed())
out, err := execCommand("cluster", "list")
Expect(err).Should(Succeed())
Expect(out).Should(ContainSubstring("purpose"))
_, err = execCommand("cluster", "labels", "del", WorkerClusterName, "purpose")
Expect(err).Should(Succeed())
out, err = execCommand("cluster", "list")
Expect(err).Should(Succeed())
Expect(out).ShouldNot(ContainSubstring("purpose"))
})
It("Test alias for cluster", func() {
_, err := execCommand("cluster", "alias", WorkerClusterName, "alias-worker")
Expect(err).Should(Succeed())
out, err := execCommand("cluster", "list")
Expect(err).Should(Succeed())
Expect(out).Should(ContainSubstring("alias-worker"))
})
It("Test generate service account kubeconfig", func() {
_, workerCtx := initializeContext()
By("create service account kubeconfig in worker cluster")
key := time.Now().UnixNano()
serviceAccountName := fmt.Sprintf("test-service-account-%d", key)
serviceAccountNamespace := "kube-system"
serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{Namespace: serviceAccountNamespace, Name: serviceAccountName},
}
Expect(k8sClient.Create(workerCtx, serviceAccount)).Should(Succeed())
defer func() {
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: "kube-system", Name: serviceAccountName}, serviceAccount)).Should(Succeed())
Expect(k8sClient.Delete(workerCtx, serviceAccount)).Should(Succeed())
}()
clusterRoleBindingName := fmt.Sprintf("test-cluster-role-binding-%d", key)
clusterRoleBinding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{Name: clusterRoleBindingName},
Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: serviceAccountName, Namespace: serviceAccountNamespace}},
RoleRef: rbacv1.RoleRef{Name: "cluster-admin", APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole"},
}
Expect(k8sClient.Create(workerCtx, clusterRoleBinding)).Should(Succeed())
defer func() {
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: serviceAccountNamespace, Name: clusterRoleBindingName}, clusterRoleBinding)).Should(Succeed())
Expect(k8sClient.Delete(workerCtx, clusterRoleBinding)).Should(Succeed())
}()
serviceAccount = &corev1.ServiceAccount{}
By("Generating a token for SA")
tr := &v1.TokenRequest{}
token, err := k8sCli.CoreV1().ServiceAccounts(serviceAccountNamespace).CreateToken(workerCtx, serviceAccountName, tr, metav1.CreateOptions{})
Expect(err).Should(BeNil())
config, err := clientcmd.LoadFromFile(WorkerClusterKubeConfigPath)
Expect(err).Should(Succeed())
currentContext, ok := config.Contexts[config.CurrentContext]
Expect(ok).Should(BeTrue())
authInfo, ok := config.AuthInfos[currentContext.AuthInfo]
Expect(ok).Should(BeTrue())
authInfo.Token = token.Status.Token
authInfo.ClientKeyData = nil
authInfo.ClientCertificateData = nil
kubeconfigFilePath := fmt.Sprintf("/tmp/worker.sa-%d.kubeconfig", key)
Expect(clientcmd.WriteToFile(*config, kubeconfigFilePath)).Should(Succeed())
defer func() {
Expect(os.Remove(kubeconfigFilePath)).Should(Succeed())
}()
// try to join cluster with service account token based kubeconfig
clusterName := fmt.Sprintf("cluster-sa-%d", key)
_, err = execCommand("cluster", "join", kubeconfigFilePath, "--name", clusterName)
Expect(err).Should(Succeed())
_, err = execCommand("cluster", "detach", clusterName)
Expect(err).Should(Succeed())
})
It("Test vela cluster export-config", func() {
out, err := execCommand("cluster", "export-config")
Expect(err).Should(Succeed())
Expect(out).Should(ContainSubstring("name: " + WorkerClusterName))
})
})
Context("Test multi-cluster Application", func() {
var namespace string
var testNamespace string
var prodNamespace string
var hubCtx context.Context
var workerCtx context.Context
BeforeEach(func() {
hubCtx, workerCtx, namespace = initializeContextAndNamespace()
_, _, testNamespace = initializeContextAndNamespace()
_, _, prodNamespace = initializeContextAndNamespace()
})
AfterEach(func() {
cleanUpNamespace(hubCtx, workerCtx, namespace)
cleanUpNamespace(hubCtx, workerCtx, testNamespace)
cleanUpNamespace(hubCtx, workerCtx, prodNamespace)
})
It("Test deploy multi-cluster application with target", func() {
By("apply application")
app := &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-app-target"},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{{
Name: "test-busybox",
Type: "webservice",
Properties: &runtime.RawExtension{Raw: []byte(`{"image":"busybox","cmd":["sleep","86400"]}`)},
}},
Policies: []v1beta1.AppPolicy{{
Name: "topology-local",
Type: "topology",
Properties: &runtime.RawExtension{Raw: []byte(fmt.Sprintf(`{"clusters":["local"],"namespace":"%s"}`, testNamespace))},
}, {
Name: "topology-remote",
Type: "topology",
Properties: &runtime.RawExtension{Raw: []byte(fmt.Sprintf(`{"clusters":["%s"],"namespace":"%s"}`, WorkerClusterName, prodNamespace))},
}},
},
}
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Name: "test-busybox", Namespace: testNamespace}, &appsv1.Deployment{})).Should(Succeed())
g.Expect(k8sClient.Get(workerCtx, types.NamespacedName{Name: "test-busybox", Namespace: prodNamespace}, &appsv1.Deployment{})).Should(Succeed())
}, time.Minute).Should(Succeed())
})
It("Test re-deploy application with old revisions", func() {
By("apply application")
app := &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-app-target"},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{{
Name: "test-busybox",
Type: "webservice",
Properties: &runtime.RawExtension{Raw: []byte(`{"image":"busybox","cmd":["sleep","86400"]}`)},
}},
Policies: []v1beta1.AppPolicy{{
Name: "topology-local",
Type: "topology",
Properties: &runtime.RawExtension{Raw: []byte(fmt.Sprintf(`{"clusters":["local"],"namespace":"%s"}`, testNamespace))},
},
}}}
oam.SetPublishVersion(app, "alpha")
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Name: "test-busybox", Namespace: testNamespace}, &appsv1.Deployment{})).Should(Succeed())
}, time.Minute).Should(Succeed())
By("update application to new version")
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
app.Spec.Components[0].Name = "test-busybox-v2"
oam.SetPublishVersion(app, "beta")
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}, 15*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Name: "test-busybox-v2", Namespace: testNamespace}, &appsv1.Deployment{})).Should(Succeed())
err := k8sClient.Get(hubCtx, types.NamespacedName{Name: "test-busybox", Namespace: testNamespace}, &appsv1.Deployment{})
g.Expect(kerrors.IsNotFound(err)).Should(BeTrue())
}, time.Minute).Should(Succeed())
By("Re-publish application to v1")
_, err := execCommand("up", appKey.Name, "-n", appKey.Namespace, "--revision", appKey.Name+"-v1", "--publish-version", "v1.0")
Expect(err).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Name: "test-busybox", Namespace: testNamespace}, &appsv1.Deployment{})).Should(Succeed())
err := k8sClient.Get(hubCtx, types.NamespacedName{Name: "test-busybox-v2", Namespace: testNamespace}, &appsv1.Deployment{})
g.Expect(kerrors.IsNotFound(err)).Should(BeTrue())
}, 2*time.Minute).Should(Succeed())
})
It("Test applications sharing resources", func() {
createApp := func(name string) *v1beta1.Application {
return &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
Spec: v1beta1.ApplicationSpec{
Components: []common.ApplicationComponent{{
Name: "shared-resource-" + name,
Type: "k8s-objects",
Properties: &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"shared"},"data":{"key":"value"}}]}`)},
}, {
Name: "no-shared-resource-" + name,
Type: "k8s-objects",
Properties: &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"non-shared-` + name + `"},"data":{"key":"value"}}]}`)},
}},
Policies: []v1beta1.AppPolicy{{
Type: "shared-resource",
Name: "shared-resource",
Properties: &runtime.RawExtension{Raw: []byte(`{"rules":[{"selector":{"componentNames":["shared-resource-` + name + `"]}}]}`)},
}},
},
}
}
app1 := createApp("app1")
Expect(k8sClient.Create(hubCtx, app1)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app1), app1)).Should(Succeed())
g.Expect(app1.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 10*time.Second).Should(Succeed())
app2 := createApp("app2")
Expect(k8sClient.Create(hubCtx, app2)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app2), app2)).Should(Succeed())
g.Expect(app2.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 10*time.Second).Should(Succeed())
app3 := createApp("app3")
Expect(k8sClient.Create(hubCtx, app3)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app3), app3)).Should(Succeed())
g.Expect(app3.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 10*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
cm := &corev1.ConfigMap{}
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, cm)).Should(Succeed())
g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(SatisfyAll(ContainSubstring("app1"), ContainSubstring("app2"), ContainSubstring("app3")))
g.Expect(cm.GetLabels()[oam.LabelAppName]).Should(SatisfyAny(Equal("app1"), Equal("app2"), Equal("app3")))
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app1"}, &corev1.ConfigMap{})).Should(Succeed())
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app2"}, &corev1.ConfigMap{})).Should(Succeed())
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Succeed())
}, 45*time.Second).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, app2)).Should(Succeed())
Eventually(func(g Gomega) {
cm := &corev1.ConfigMap{}
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, cm)).Should(Succeed())
g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(SatisfyAll(ContainSubstring("app1"), ContainSubstring("app3")))
g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).ShouldNot(SatisfyAny(ContainSubstring("app2")))
g.Expect(cm.GetLabels()[oam.LabelAppName]).Should(SatisfyAny(Equal("app1"), Equal("app3")))
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app1"}, &corev1.ConfigMap{})).Should(Succeed())
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app2"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Succeed())
}, 10*time.Second).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, app1)).Should(Succeed())
Eventually(func(g Gomega) {
cm := &corev1.ConfigMap{}
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, cm)).Should(Succeed())
g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).Should(SatisfyAll(ContainSubstring("app3")))
g.Expect(cm.GetAnnotations()[oam.AnnotationAppSharedBy]).ShouldNot(SatisfyAny(ContainSubstring("app1"), ContainSubstring("app2")))
g.Expect(cm.GetLabels()[oam.LabelAppName]).Should(Equal("app3"))
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app1"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Succeed())
}, 10*time.Second).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, app3)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "shared"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "non-shared-app3"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
}, 10*time.Second).Should(Succeed())
})
It("Test applications with bad resource", func() {
bs, err := os.ReadFile("./testdata/app/app-bad-resource.yaml")
Expect(err).Should(Succeed())
appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
app := &v1beta1.Application{}
Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
ctx := context.Background()
Expect(k8sClient.Create(ctx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunningWorkflow))
g.Expect(len(app.Status.Workflow.Steps) > 0).Should(BeTrue())
g.Expect(app.Status.Workflow.Steps[0].Message).Should(ContainSubstring("is invalid"))
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: app.Name, oam.LabelAppNamespace: app.Namespace})).Should(Succeed())
g.Expect(len(rts.Items)).Should(Equal(0))
}, 20*time.Second).Should(Succeed())
Expect(k8sClient.Delete(ctx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Satisfy(kerrors.IsNotFound))
}, 10*time.Second).Should(Succeed())
})
// An application combining env vars with the storage trait should reach the
// running phase without error.
It("Test applications with env and storage trait", func() {
	content, readErr := os.ReadFile("./testdata/app/app-with-env-and-storage.yaml")
	Expect(readErr).Should(Succeed())
	// Substitute the placeholder namespace before decoding the manifest.
	rendered := strings.ReplaceAll(string(content), "TEST_NAMESPACE", testNamespace)
	application := &v1beta1.Application{}
	Expect(yaml.Unmarshal([]byte(rendered), application)).Should(Succeed())
	Expect(k8sClient.Create(hubCtx, application)).Should(Succeed())
	// Poll until the controller reports the application as running.
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(application), application)).Should(Succeed())
		g.Expect(application.Status.Phase).Should(Equal(common.ApplicationRunning))
	}, 20*time.Second).Should(Succeed())
})
// Deploy an app carrying a garbage-collect policy, flip the policy strategy
// from onAppUpdate to never, and verify that resources removed from the spec
// are preserved — both across an app update and across app deletion.
It("Test applications with gc policy change (onAppUpdate -> never)", func() {
	bs, err := os.ReadFile("./testdata/app/app-gc-policy-change.yaml")
	Expect(err).Should(Succeed())
	appYaml := strings.ReplaceAll(string(bs), "TEST_NAMESPACE", testNamespace)
	app := &v1beta1.Application{}
	Expect(yaml.Unmarshal([]byte(appYaml), app)).Should(Succeed())
	Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
		g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
	}, 20*time.Second).Should(Succeed())
	By("update gc policy to never")
	Eventually(func(g Gomega) {
		// BUGFIX: this Get was wrapped in g.Expect without any assertion
		// method, so a failed read (e.g. a conflict retry) went unnoticed.
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
		gcPolicy := &v1alpha1.GarbageCollectPolicySpec{}
		g.Expect(json.Unmarshal(app.Spec.Policies[0].Properties.Raw, gcPolicy)).Should(Succeed())
		gcPolicy.Rules[0].Strategy = v1alpha1.GarbageCollectStrategyNever
		bs, err = json.Marshal(gcPolicy)
		g.Expect(err).Should(Succeed())
		app.Spec.Policies[0].Properties = &runtime.RawExtension{Raw: bs}
		g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
	}, 10*time.Second).Should(Succeed())
	By("check app updated and resource still exists")
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
		g.Expect(app.Status.ObservedGeneration).Should(Equal(int64(2)))
		g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
		g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: testNamespace, Name: "gc-policy-test"}, &corev1.ConfigMap{})).Should(Succeed())
	}, 20*time.Second).Should(Succeed())
	By("update app to new object")
	Eventually(func(g Gomega) {
		// BUGFIX: same missing assertion method as above — a failed Get here
		// would previously have been silently ignored.
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
		app.Spec.Components[0].Properties = &runtime.RawExtension{Raw: []byte(`{"objects":[{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"another"},"data":{"key":"new-val"}}]}`)}
		g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
	}).Should(Succeed())
	By("check app updated and resource still exists")
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
		g.Expect(app.Status.ObservedGeneration).Should(Equal(int64(3)))
		g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
		// With strategy=never the old ConfigMap survives the component change.
		g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: testNamespace, Name: "gc-policy-test"}, &corev1.ConfigMap{})).Should(Succeed())
		g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: testNamespace, Name: "another"}, &corev1.ConfigMap{})).Should(Succeed())
	}, 20*time.Second).Should(Succeed())
	By("delete app and check resource")
	// BUGFIX: this Eventually previously ended with a bare `})` and was never
	// invoked via .Should(...), so the deletion assertions did not run at all.
	Eventually(func(g Gomega) {
		g.Expect(client.IgnoreNotFound(k8sClient.Delete(hubCtx, app))).Should(Succeed())
		// The never-strategy ConfigMap outlives the app; the other one is GCed.
		g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: testNamespace, Name: "gc-policy-test"}, &corev1.ConfigMap{})).Should(Succeed())
		g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: testNamespace, Name: "another"}, &corev1.ConfigMap{})).Should(Satisfy(kerrors.IsNotFound))
	}, 20*time.Second).Should(Succeed())
})
// A webservice component carrying env vars plus the labels and storage
// traits should surface all three on the generated Deployment: the label,
// the single env entry, and the mounted volume.
It("Test Application with env in webservice and labels & storage trait", func() {
bs, err := os.ReadFile("./testdata/app/app-with-env-labels-storage.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
deploy := &appsv1.Deployment{}
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "test"}, deploy)).Should(Succeed())
}, 15*time.Second).Should(Succeed())
// labels trait result
Expect(deploy.GetLabels()["key"]).Should(Equal("val"))
// env parameter of the webservice component
Expect(len(deploy.Spec.Template.Spec.Containers[0].Env)).Should(Equal(1))
Expect(deploy.Spec.Template.Spec.Containers[0].Env[0].Name).Should(Equal("testKey"))
Expect(deploy.Spec.Template.Spec.Containers[0].Env[0].Value).Should(Equal("testValue"))
// storage trait result
Expect(len(deploy.Spec.Template.Spec.Volumes)).Should(Equal(1))
})
// Verify that the collect-service-endpoint step captures the Service host
// and that export-data publishes it as a ConfigMap on both the hub and the
// worker cluster.
It("Test application with collect-service-endpoint and export-data", func() {
By("create application")
bs, err := os.ReadFile("./testdata/app/app-collect-service-endpoint-and-export.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(testNamespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
By("test dispatched resource")
svc := &corev1.Service{}
Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "busybox"}, svc)).Should(Succeed())
// The collected endpoint is the in-cluster DNS name <svc>.<namespace>.
host := "busybox." + testNamespace
cm := &corev1.ConfigMap{}
Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: app.Name}, cm)).Should(Succeed())
Expect(cm.Data["host"]).Should(Equal(host))
// The exported data must also be replicated to the worker cluster.
Expect(k8sClient.Get(workerCtx, client.ObjectKey{Namespace: testNamespace, Name: app.Name}, cm)).Should(Succeed())
Expect(cm.Data["host"]).Should(Equal(host))
})
// Changing a workflow step's properties must rerun the workflow: the
// Deployment originally placed on the hub should be garbage-collected and
// re-dispatched to the worker cluster.
It("Test application with workflow change will rerun", func() {
By("create application")
bs, err := os.ReadFile("./testdata/app/app-lite-with-workflow.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(testNamespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
// Initially the Deployment exists on the hub cluster.
Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
// Point the deploy step at the "worker" policy to retarget the cluster.
app.Spec.Workflow.Steps[0].Properties = &runtime.RawExtension{Raw: []byte(`{"policies":["worker"]}`)}
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}, 10*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
// After the rerun the hub copy is GCed and the worker copy exists.
g.Expect(k8sClient.Get(hubCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Satisfy(kerrors.IsNotFound))
g.Expect(k8sClient.Get(workerCtx, client.ObjectKey{Namespace: testNamespace, Name: "data-worker"}, &appsv1.Deployment{})).Should(Succeed())
}, 20*time.Second).Should(Succeed())
})
// An apply-component step that pins a cluster should dispatch the
// component's Deployment into that managed cluster.
It("Test application with apply-component and cluster", func() {
	By("create application")
	raw, readErr := os.ReadFile("./testdata/app/app-component-with-cluster.yaml")
	Expect(readErr).Should(Succeed())
	application := &v1beta1.Application{}
	Expect(yaml.Unmarshal(raw, application)).Should(Succeed())
	application.SetNamespace(testNamespace)
	Expect(k8sClient.Create(hubCtx, application)).Should(Succeed())
	// Wait for the application to settle into the running phase.
	Eventually(func(g Gomega) {
		g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(application), application)).Should(Succeed())
		g.Expect(application.Status.Phase).Should(Equal(common.ApplicationRunning))
	}, 20*time.Second).Should(Succeed())
	// The Deployment must land in the worker cluster, not on the hub.
	deployKey := client.ObjectKey{Namespace: testNamespace, Name: "component-cluster"}
	Expect(k8sClient.Get(workerCtx, deployKey, &appsv1.Deployment{})).Should(Succeed())
})
// A component definition that reads context.cluster should render the name
// of whichever cluster it is dispatched to: "local" on the hub and the
// worker cluster's name on the worker.
It("Test application with component using cluster context", func() {
By("Create definition")
bs, err := os.ReadFile("./testdata/def/cluster-config.yaml")
Expect(err).Should(Succeed())
def := &v1beta1.ComponentDefinition{}
Expect(yaml.Unmarshal(bs, def)).Should(Succeed())
def.SetNamespace(kubevelatypes.DefaultKubeVelaNS)
Expect(k8sClient.Create(hubCtx, def)).Should(Succeed())
defKey := client.ObjectKeyFromObject(def)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, defKey, def)).Should(Succeed())
}, 5*time.Second).Should(Succeed())
bs, err = os.ReadFile("./testdata/app/app-component-with-cluster-context.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(testNamespace)
Eventually(func(g Gomega) { // informer may have latency for the added definition
g.Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
key := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
cm := &corev1.ConfigMap{}
// Same component, two clusters: the rendered ConfigMap reflects each
// target cluster's name via context.cluster.
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: testNamespace, Name: "test"}, cm)).Should(Succeed())
Expect(cm.Data["cluster"]).Should(Equal("local"))
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: testNamespace, Name: "test"}, cm)).Should(Succeed())
Expect(cm.Data["cluster"]).Should(Equal("cluster-worker"))
// Remove the definition so it does not leak into sibling tests.
Expect(k8sClient.Delete(hubCtx, def)).Should(Succeed())
})
// With a read-only policy, an app referencing an existing Deployment may
// observe it but never mutate or own it: a conflicting component name fails
// the workflow, a ref-style component succeeds, and deleting the app leaves
// the Deployment untouched.
It("Test application with read-only policy", func() {
By("create deployment")
bs, err := os.ReadFile("./testdata/app/standalone/deployment-busybox.yaml")
Expect(err).Should(Succeed())
deploy := &appsv1.Deployment{}
Expect(yaml.Unmarshal(bs, deploy)).Should(Succeed())
deploy.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, deploy)).Should(Succeed())
By("create application")
bs, err = os.ReadFile("./testdata/app/app-readonly.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Workflow).ShouldNot(BeNil())
g.Expect(len(app.Status.Workflow.Steps)).ShouldNot(Equal(0))
// The attempt to write the pre-existing Deployment must be rejected.
g.Expect(app.Status.Workflow.Steps[0].Phase).Should(Equal(workflowv1alpha1.WorkflowStepPhaseFailed))
}, 20*time.Second).Should(Succeed())
By("update application")
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
// Switching to a non-conflicting component name lets the workflow pass.
app.Spec.Components[0].Name = "busybox-ref"
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}, 20*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
By("delete application")
appKey := client.ObjectKeyFromObject(app)
deployKey := client.ObjectKeyFromObject(deploy)
Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(kerrors.IsNotFound(k8sClient.Get(hubCtx, appKey, app))).Should(BeTrue())
}, 20*time.Second).Should(Succeed())
// The read-only target survives app deletion.
Expect(k8sClient.Get(hubCtx, deployKey, deploy)).Should(Succeed())
})
// With a take-over policy, the app adopts the pre-existing Deployment as its
// own: the app runs successfully, and deleting it garbage-collects the
// adopted Deployment along with it (contrast with the read-only test above).
It("Test application with take-over policy", func() {
By("create deployment")
bs, err := os.ReadFile("./testdata/app/standalone/deployment-busybox.yaml")
Expect(err).Should(Succeed())
deploy := &appsv1.Deployment{}
Expect(yaml.Unmarshal(bs, deploy)).Should(Succeed())
deploy.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, deploy)).Should(Succeed())
By("create application")
bs, err = os.ReadFile("./testdata/app/app-takeover.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 20*time.Second).Should(Succeed())
By("delete application")
appKey := client.ObjectKeyFromObject(app)
deployKey := client.ObjectKeyFromObject(deploy)
Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(kerrors.IsNotFound(k8sClient.Get(hubCtx, appKey, app))).Should(BeTrue())
// The taken-over Deployment is GCed together with the app.
g.Expect(kerrors.IsNotFound(k8sClient.Get(hubCtx, deployKey, deploy))).Should(BeTrue())
}, 20*time.Second).Should(Succeed())
})
// Verify input/output passing through a deploy step: each cluster's step
// output (a status message and an IP) is written into a per-cluster
// ConfigMap, and the two clusters report distinct IPs.
It("Test application with input/output in deploy step", func() {
By("create application")
bs, err := os.ReadFile("./testdata/app/app-deploy-io.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, client.ObjectKeyFromObject(app), app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}, 30*time.Second).Should(Succeed())
By("Check input/output work properly")
cm := &corev1.ConfigMap{}
cmKey := client.ObjectKey{Namespace: namespace, Name: "deployment-msg"}
// IPs captured from each cluster's ConfigMap, compared at the end.
var (
ipLocal string
ipWorker string
)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, cmKey, cm)).Should(Succeed())
g.Expect(cm.Data["msg"]).Should(Equal("Deployment has minimum availability."))
ipLocal = cm.Data["ip"]
g.Expect(ipLocal).ShouldNot(BeEmpty())
}, 20*time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(workerCtx, cmKey, cm)).Should(Succeed())
g.Expect(cm.Data["msg"]).Should(Equal("Deployment has minimum availability."))
ipWorker = cm.Data["ip"]
g.Expect(ipWorker).ShouldNot(BeEmpty())
}, 20*time.Second).Should(Succeed())
// Distinct IPs prove each cluster produced its own step output.
Expect(ipLocal).ShouldNot(Equal(ipWorker))
By("delete application")
appKey := client.ObjectKeyFromObject(app)
Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(kerrors.IsNotFound(k8sClient.Get(hubCtx, appKey, app))).Should(BeTrue())
}, 20*time.Second).Should(Succeed())
})
// Simulate a disconnected managed cluster during garbage collection: stale
// versioned resource trackers must be retained while the cluster is
// unreachable, the workflow must still be able to restart and publish a new
// revision, and once the broken cluster secret is removed GC completes and
// only one versioned tracker remains.
It("Test application with failed gc and restart workflow", func() {
By("duplicate cluster")
secret := &corev1.Secret{}
const secretName = "disconnection-test"
// Clone the worker cluster's connection secret under a new name so we can
// break it without affecting other tests.
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: WorkerClusterName}, secret)).Should(Succeed())
secret.SetName(secretName)
secret.SetResourceVersion("")
Expect(k8sClient.Create(hubCtx, secret)).Should(Succeed())
// Best-effort cleanup in case the test fails before the explicit delete.
defer func() {
_ = k8sClient.Delete(hubCtx, secret)
}()
By("create cluster normally")
bs, err := os.ReadFile("./testdata/app/app-disconnection-test.yaml")
Expect(err).Should(Succeed())
app := &v1beta1.Application{}
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
key := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("disconnect cluster")
// Point the duplicated cluster secret at an unreachable endpoint.
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
secret.Data["endpoint"] = []byte("https://1.2.3.4:9999")
Expect(k8sClient.Update(hubCtx, secret)).Should(Succeed())
By("update application")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
app.Spec.Policies = nil
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
// Two versioned trackers: GC of the old one is blocked by the
// unreachable cluster.
g.Expect(cnt).Should(Equal(2))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
By("try update application again")
Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
if app.Annotations == nil {
app.Annotations = map[string]string{}
}
// Forcing a new publish version triggers a fresh workflow run.
app.Annotations[oam.AnnotationPublishVersion] = "test"
Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, key, app)).Should(Succeed())
g.Expect(app.Status.LatestRevision).ShouldNot(BeNil())
g.Expect(app.Status.LatestRevision.Revision).Should(Equal(int64(3)))
g.Expect(app.Status.ObservedGeneration).Should(Equal(app.Generation))
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed())
By("clear disconnection cluster secret")
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: kubevelatypes.DefaultKubeVelaNS, Name: secretName}, secret)).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, secret)).Should(Succeed())
By("wait gc application completed")
Eventually(func(g Gomega) {
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(hubCtx, rts, client.MatchingLabels{oam.LabelAppName: key.Name, oam.LabelAppNamespace: key.Namespace})).Should(Succeed())
cnt := 0
for _, item := range rts.Items {
if item.Spec.Type == v1beta1.ResourceTrackerTypeVersioned {
cnt++
}
}
// With the bad cluster gone, GC succeeds and one tracker remains.
g.Expect(cnt).Should(Equal(1))
}).WithTimeout(30 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
})
// Combining a garbage-collect policy with shared-resource: the shared
// ConfigMap must still exist after the owning application is deleted.
It("Test application with gc policy and shared-resource policy", func() {
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-gc-shared.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
// The app renders a ConfigMap with the same namespace/name as itself.
g.Expect(k8sClient.Get(hubCtx, appKey, &corev1.ConfigMap{})).Should(Succeed())
}).WithTimeout(10 * time.Second).Should(Succeed())
Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(kerrors.IsNotFound(k8sClient.Get(hubCtx, appKey, app))).Should(BeTrue())
// The shared ConfigMap outlives the deleted application.
g.Expect(k8sClient.Get(hubCtx, appKey, &corev1.ConfigMap{})).Should(Succeed())
}).WithTimeout(10 * time.Second).Should(Succeed())
})
// A trait that patches the disable-health-check annotation and carries its
// own health policy: with key=false the trait reports unhealthy; updating
// the trait to key=true flips it to healthy even though the component image
// ("bad") never becomes ready.
It("Test application skip webservice component health check", func() {
td := &v1beta1.TraitDefinition{
ObjectMeta: metav1.ObjectMeta{Name: "ignore-health-check", Namespace: namespace},
Spec: v1beta1.TraitDefinitionSpec{
Schematic: &common.Schematic{CUE: &common.CUE{
Template: `
patch: metadata: annotations: "app.oam.dev/disable-health-check": parameter.key
parameter: key: string
`,
}},
Status: &common.Status{HealthPolicy: `isHealth: context.parameter.key == "true"`},
},
}
Expect(k8sClient.Create(hubCtx, td)).Should(Succeed())
app := &v1beta1.Application{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: namespace},
Spec: v1beta1.ApplicationSpec{Components: []common.ApplicationComponent{{
Type: "webservice",
Name: "test",
Properties: &runtime.RawExtension{Raw: []byte(`{"image":"bad"}`)},
Traits: []common.ApplicationTrait{{
Type: "ignore-health-check",
Properties: &runtime.RawExtension{Raw: []byte(`{"key":"false"}`)},
}},
}}},
}
Eventually(func(g Gomega) { // in case the trait definition has not been watched by vela-core
g.Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
}).WithTimeout(10 * time.Second).WithPolling(2 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
g.Expect(len(app.Status.Services) > 0).Should(BeTrue())
g.Expect(len(app.Status.Services[0].Traits) > 0).Should(BeTrue())
// key=false -> health policy evaluates to unhealthy.
g.Expect(app.Status.Services[0].Traits[0].Healthy).Should(BeFalse())
}).WithTimeout(10 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
// Flip the trait parameter so the health policy reports healthy.
app.Spec.Components[0].Traits[0].Properties.Raw = []byte(`{"key":"true"}`)
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}).WithTimeout(10 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
g.Expect(len(app.Status.Services) > 0).Should(BeTrue())
g.Expect(len(app.Status.Services[0].Traits) > 0).Should(BeTrue())
g.Expect(app.Status.Services[0].Traits[0].Healthy).Should(BeTrue())
}).WithTimeout(20 * time.Second).Should(Succeed())
})
// A paused application must not start its workflow; unpausing it via the
// reconciler helper lets it proceed to the running phase.
It("Test pause application", func() {
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-pause.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Expect(k8sClient.Create(hubCtx, app)).Should(Succeed())
// Give the controller ample time; while paused, no workflow status should
// ever appear (a positive wait is required to assert a non-event).
time.Sleep(10 * time.Second)
appKey := client.ObjectKeyFromObject(app)
Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
Expect(app.Status.Workflow).Should(BeNil())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
// Clear the pause flag; retried in Eventually to absorb update conflicts.
reconciler.SetPause(app, false)
g.Expect(k8sClient.Update(hubCtx, app)).Should(Succeed())
}).WithTimeout(5 * time.Second).WithPolling(time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Get(hubCtx, appKey, app)).Should(Succeed())
g.Expect(app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithTimeout(15 * time.Second).WithPolling(3 * time.Second).Should(Succeed())
Expect(k8sClient.Delete(hubCtx, app)).Should(Succeed())
})
// A custom deploy-style workflow step whose definition carries an inline
// policy should still deploy the component; the resulting Deployment is
// expected with 0 replicas.
It("Test application carrying deploy step with inline policy", func() {
ctx := context.Background()
wsDef := &v1beta1.WorkflowStepDefinition{}
bs, err := os.ReadFile("./testdata/def/inline-deploy.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, wsDef)).Should(Succeed())
wsDef.SetNamespace(namespace)
Expect(k8sClient.Create(ctx, wsDef)).Should(Succeed())
app := &v1beta1.Application{}
bs, err = os.ReadFile("./testdata/app/app-carrying-deploy-step-with-inline-policy.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
// Retried because the step definition may not be watched yet.
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
_deploy := &appsv1.Deployment{}
Expect(k8sClient.Get(ctx, appKey, _deploy)).Should(Succeed())
// The inline policy pins replicas to zero.
Expect(int(*_deploy.Spec.Replicas)).Should(Equal(0))
})
// Two applications sharing the same resources via multiple gc and
// shared-resource policies: both can run concurrently, and after both are
// deleted the shared ConfigMap and Secret must still exist.
It("Test application with multiple gc & shared-resource policies", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-multi-resource-policies.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
// Second app is an identical copy under a different name, sharing the
// same rendered resources.
app2 := app.DeepCopy()
app2.SetName("test-2")
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app2)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
appKey2 := client.ObjectKeyFromObject(app2)
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey2, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
// Delete both apps; IgnoreNotFound keeps the retries idempotent.
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(client.IgnoreNotFound(k8sClient.Get(ctx, appKey, _app))).Should(Succeed())
g.Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, _app))).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(client.IgnoreNotFound(k8sClient.Get(ctx, appKey2, _app))).Should(Succeed())
g.Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, _app))).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(kerrors.IsNotFound(k8sClient.Get(ctx, appKey, _app))).Should(BeTrue())
g.Expect(kerrors.IsNotFound(k8sClient.Get(ctx, appKey2, _app))).Should(BeTrue())
// Shared resources survive the deletion of both owners.
g.Expect(k8sClient.Get(ctx, appKey, &corev1.ConfigMap{})).Should(Succeed())
g.Expect(k8sClient.Get(ctx, appKey, &corev1.Secret{})).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
})
// Policies declared without names (anonymous) must still be applied: the
// app runs and its Deployment is dispatched to the worker cluster.
It("Test application with anonymous policy", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-anonymous-policies.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
_deploy := &appsv1.Deployment{}
// The anonymous topology/override policies route the workload to the
// worker cluster with replicas pinned to zero.
Expect(k8sClient.Get(workerCtx, appKey, _deploy)).Should(Succeed())
Expect(int(*_deploy.Spec.Replicas)).Should(Equal(0))
})
// Verify the garbage-collect policy's applicationRevisionLimit: by default
// two revisions are kept after an update; with the limit set to 0 only the
// latest revision survives.
It("Test application with customized application revision limit", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-lite.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("update app and should have two revisions")
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
// Renaming the component forces a new application revision.
_app.Spec.Components[0].Name = "dw"
g.Expect(k8sClient.Update(ctx, _app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
_revs := &v1beta1.ApplicationRevisionList{}
g.Expect(k8sClient.List(ctx, _revs, client.InNamespace(namespace))).Should(Succeed())
g.Expect(len(_revs.Items)).Should(Equal(2))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("update app with gc policy and should have one revision")
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
_app.Spec.Components[0].Name = "dw2"
// A limit of 0 means only the latest revision is retained.
_app.Spec.Policies = []v1beta1.AppPolicy{{
Type: "garbage-collect",
Name: "gc",
Properties: &runtime.RawExtension{Raw: []byte(`{"applicationRevisionLimit":0}`)},
}}
g.Expect(k8sClient.Update(ctx, _app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
_revs := &v1beta1.ApplicationRevisionList{}
g.Expect(k8sClient.List(ctx, _revs, client.InNamespace(namespace))).Should(Succeed())
g.Expect(len(_revs.Items)).Should(Equal(1))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
})
// Verify the resource-update policy: after manually editing the rendered
// ConfigMap and then updating the app with new values, the re-applied
// resources reflect only the app's spec (the manual "extra" key is gone and
// the Secret carries the newly supplied value).
It("Test application with resource-update policy", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-recreate-test.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("update configmap")
// Simulate out-of-band drift on the managed ConfigMap.
Eventually(func(g Gomega) {
cm := &corev1.ConfigMap{}
g.Expect(k8sClient.Get(ctx, appKey, cm)).Should(Succeed())
cm.Data["extra"] = "extra-val"
g.Expect(k8sClient.Update(ctx, cm)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("update application")
// Re-render the app manifest with changed Secret/ConfigMap values.
Expect(yaml.Unmarshal([]byte(strings.ReplaceAll(strings.ReplaceAll(string(bs), "key: dgo=", "key: dnZ2Cg=="), "key: val", "key: val2")), app)).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
// Carry over the live resourceVersion so the update is accepted.
app.ResourceVersion = _app.ResourceVersion
g.Expect(k8sClient.Update(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("validate updated result")
Eventually(func(g Gomega) {
cm := &corev1.ConfigMap{}
g.Expect(k8sClient.Get(ctx, appKey, cm)).Should(Succeed())
// The manually added "extra" key was wiped by the re-apply.
g.Expect(len(cm.Data)).Should(Equal(1))
secret := &corev1.Secret{}
g.Expect(k8sClient.Get(ctx, appKey, secret)).Should(Succeed())
// "dnZ2Cg==" is base64 for "vvv\n".
g.Expect(string(secret.Data["key"])).Should(Equal("vvv\n"))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
})
// Dispatch a custom resource whose CRD is installed ONLY on the managed
// cluster: the hub must be able to apply it remotely without having the CRD
// itself.
It("Test application apply resources into managed cluster without installing CRD on the control plane", func() {
ctx := context.Background()
crd := &unstructured.Unstructured{}
bs, err := os.ReadFile("./testdata/kube/sample-crd.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, crd)).Should(Succeed())
// Install the CRD on the worker only; tolerate reruns of this test.
Expect(client.IgnoreAlreadyExists(k8sClient.Create(workerCtx, crd))).Should(Succeed())
app := &v1beta1.Application{}
bs, err = os.ReadFile("./testdata/app/app-remote-resource.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
// Read the custom resource back from the worker as unstructured, since
// the hub has no typed client for it.
obj := &unstructured.Unstructured{}
obj.SetAPIVersion("sample.custom.io/v1alpha1")
obj.SetKind("Foo")
Expect(k8sClient.Get(workerCtx, appKey, obj)).Should(Succeed())
Expect(obj.Object["spec"].(map[string]interface{})["key"]).Should(Equal("value"))
})
It("Test application with fixed cluster to dispatch", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-with-fixed-location.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
// Wait for the application to reconcile into the Running phase.
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
// The app's fixed locations place ConfigMap "x" on the hub cluster and
// ConfigMap "y" on the worker cluster.
Expect(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "x"}, &corev1.ConfigMap{})).Should(Succeed())
Expect(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: namespace, Name: "y"}, &corev1.ConfigMap{})).Should(Succeed())
By("Deleting")
_app := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
Expect(k8sClient.Delete(ctx, _app)).Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(kerrors.IsNotFound(k8sClient.Get(ctx, appKey, _app))).Should(BeTrue())
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
// Deleting the app must garbage-collect both dispatched ConfigMaps.
Expect(kerrors.IsNotFound(k8sClient.Get(hubCtx, types.NamespacedName{Namespace: namespace, Name: "x"}, &corev1.ConfigMap{}))).Should(BeTrue())
Expect(kerrors.IsNotFound(k8sClient.Get(workerCtx, types.NamespacedName{Namespace: namespace, Name: "y"}, &corev1.ConfigMap{}))).Should(BeTrue())
})
It("Test application with garbage-collect propagation setting", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-with-custom-gc-propagation.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
// Wait for the application to reconcile into the Running phase.
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("Deleting")
_app := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
Expect(k8sClient.Delete(ctx, _app)).Should(Succeed())
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(kerrors.IsNotFound(k8sClient.Get(ctx, appKey, _app))).Should(BeTrue())
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
// With the custom gc propagation policy, exactly one pod — the one whose
// name contains "orphan" — must survive application deletion.
Eventually(func(g Gomega) {
pods := &corev1.PodList{}
g.Expect(k8sClient.List(ctx, pods, client.InNamespace(namespace))).Should(Succeed())
g.Expect(len(pods.Items)).Should(Equal(1))
g.Expect(pods.Items[0].Name).Should(ContainSubstring("orphan"))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
})
It("Test application revision gc block application gc", func() {
ctx := context.Background()
app := &v1beta1.Application{}
bs, err := os.ReadFile("./testdata/app/app-lite.yaml")
Expect(err).Should(Succeed())
Expect(yaml.Unmarshal(bs, app)).Should(Succeed())
app.SetNamespace(namespace)
Eventually(func(g Gomega) {
g.Expect(k8sClient.Create(ctx, app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(5 * time.Second).Should(Succeed())
appKey := client.ObjectKeyFromObject(app)
// Wait for the application to reconcile into the Running phase.
Eventually(func(g Gomega) {
_app := &v1beta1.Application{}
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
g.Expect(_app.Status.Phase).Should(Equal(common.ApplicationRunning))
}).WithPolling(2 * time.Second).WithTimeout(20 * time.Second).Should(Succeed())
By("Add finalizer to application revision")
// A foreign finalizer on the v1 revision keeps the revision (and so the
// application) from being fully garbage-collected.
Eventually(func(g Gomega) {
_rev := &v1beta1.ApplicationRevision{}
g.Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appKey.Name + "-v1"}, _rev)).Should(Succeed())
_rev.SetFinalizers([]string{"mine"})
g.Expect(k8sClient.Update(ctx, _rev)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
By("Deleting")
_app := &v1beta1.Application{}
Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
Expect(k8sClient.Delete(ctx, _app)).Should(Succeed())
By("Check application existing after rt recycled")
// Resource trackers are recycled first ...
Eventually(func(g Gomega) {
rts := &v1beta1.ResourceTrackerList{}
g.Expect(k8sClient.List(ctx, rts, client.MatchingLabels{oam.LabelAppName: _app.Name, oam.LabelAppNamespace: _app.Namespace})).Should(Succeed())
g.Expect(len(rts.Items)).Should(Equal(0))
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
// ... yet the application itself must keep existing while the revision's
// finalizer is in place.
Consistently(func(g Gomega) {
g.Expect(k8sClient.Get(ctx, appKey, _app)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
By("Remove finalizer from application revision")
Eventually(func(g Gomega) {
_rev := &v1beta1.ApplicationRevision{}
g.Expect(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appKey.Name + "-v1"}, _rev)).Should(Succeed())
_rev.SetFinalizers([]string{})
g.Expect(k8sClient.Update(ctx, _rev)).Should(Succeed())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
By("Check application deletion")
// Once unblocked, both the application and its v1 revision disappear.
Eventually(func(g Gomega) {
g.Expect(kerrors.IsNotFound(k8sClient.Get(ctx, appKey, _app))).Should(BeTrue())
g.Expect(kerrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: appKey.Name + "-v1"}, &v1beta1.ApplicationRevision{}))).Should(BeTrue())
}).WithPolling(2 * time.Second).WithTimeout(10 * time.Second).Should(Succeed())
})
})
})
|
package common
// BotMessage carries one chat message together with the channel it belongs
// to and the user who authored it.
type BotMessage struct {
	Channel string
	User    string
	Message string
}

// NewMessage constructs a *BotMessage from the given user, channel and
// message text.
func NewMessage(user, channel, message string) *BotMessage {
	return &BotMessage{
		Channel: channel,
		User:    user,
		Message: message,
	}
}
|
package watcher
import (
"github.com/pubg/kube-image-deployer/controller"
"github.com/pubg/kube-image-deployer/interfaces"
pkgRuntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
// ApplyStrategicMergePatch is re-exported from the controller package so
// that users of this package can name the patch-callback type without
// importing controller directly.
type ApplyStrategicMergePatch = controller.ApplyStrategicMergePatch
// NewWatcher builds the default controller for the given resource and runs
// it with a single worker. Note: this call blocks the calling goroutine
// until the stop channel is closed (see NewWatcherWithController).
func NewWatcher(
name string,
stop chan struct{},
listWatcher cache.ListerWatcher,
objType pkgRuntime.Object,
imageNotifier interfaces.IImageNotifier,
controllerWatchKey string,
applyStrategicMergePatch ApplyStrategicMergePatch,
) {
controller := createDefaultController(name, stop, listWatcher, objType, imageNotifier, controllerWatchKey, applyStrategicMergePatch)
NewWatcherWithController(stop, controller)
}
// NewWatcherWithController starts the controller with one worker on a new
// goroutine and then blocks until stop is closed.
func NewWatcherWithController(
stop chan struct{},
controller interfaces.IController,
) {
go controller.Run(1, stop) // Let's start the controller
<-stop
}
// createDefaultController wires a rate-limited workqueue to an informer for
// objType and wraps both in a controller.Controller configured with the
// given notifier, watch key and patch callback.
func createDefaultController(
	name string,
	stop chan struct{},
	listWatcher cache.ListerWatcher,
	objType pkgRuntime.Object,
	imageNotifier interfaces.IImageNotifier,
	controllerWatchKey string,
	applyStrategicMergePatch ApplyStrategicMergePatch,
) *controller.Controller {
	// Rate-limited queue that receives object keys from the informer
	// callbacks below.
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	// enqueue derives the object's key with keyFunc and adds it to the
	// queue; objects whose key cannot be derived are silently skipped.
	enqueue := func(obj interface{}, keyFunc func(obj interface{}) (string, error)) {
		if key, err := keyFunc(obj); err == nil {
			queue.Add(key)
		}
	}

	// Bind the workqueue to a cache via an informer: every add, update and
	// delete event enqueues the affected key. Note that by the time a key is
	// processed, the cache may already hold a newer version of the object
	// than the one that triggered the event.
	indexer, informer := cache.NewIndexerInformer(listWatcher, objType, 0, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			enqueue(obj, cache.MetaNamespaceKeyFunc)
		},
		UpdateFunc: func(old interface{}, new interface{}) {
			enqueue(new, cache.MetaNamespaceKeyFunc)
		},
		DeleteFunc: func(obj interface{}) {
			// IndexerInformer uses a delta queue, so deletes must go through
			// the tombstone-aware key function.
			enqueue(obj, cache.DeletionHandlingMetaNamespaceKeyFunc)
		},
	}, cache.Indexers{})

	// Named ctrl (not controller) to avoid shadowing the controller package.
	ctrl := controller.NewController(controller.ControllerOpt{
		Resource:                 name,
		ObjType:                  objType,
		ApplyStrategicMergePatch: applyStrategicMergePatch,
		Queue:                    queue,
		Indexer:                  indexer,
		Informer:                 informer,
		ImageNotifier:            imageNotifier,
		ControllerWatchKey:       controllerWatchKey,
	})
	return ctrl
}
|
/**
* Author: Tony.Shao(xiocode@gmail.com)
* Date: 13-02-27
* Version: 0.02
*/
package weigo
import (
"encoding/json"
"fmt"
"reflect"
)
// JSONParser decodes the JSON document in body into result, which must be a
// pointer. Any error from encoding/json is returned unchanged. (The previous
// version copied body into an underscore-named local and re-checked the
// error before returning it; both steps were redundant.)
func JSONParser(body string, result interface{}) (err error) {
	return json.Unmarshal([]byte(body), result)
}
// debugPrintln writes its arguments to stdout, space separated and
// newline-terminated, exactly as fmt.Println would. The variadic slice is
// expanded with ... — the previous version passed the slice as a single
// value, so every line came out wrapped in "[...]" brackets.
func debugPrintln(message ...interface{}) {
	fmt.Println(message...)
}
// debugTypeof reports the dynamic type of element as seen by the reflect
// package.
func debugTypeof(element interface{}) interface{} {
	var elementType interface{} = reflect.TypeOf(element)
	return elementType
}
// debugCheckError prints err through debugPrintln when it is non-nil; a nil
// error produces no output.
func debugCheckError(err error) {
	if err == nil {
		return
	}
	debugPrintln(err)
}
|
package rkt
// KeyValue is a generic name/value pair as used in rkt image manifests.
type KeyValue struct {
Name string `json:"name"`
Value string `json:"value"`
}
// MountPoint names a mount location inside an application image.
type MountPoint struct {
Name string `json:"name"`
Path string `json:"path"`
}
// Port describes a port exposed by an application image.
type Port struct {
Count int `json:"count"`
Name string `json:"name"`
Port int `json:"port"`
Protocol string `json:"protocol"`
SocketActivated bool `json:"socketActivated"`
}
// Application is the "app" section of a rkt manifest: the command to run
// plus its user/group, mounts and ports.
type Application struct {
Exec []string `json:"exec"`
Group string `json:"group"`
MountPoints []MountPoint `json:"mountPoints"`
Ports []Port `json:"ports"`
User string `json:"user"`
}
// RktManifest mirrors the JSON layout of a rkt image manifest.
type RktManifest struct {
AcKind string `json:"acKind"`
AcVersion string `json:"acVersion"`
Annotations []KeyValue `json:"annotations"`
App Application `json:"app"`
Labels []KeyValue `json:"labels"`
Name string `json:"name"`
}
// ConsulCheckSettings configures a single Consul health check.
type ConsulCheckSettings struct {
Name string
Type string
Target string
Interval string
Timeout string
}
// ConsulSettings holds the Consul registration data for a pod.
type ConsulSettings struct {
Port int
Dns string
Check ConsulCheckSettings
}
// Pod is a running rkt pod together with its Consul settings.
type Pod struct {
Uuid string
Name string
Image string
IpAddress string
Consul ConsulSettings
}
|
package FizzBuzzHandler
import (
"errors"
"leboncoin/model"
"strconv"
routing "github.com/qiangxue/fasthttp-routing"
)
// FizzBuzzHandler handles HTTP requests that compute a fizzbuzz sequence.
type FizzBuzzHandler interface {
// GetFizzBuzz computes the fizzbuzz result for the request's query-string
// parameters and writes it to the response.
GetFizzBuzz(request *routing.Context) error
}
// defaultFizzBuzzHandler is the stock implementation; it only carries the
// limit used when the query string does not provide one.
type defaultFizzBuzzHandler struct {
defaultLimit int64
}
// New returns a FizzBuzzHandler that falls back to defaultLimit when the
// request omits the "limit" parameter.
func New(defaultLimit int64) FizzBuzzHandler {
return defaultFizzBuzzHandler{defaultLimit}
}
// GetFizzBuzz parses the fizzbuzz parameters from the request's query
// string, computes the sequence via model.FizzBuzz, and writes it back with
// a 200 status. Parse/validation errors from parseQueryString are returned
// to the router unchanged.
func (handler defaultFizzBuzzHandler) GetFizzBuzz(request *routing.Context) error {
int1, int2, limit, str1, str2, err := parseQueryString(request, handler.defaultLimit)
if err != nil {
return err
}
res := model.FizzBuzz(int1, int2, limit, str1, str2)
request.SetStatusCode(200)
request.Response.SetBody([]byte(res))
return nil
}
// parseQueryString extracts the fizzbuzz parameters from the request's query
// string. int1, int2, str1 and str2 are mandatory; limit falls back to
// defaultLimit when absent. It returns an error for an unparsable integer or
// a missing mandatory parameter.
func parseQueryString(request *routing.Context, defaultLimit int64) (int1 int64, int2 int64, limit int64, str1 string, str2 string, err error) {
	// Default the limit exactly once, before iterating. The previous version
	// assigned limit = defaultLimit inside the VisitAll callback, which (a)
	// reset an explicitly supplied "limit" whenever another key was visited
	// after it, and (b) left limit at 0 when the query string was empty.
	limit = defaultLimit
	request.QueryArgs().VisitAll(func(key, value []byte) {
		// Preserve the first parse error: later successful ParseInt calls
		// must not overwrite it (the old code reassigned err on every key).
		if err != nil {
			return
		}
		switch string(key) {
		case "int1":
			int1, err = strconv.ParseInt(string(value), 10, 0)
		case "int2":
			int2, err = strconv.ParseInt(string(value), 10, 0)
		case "limit":
			limit, err = strconv.ParseInt(string(value), 10, 0)
		case "str1":
			str1 = string(value)
		case "str2":
			str2 = string(value)
		}
	})
	if err == nil && (int1 == 0 || int2 == 0 || str1 == "" || str2 == "") {
		err = errors.New("Missing mandatory query string parameter")
	}
	return
}
|
package common
import (
"bytes"
"fmt"
"html/template"
"net/http"
)
// getTemplates parses the named template files from the templates/<folder>
// directory and returns the resulting template set. When fm is non-nil its
// functions are registered before parsing. A missing or invalid file panics
// via template.Must.
func getTemplates(folder string, filenames []string, fm template.FuncMap) (tmpl *template.Template) {
	paths := make([]string, 0, len(filenames))
	for _, name := range filenames {
		paths = append(paths, fmt.Sprintf("templates/%s%s.html", folder, name))
	}
	if fm == nil {
		return template.Must(template.ParseFiles(paths...))
	}
	return template.Must(template.New("").Funcs(fm).ParseFiles(paths...))
}
// GenerateHTML renders the "layout" template (assembled from the given files
// under templates/) to the HTTP response, exposing an "sb" helper that
// builds the sidebar.
// NOTE(review): the ExecuteTemplate error is silently discarded, so a
// partial or empty response is indistinguishable from success here.
func GenerateHTML(writer http.ResponseWriter, data interface{}, filenames ...string) {
funcMap := template.FuncMap{"sb": createSidebar}
getTemplates("", filenames, funcMap).ExecuteTemplate(writer, "layout", data)
}
// GenerateMail renders the "layout" template from the templates/mail/
// directory into the buffer for use as an email body.
// NOTE(review): the ExecuteTemplate error is silently discarded.
func GenerateMail(bufer *bytes.Buffer, data interface{}, filenames ...string) {
getTemplates("mail/", filenames, nil).ExecuteTemplate(bufer, "layout", data)
}
// createSidebar renders the sidebar for the given menu tag and marks the
// result as trusted HTML so html/template will not escape it.
// NOTE(review): sidebar and getSidebarHtml are declared elsewhere in the
// package; their exact semantics are not visible from this file section.
func createSidebar(manutag string) template.HTML {
return template.HTML(getSidebarHtml(sidebar.Sidebar, manutag))
}
|
package instance_test
import (
"encoding/json"
"errors"
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
boshlog "github.com/cloudfoundry/bosh-agent/logger"
bmdepl "github.com/cloudfoundry/bosh-micro-cli/deployment"
bmrel "github.com/cloudfoundry/bosh-micro-cli/release"
bmstemcell "github.com/cloudfoundry/bosh-micro-cli/stemcell"
fakecmd "github.com/cloudfoundry/bosh-agent/platform/commands/fakes"
fakesys "github.com/cloudfoundry/bosh-agent/system/fakes"
fakeuuid "github.com/cloudfoundry/bosh-agent/uuid/fakes"
fakebmblobstore "github.com/cloudfoundry/bosh-micro-cli/deployer/blobstore/fakes"
fakebmins "github.com/cloudfoundry/bosh-micro-cli/deployer/instance/fakes"
fakebmtemp "github.com/cloudfoundry/bosh-micro-cli/templatescompiler/fakes"
. "github.com/cloudfoundry/bosh-micro-cli/deployer/instance"
)
// Spec suite for TemplatesSpecGenerator.Create: downloading job template
// blobs, rendering them, compressing the result, and uploading it back to
// the blobstore — all against fakes.
var _ = Describe("TemplatesSpecGenerator", func() {
var (
templatesSpecGenerator TemplatesSpecGenerator
fakeJobRenderer *fakebmtemp.FakeJobRenderer
fakeCompressor *fakecmd.FakeCompressor
fakeBlobstore *fakebmblobstore.FakeBlobstore
fakeBlobstoreFactory *fakebmblobstore.FakeBlobstoreFactory
fakeUUIDGenerator *fakeuuid.FakeGenerator
fakeSha1Calculator *fakebmins.FakeSha1Calculator
deploymentJob bmdepl.Job
stemcellJob bmstemcell.Job
extractedJob bmrel.Job
jobProperties map[string]interface{}
fs *fakesys.FakeFileSystem
logger boshlog.Logger
tempFile *os.File
compileDir string
extractDir string
)
// Rebuild the full fake dependency graph before every spec so each test
// starts from a clean fixture.
BeforeEach(func() {
fakeJobRenderer = fakebmtemp.NewFakeJobRenderer()
fakeCompressor = fakecmd.NewFakeCompressor()
fakeCompressor.CompressFilesInDirTarballPath = "fake-tarball-path"
// The stemcell advertises three templates; the deployment job below
// references only the first and third.
stemcellJob = bmstemcell.Job{
Name: "fake-job-name",
Templates: []bmstemcell.Blob{
{
Name: "first-job-name",
Version: "first-job-version",
SHA1: "first-job-sha1",
BlobstoreID: "first-job-blobstore-id",
},
{
Name: "second-job-name",
Version: "second-job-version",
SHA1: "second-job-sha1",
BlobstoreID: "second-job-blobstore-id",
},
{
Name: "third-job-name",
Version: "third-job-version",
SHA1: "third-job-sha1",
BlobstoreID: "third-job-blobstore-id",
},
},
}
fakeBlobstore = fakebmblobstore.NewFakeBlobstore()
fakeBlobstoreFactory = fakebmblobstore.NewFakeBlobstoreFactory()
fakeBlobstoreFactory.CreateBlobstore = fakeBlobstore
fakeUUIDGenerator = &fakeuuid.FakeGenerator{
GeneratedUuid: "fake-blob-id",
}
jobProperties = map[string]interface{}{
"fake-property-key": "fake-property-value",
}
fakeSha1Calculator = fakebmins.NewFakeSha1Calculator()
fs = fakesys.NewFakeFileSystem()
logger = boshlog.NewLogger(boshlog.LevelNone)
var err error
tempFile, err = fs.TempFile("fake-blob-temp-file")
Expect(err).ToNot(HaveOccurred())
fs.ReturnTempFile = tempFile
fs.TempDirDir = "/fake-tmp-dir"
// fake file system only supports one temp dir
compileDir = "/fake-tmp-dir"
extractDir = "/fake-tmp-dir"
deploymentJob = bmdepl.Job{
Templates: []bmdepl.ReleaseJobRef{
{
Name: "first-job-name",
},
{
Name: "third-job-name",
},
},
RawProperties: map[interface{}]interface{}{
"fake-property-key": "fake-property-value",
},
}
templatesSpecGenerator = NewTemplatesSpecGenerator(
fakeBlobstoreFactory,
fakeCompressor,
fakeJobRenderer,
fakeUUIDGenerator,
fakeSha1Calculator,
fs,
logger,
)
extractedJob = bmrel.Job{
Templates: map[string]string{
"director.yml.erb": "config/director.yml",
},
ExtractedPath: extractDir,
}
blobJobJSON, err := json.Marshal(extractedJob)
Expect(err).ToNot(HaveOccurred())
// Simulate extraction: decompressing a template blob drops a job
// manifest and a monit file into the temp dir.
fakeCompressor.DecompressFileToDirCallBack = func() {
fs.WriteFile("/fake-tmp-dir/job.MF", blobJobJSON)
fs.WriteFile("/fake-tmp-dir/monit", []byte("fake-monit-contents"))
}
fakeCompressor.CompressFilesInDirTarballPath = "fake-tarball-path"
// The sha1 of the compile dir becomes the configuration hash; the sha1
// of the tarball becomes the archive hash.
fakeSha1Calculator.SetCalculateBehavior(map[string]fakebmins.CalculateInput{
compileDir: fakebmins.CalculateInput{
Sha1: "fake-configuration-hash",
},
"fake-tarball-path": fakebmins.CalculateInput{
Sha1: "fake-archive-sha1",
},
})
})
// Create is the method under test; every spec re-runs it against the
// fixture assembled in BeforeEach.
Describe("Create", func() {
It("downloads only job template blobs from the blobstore that are specified in the manifest", func() {
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fakeBlobstore.GetInputs).To(Equal([]fakebmblobstore.GetInput{
{
BlobID: "first-job-blobstore-id",
DestinationPath: tempFile.Name(),
},
{
BlobID: "third-job-blobstore-id",
DestinationPath: tempFile.Name(),
},
}))
})
It("removes the tempfile for downloaded blobs", func() {
tempFile, err := fs.TempFile("fake-blob-temp-file")
Expect(err).ToNot(HaveOccurred())
fs.ReturnTempFile = tempFile
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fs.FileExists(tempFile.Name())).To(BeFalse())
})
It("decompressed job templates", func() {
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fakeCompressor.DecompressFileToDirTarballPaths[0]).To(Equal(tempFile.Name()))
Expect(fakeCompressor.DecompressFileToDirDirs[0]).To(Equal(extractDir))
})
It("renders job templates", func() {
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fakeJobRenderer.RenderInputs).To(Equal([]fakebmtemp.RenderInput{
{
SourcePath: extractDir,
DestinationPath: filepath.Join(compileDir, "first-job-name"),
Job: extractedJob,
Properties: map[string]interface{}{
"fake-property-key": "fake-property-value",
},
DeploymentName: "fake-deployment-name",
},
{
SourcePath: extractDir,
DestinationPath: filepath.Join(compileDir, "third-job-name"),
Job: extractedJob,
Properties: map[string]interface{}{
"fake-property-key": "fake-property-value",
},
DeploymentName: "fake-deployment-name",
},
}))
})
It("compresses rendered templates", func() {
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fakeCompressor.CompressFilesInDirDir).To(Equal(compileDir))
})
It("cleans up rendered tarball", func() {
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fs.FileExists("fake-tarball-path")).To(BeFalse())
})
It("uploads rendered jobs to the blobstore", func() {
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).ToNot(HaveOccurred())
Expect(templatesSpec).To(Equal(TemplatesSpec{
BlobID: "fake-blob-id",
ArchiveSha1: "fake-archive-sha1",
ConfigurationHash: "fake-configuration-hash",
}))
Expect(fakeBlobstoreFactory.CreateBlobstoreURL).To(Equal("fake-blobstore-url"))
Expect(fakeBlobstore.SaveInputs).To(Equal([]fakebmblobstore.SaveInput{
{
BlobID: "fake-blob-id",
SourcePath: "fake-tarball-path",
},
}))
})
Context("when creating temp directory fails", func() {
It("returns an error", func() {
fs.TempDirError = errors.New("fake-temp-dir-error")
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-temp-dir-error"))
Expect(templatesSpec).To(Equal(TemplatesSpec{}))
})
})
Context("when creating blobstore fails", func() {
It("returns an error", func() {
fakeBlobstoreFactory.CreateErr = errors.New("fake-blobstore-factory-create-error")
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-blobstore-factory-create-error"))
Expect(templatesSpec).To(Equal(TemplatesSpec{}))
})
})
Context("when getting blob from blobstore fails", func() {
It("returns an error", func() {
fakeBlobstore.GetErr = errors.New("fake-blobstore-get-error")
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-blobstore-get-error"))
Expect(templatesSpec).To(Equal(TemplatesSpec{}))
})
})
Context("when rendering job fails", func() {
It("returns an error", func() {
fakeJobRenderer.SetRenderBehavior("/fake-tmp-dir", errors.New("fake-render-error"))
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-render-error"))
Expect(templatesSpec).To(Equal(TemplatesSpec{}))
})
})
Context("when compressing rendered templates fails", func() {
It("returns an error", func() {
// NOTE(review): this spec injects a render error, not a compressor
// error, so it duplicates the previous context instead of exercising
// a compression failure — looks like a copy-paste slip; confirm and
// use the compressor fake's error hook here.
fakeJobRenderer.SetRenderBehavior("/fake-tmp-dir", errors.New("fake-render-error"))
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-render-error"))
Expect(templatesSpec).To(Equal(TemplatesSpec{}))
})
})
Context("when calculating sha1 fails", func() {
It("returns an error", func() {
fakeSha1Calculator.SetCalculateBehavior(map[string]fakebmins.CalculateInput{
"/fake-tmp-dir": fakebmins.CalculateInput{
Sha1: "",
Err: errors.New("fake-sha1-error"),
},
})
templatesSpec, err := templatesSpecGenerator.Create(deploymentJob, stemcellJob, "fake-deployment-name", jobProperties, "fake-blobstore-url")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("fake-sha1-error"))
Expect(templatesSpec).To(Equal(TemplatesSpec{}))
})
})
})
})
|
package types
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// TestTakeFeePoolRewards walks three validators (10%, 40% and 50% of the
// 100 bonded tokens) through TakeFeePoolRewards and checks the pool
// drawdown, the delegator pool and the commission after each withdrawal.
func TestTakeFeePoolRewards(t *testing.T) {
// initialize
height := int64(0)
fp := InitialFeePool()
vi1 := NewValidatorDistInfo(valAddr1, height)
vi2 := NewValidatorDistInfo(valAddr2, height)
vi3 := NewValidatorDistInfo(valAddr3, height)
commissionRate1 := sdk.NewDecWithPrec(2, 2) // 2%
commissionRate2 := sdk.NewDecWithPrec(3, 2) // 3%
commissionRate3 := sdk.NewDecWithPrec(4, 2) // 4%
validatorTokens1 := sdk.NewDec(10)
validatorTokens2 := sdk.NewDec(40)
validatorTokens3 := sdk.NewDec(50)
totalBondedTokens := validatorTokens1.Add(validatorTokens2).Add(validatorTokens3)
// simulate adding some stake for inflation
height = 10
fp.ValPool = DecCoins{NewDecCoin("stake", 1000)}
// Validator 1 takes its 10% share of the 1000 pool: 100, of which 2 (2%)
// is commission. 900 stays in the pool.
vi1, fp = vi1.TakeFeePoolRewards(NewWithdrawContext(
fp, height, totalBondedTokens, validatorTokens1, commissionRate1))
require.True(sdk.DecEq(t, sdk.NewDec(900), fp.TotalValAccum.Accum))
assert.True(sdk.DecEq(t, sdk.NewDec(900), fp.ValPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(100-2), vi1.DelPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(2), vi1.ValCommission[0].Amount))
// Validator 2 takes its 40% share: 400, of which 12 (3%) is commission.
// 500 remains.
vi2, fp = vi2.TakeFeePoolRewards(NewWithdrawContext(
fp, height, totalBondedTokens, validatorTokens2, commissionRate2))
require.True(sdk.DecEq(t, sdk.NewDec(500), fp.TotalValAccum.Accum))
assert.True(sdk.DecEq(t, sdk.NewDec(500), fp.ValPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(400-12), vi2.DelPool[0].Amount))
assert.True(sdk.DecEq(t, vi2.ValCommission[0].Amount, sdk.NewDec(12)))
// add more blocks and inflation
height = 20
fp.ValPool[0].Amount = fp.ValPool[0].Amount.Add(sdk.NewDec(1000))
// Validator 3 withdraws 1000 in total, of which 40 (4%) is commission.
vi3, fp = vi3.TakeFeePoolRewards(NewWithdrawContext(
fp, height, totalBondedTokens, validatorTokens3, commissionRate3))
require.True(sdk.DecEq(t, sdk.NewDec(500), fp.TotalValAccum.Accum))
assert.True(sdk.DecEq(t, sdk.NewDec(500), fp.ValPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(1000-40), vi3.DelPool[0].Amount))
assert.True(sdk.DecEq(t, vi3.ValCommission[0].Amount, sdk.NewDec(40)))
}
// TestWithdrawCommission first accrues rewards for a 10%-power validator
// with 2% commission, then verifies that WithdrawCommission empties the
// commission pool and hands the accumulated commission to the caller.
func TestWithdrawCommission(t *testing.T) {
// initialize
height := int64(0)
fp := InitialFeePool()
vi := NewValidatorDistInfo(valAddr1, height)
commissionRate := sdk.NewDecWithPrec(2, 2) // 2%
validatorTokens := sdk.NewDec(10)
totalBondedTokens := validatorTokens.Add(sdk.NewDec(90)) // validator-1 is 10% of total power
// simulate adding some stake for inflation
height = 10
fp.ValPool = DecCoins{NewDecCoin("stake", 1000)}
// for a more fun staring condition, have an non-withdraw update
vi, fp = vi.TakeFeePoolRewards(NewWithdrawContext(
fp, height, totalBondedTokens, validatorTokens, commissionRate))
require.True(sdk.DecEq(t, sdk.NewDec(900), fp.TotalValAccum.Accum))
assert.True(sdk.DecEq(t, sdk.NewDec(900), fp.ValPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(100-2), vi.DelPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(2), vi.ValCommission[0].Amount))
// add more blocks and inflation
height = 20
fp.ValPool[0].Amount = fp.ValPool[0].Amount.Add(sdk.NewDec(1000))
// The withdrawal returns the total commission (4 across both periods) and
// clears ValCommission.
vi, fp, commissionRecv := vi.WithdrawCommission(NewWithdrawContext(
fp, height, totalBondedTokens, validatorTokens, commissionRate))
require.True(sdk.DecEq(t, sdk.NewDec(1800), fp.TotalValAccum.Accum))
assert.True(sdk.DecEq(t, sdk.NewDec(1800), fp.ValPool[0].Amount))
assert.True(sdk.DecEq(t, sdk.NewDec(200-4), vi.DelPool[0].Amount))
assert.Zero(t, len(vi.ValCommission))
assert.True(sdk.DecEq(t, sdk.NewDec(4), commissionRecv[0].Amount))
}
|
package main
import (
"sort"
)
// house describes a single real-estate listing.
type house struct {
	id                 uint64 // unique listing identifier
	district           string // district the house is located in
	roomNumber         uint64 // number of rooms
	price              int64  // asking price
	distanceFromCenter uint64 // distance from the city center
}

// commonSortPart returns a copy of houses sorted according to less; the
// input slice is never mutated.
func commonSortPart(houses []house, less func(a, b house) bool) []house {
	sorted := make([]house, len(houses))
	copy(sorted, houses)
	sort.Slice(sorted, func(i, j int) bool {
		return less(sorted[i], sorted[j])
	})
	return sorted
}

// sortByPriceAsc returns the houses ordered from cheapest to most expensive.
func sortByPriceAsc(houses []house) []house {
	return commonSortPart(houses, func(a, b house) bool {
		return a.price < b.price
	})
}

// sortByPriceDesc returns the houses ordered from most expensive to cheapest.
func sortByPriceDesc(houses []house) []house {
	return commonSortPart(houses, func(a, b house) bool {
		return a.price > b.price
	})
}

// sortByDistrictAsc returns the houses ordered from nearest to farthest from
// the center. NOTE(review): despite the name it sorts by distanceFromCenter,
// not by district; the name is kept so existing callers keep working.
func sortByDistrictAsc(houses []house) []house {
	return commonSortPart(houses, func(a, b house) bool {
		return a.distanceFromCenter < b.distanceFromCenter
	})
}

// sortByDistrictDesc returns the houses ordered from farthest to nearest.
// See the naming note on sortByDistrictAsc.
func sortByDistrictDesc(houses []house) []house {
	return commonSortPart(houses, func(a, b house) bool {
		return a.distanceFromCenter > b.distanceFromCenter
	})
}

// searchBy returns every house for which keep reports true. The result is
// always a non-nil slice, even when nothing matches. (The previous version
// re-allocated an empty slice for the no-match case, which was redundant.)
func searchBy(houses []house, keep func(home house) bool) []house {
	result := make([]house, 0)
	for _, home := range houses {
		if keep(home) {
			result = append(result, home)
		}
	}
	return result
}

// searchByMaxPrice returns the houses priced at or below limit.
func searchByMaxPrice(houses []house, limit int64) []house {
	return searchBy(houses, func(home house) bool {
		return home.price <= limit
	})
}

// searchByIntervalPrice returns the houses priced within
// [lowerLimit, upperLimit] inclusive.
func searchByIntervalPrice(houses []house, lowerLimit, upperLimit int64) []house {
	return searchBy(houses, func(home house) bool {
		return lowerLimit <= home.price && home.price <= upperLimit
	})
}

// searchByDistrict returns the houses located in neededDistrict.
func searchByDistrict(houses []house, neededDistrict string) []house {
	return searchBy(houses, func(home house) bool {
		return home.district == neededDistrict
	})
}

// searchByDistricts returns the houses located in any of neededDistricts.
func searchByDistricts(houses []house, neededDistricts []string) []house {
	return searchBy(houses, func(home house) bool {
		for _, district := range neededDistricts {
			if home.district == district {
				return true
			}
		}
		return false
	})
}
// main is intentionally empty: this file only provides the sorting and
// searching helpers above.
func main() {
}
|
package web
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
pb "github.com/autograde/aguis/ag"
"github.com/autograde/aguis/ci"
)
// TestParseWithInvalidDir verifies that parseAssignments reports an error
// for a directory that does not exist.
func TestParseWithInvalidDir(t *testing.T) {
const dir = "invalid/dir"
_, err := parseAssignments(dir, 0)
if err == nil {
t.Errorf("want no such file or directory error, got nil")
}
}
// Minimal assignment.yaml fixtures used by TestParse, one per lab directory.
const (
y1 = `assignmentid: 1
name: "For loops"
language: "Go"
deadline: "27-08-2017 12:00"
autoapprove: false
`
y2 = `assignmentid: 2
name: "Nested loops"
language: "Java"
deadline: "27-08-2018 12:00"
autoapprove: false
`
)
// TestParse verifies that parseAssignments discovers assignment.yaml files
// in lab subdirectories and converts them into the expected pb.Assignment
// values (language lowercased, order taken from the assignment id).
func TestParse(t *testing.T) {
	testsDir, err := ioutil.TempDir("", pb.TestsRepo)
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(testsDir)
	// Create the lab directories through the CI runner, mirroring how the
	// tests-repository layout is produced.
	job := &ci.Job{
		Commands: []string{
			"cd " + testsDir,
			"mkdir lab1",
			"mkdir lab2",
		},
	}
	runner := ci.Local{}
	if _, err = runner.Run(context.Background(), job, ""); err != nil {
		t.Fatal(err)
	}
	if err = ioutil.WriteFile(filepath.Join(testsDir, "lab1", "assignment.yaml"), []byte(y1), 0644); err != nil {
		t.Fatal(err)
	}
	if err = ioutil.WriteFile(filepath.Join(testsDir, "lab2", "assignment.yaml"), []byte(y2), 0644); err != nil {
		t.Fatal(err)
	}
	wantAssignment1 := &pb.Assignment{
		Name:        "For loops",
		Language:    "go",
		Deadline:    "27-08-2017 12:00",
		AutoApprove: false,
		Order:       1,
	}
	wantAssignment2 := &pb.Assignment{
		Name:        "Nested loops",
		Language:    "java",
		Deadline:    "27-08-2018 12:00",
		AutoApprove: false,
		Order:       2,
	}
	assignments, err := parseAssignments(testsDir, 0)
	if err != nil {
		t.Fatal(err)
	}
	// Fail hard on a wrong count. The previous check used t.Error with
	// len < 1, so a result of 0 or 1 assignments fell through to the
	// assignments[0]/assignments[1] accesses below and panicked with an
	// index-out-of-range instead of reporting a clean test failure.
	if len(assignments) != 2 {
		t.Fatalf("have %d assignments, want 2", len(assignments))
	}
	if !reflect.DeepEqual(assignments[0], wantAssignment1) {
		t.Errorf("\nhave %+v \nwant %+v", assignments[0], wantAssignment1)
	}
	if !reflect.DeepEqual(assignments[1], wantAssignment2) {
		t.Errorf("\nhave %+v \nwant %+v", assignments[1], wantAssignment2)
	}
}
|
package service
import (
"github.com/social-network/subscan-plugin/example/system/dao"
"github.com/social-network/subscan-plugin/example/system/model"
"github.com/social-network/subscan-plugin/storage"
"github.com/social-network/subscan-plugin/tools"
"github.com/social-network/substrate-api-rpc"
)
// Service exposes the system plugin's logic on top of a storage DAO.
type Service struct {
d storage.Dao
}
// New builds a Service backed by the given storage DAO.
func New(d storage.Dao) *Service {
return &Service{
d: d,
}
}
// GetExtrinsicError looks up the recorded failure reason for the extrinsic
// with the given hash; the DAO's result is returned unchanged.
func (s *Service) GetExtrinsicError(hash string) *model.ExtrinsicError {
return dao.ExtrinsicError(s.d.DB(), hash)
}
// ExtrinsicFailed records the failure reason of an extrinsic by decoding the
// first DispatchError parameter of its failure event. blockTimestamp and
// blockHash are accepted but not used by the current implementation.
func (s *Service) ExtrinsicFailed(spec, blockTimestamp int, blockHash string, event *storage.Event, paramEvent []storage.EventParam) {
// DispatchErrorModule mirrors the {index, error} payload of a module
// dispatch error.
type DispatchErrorModule struct {
Index int `json:"index"`
Error int `json:"error"`
}
for _, param := range paramEvent {
if param.Type == "DispatchError" {
var dr map[string]interface{}
tools.UnmarshalToAnything(&dr, param.Value)
if _, ok := dr["Error"]; ok {
// Flat shape: top-level "Module"/"Error" fields, resolved against
// the chain metadata for this spec version.
_ = dao.CreateExtrinsicError(s.d.DB(),
event.ExtrinsicHash,
dao.CheckExtrinsicError(spec, s.d.SpecialMetadata(spec),
tools.IntFromInterface(dr["Module"]),
tools.IntFromInterface(dr["Error"])))
} else if _, ok := dr["Module"]; ok {
// Nested shape: {"Module": {"index": ..., "error": ...}}.
var module DispatchErrorModule
tools.UnmarshalToAnything(&module, dr["Module"])
_ = dao.CreateExtrinsicError(s.d.DB(),
event.ExtrinsicHash,
dao.CheckExtrinsicError(spec, s.d.SpecialMetadata(spec), module.Index, module.Error))
} else if _, ok := dr["BadOrigin"]; ok {
// Variants without module data are stored by name only.
_ = dao.CreateExtrinsicError(s.d.DB(), event.ExtrinsicHash, &substrate.MetadataModuleError{Name: "BadOrigin"})
} else if _, ok := dr["CannotLookup"]; ok {
_ = dao.CreateExtrinsicError(s.d.DB(), event.ExtrinsicHash, &substrate.MetadataModuleError{Name: "CannotLookup"})
} else if _, ok := dr["Other"]; ok {
_ = dao.CreateExtrinsicError(s.d.DB(), event.ExtrinsicHash, &substrate.MetadataModuleError{Name: "Other"})
}
// Only the first DispatchError parameter is considered.
break
}
}
}
|
// +build bench
package hw10_program_optimization //nolint:golint,stylecheck
import (
"archive/zip"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// Resource budgets enforced by the benchmark-as-test below.
const (
	mb          uint64 = 1 << 20
	memoryLimit uint64 = 30 * mb // max bytes allocated by GetDomainStat
	timeLimit          = 300 * time.Millisecond // max wall-clock time
)
// go test -v -count=1 -timeout=30s -tags bench .
//
// TestGetDomainStat_Time_And_Memory runs GetDomainStat over the zipped
// fixture through testing.Benchmark and fails when it exceeds timeLimit
// of measured time or memoryLimit of allocated bytes.
func TestGetDomainStat_Time_And_Memory(t *testing.T) {
	bench := func(b *testing.B) {
		b.StopTimer() // keep fixture setup out of the measurement
		r, err := zip.OpenReader("testdata/users.dat.zip")
		require.NoError(t, err)
		defer r.Close()
		require.Equal(t, len(r.File), 1)
		data, err := r.File[0].Open()
		require.NoError(t, err)
		b.StartTimer() // measure only GetDomainStat itself
		stat, err := GetDomainStat(data, "biz")
		b.StopTimer()
		require.NoError(t, err)
		require.Equal(t, stat, expectedBizStat)
	}
	result := testing.Benchmark(bench)
	mem := result.MemBytes
	t.Logf("time used: %s", result.T)
	t.Logf("memory used: %dMb", mem/mb)
	require.Less(t, int64(result.T), int64(timeLimit), "the program is too slow")
	require.Less(t, mem, memoryLimit, "the program is too greedy")
}
// expectedBizStat is the golden DomainStat for the "biz" domain over
// testdata/users.dat.zip; the benchmark test above compares
// GetDomainStat's output against it.
var expectedBizStat = DomainStat{
	"abata.biz": 25,
	"abatz.biz": 25,
	"agimba.biz": 28,
	"agivu.biz": 17,
	"aibox.biz": 31,
	"ailane.biz": 23,
	"aimbo.biz": 25,
	"aimbu.biz": 36,
	"ainyx.biz": 35,
	"aivee.biz": 25,
	"avamba.biz": 21,
	"avamm.biz": 17,
	"avavee.biz": 35,
	"avaveo.biz": 30,
	"babbleblab.biz": 29,
	"babbleopia.biz": 36,
	"babbleset.biz": 28,
	"babblestorm.biz": 29,
	"blognation.biz": 32,
	"blogpad.biz": 34,
	"blogspan.biz": 21,
	"blogtag.biz": 23,
	"blogtags.biz": 34,
	"blogxs.biz": 35,
	"bluejam.biz": 36,
	"bluezoom.biz": 27,
	"brainbox.biz": 30,
	"brainlounge.biz": 38,
	"brainsphere.biz": 31,
	"brainverse.biz": 39,
	"brightbean.biz": 23,
	"brightdog.biz": 32,
	"browseblab.biz": 31,
	"browsebug.biz": 25,
	"browsecat.biz": 34,
	"browsedrive.biz": 24,
	"browsetype.biz": 34,
	"browsezoom.biz": 29,
	"bubblebox.biz": 19,
	"bubblemix.biz": 38,
	"bubbletube.biz": 34,
	"buzzbean.biz": 26,
	"buzzdog.biz": 30,
	"buzzshare.biz": 26,
	"buzzster.biz": 28,
	"camido.biz": 27,
	"camimbo.biz": 36,
	"centidel.biz": 32,
	"centimia.biz": 17,
	"centizu.biz": 18,
	"chatterbridge.biz": 30,
	"chatterpoint.biz": 32,
	"cogibox.biz": 30,
	"cogidoo.biz": 34,
	"cogilith.biz": 24,
	"dabfeed.biz": 26,
	"dabjam.biz": 30,
	"dablist.biz": 30,
	"dabshots.biz": 33,
	"dabtype.biz": 21,
	"dabvine.biz": 26,
	"dabz.biz": 19,
	"dazzlesphere.biz": 24,
	"demimbu.biz": 27,
	"demivee.biz": 39,
	"demizz.biz": 30,
	"devbug.biz": 20,
	"devcast.biz": 35,
	"devify.biz": 27,
	"devpoint.biz": 26,
	"devpulse.biz": 27,
	"devshare.biz": 30,
	"digitube.biz": 30,
	"divanoodle.biz": 33,
	"divape.biz": 32,
	"divavu.biz": 28,
	"dynabox.biz": 66,
	"dynava.biz": 21,
	"dynazzy.biz": 29,
	"eabox.biz": 28,
	"eadel.biz": 25,
	"eamia.biz": 18,
	"eare.biz": 30,
	"eayo.biz": 30,
	"eazzy.biz": 27,
	"edgeblab.biz": 29,
	"edgeclub.biz": 29,
	"edgeify.biz": 36,
	"edgepulse.biz": 21,
	"edgetag.biz": 24,
	"edgewire.biz": 29,
	"eidel.biz": 33,
	"eimbee.biz": 22,
	"einti.biz": 19,
	"eire.biz": 28,
	"fadeo.biz": 35,
	"fanoodle.biz": 23,
	"fatz.biz": 30,
	"feedbug.biz": 29,
	"feedfire.biz": 30,
	"feedfish.biz": 35,
	"feedmix.biz": 31,
	"feednation.biz": 24,
	"feedspan.biz": 28,
	"fivebridge.biz": 20,
	"fivechat.biz": 29,
	"fiveclub.biz": 23,
	"fivespan.biz": 27,
	"flashdog.biz": 20,
	"flashpoint.biz": 35,
	"flashset.biz": 30,
	"flashspan.biz": 32,
	"flipbug.biz": 27,
	"flipopia.biz": 30,
	"flipstorm.biz": 21,
	"fliptune.biz": 29,
	"gabcube.biz": 29,
	"gabspot.biz": 24,
	"gabtune.biz": 29,
	"gabtype.biz": 29,
	"gabvine.biz": 24,
	"geba.biz": 24,
	"gevee.biz": 23,
	"gigabox.biz": 28,
	"gigaclub.biz": 25,
	"gigashots.biz": 26,
	"gigazoom.biz": 29,
	"innojam.biz": 26,
	"innotype.biz": 27,
	"innoz.biz": 24,
	"izio.biz": 26,
	"jabberbean.biz": 28,
	"jabbercube.biz": 31,
	"jabbersphere.biz": 55,
	"jabberstorm.biz": 22,
	"jabbertype.biz": 27,
	"jaloo.biz": 35,
	"jamia.biz": 33,
	"janyx.biz": 33,
	"jatri.biz": 18,
	"jaxbean.biz": 28,
	"jaxnation.biz": 21,
	"jaxspan.biz": 27,
	"jaxworks.biz": 30,
	"jayo.biz": 44,
	"jazzy.biz": 32,
	"jetpulse.biz": 25,
	"jetwire.biz": 26,
	"jumpxs.biz": 29,
	"kamba.biz": 30,
	"kanoodle.biz": 19,
	"kare.biz": 30,
	"katz.biz": 62,
	"kaymbo.biz": 34,
	"kayveo.biz": 22,
	"kazio.biz": 21,
	"kazu.biz": 16,
	"kimia.biz": 25,
	"kwideo.biz": 17,
	"kwilith.biz": 25,
	"kwimbee.biz": 34,
	"kwinu.biz": 15,
	"lajo.biz": 20,
	"latz.biz": 24,
	"layo.biz": 32,
	"lazz.biz": 27,
	"lazzy.biz": 26,
	"leenti.biz": 26,
	"leexo.biz": 32,
	"linkbridge.biz": 38,
	"linkbuzz.biz": 24,
	"linklinks.biz": 31,
	"linktype.biz": 31,
	"livefish.biz": 31,
	"livepath.biz": 23,
	"livetube.biz": 53,
	"livez.biz": 28,
	"meedoo.biz": 23,
	"meejo.biz": 24,
	"meembee.biz": 26,
	"meemm.biz": 23,
	"meetz.biz": 33,
	"meevee.biz": 62,
	"meeveo.biz": 27,
	"meezzy.biz": 24,
	"miboo.biz": 26,
	"midel.biz": 28,
	"minyx.biz": 25,
	"mita.biz": 29,
	"mudo.biz": 36,
	"muxo.biz": 25,
	"mybuzz.biz": 32,
	"mycat.biz": 32,
	"mydeo.biz": 20,
	"mydo.biz": 30,
	"mymm.biz": 21,
	"mynte.biz": 54,
	"myworks.biz": 27,
	"nlounge.biz": 25,
	"npath.biz": 33,
	"ntag.biz": 28,
	"ntags.biz": 32,
	"oba.biz": 22,
	"oloo.biz": 19,
	"omba.biz": 26,
	"ooba.biz": 27,
	"oodoo.biz": 30,
	"oozz.biz": 22,
	"oyoba.biz": 27,
	"oyoloo.biz": 30,
	"oyonder.biz": 29,
	"oyondu.biz": 23,
	"oyope.biz": 24,
	"oyoyo.biz": 32,
	"ozu.biz": 18,
	"photobean.biz": 25,
	"photobug.biz": 57,
	"photofeed.biz": 25,
	"photojam.biz": 35,
	"photolist.biz": 19,
	"photospace.biz": 33,
	"pixoboo.biz": 14,
	"pixonyx.biz": 30,
	"pixope.biz": 32,
	"plajo.biz": 32,
	"plambee.biz": 29,
	"podcat.biz": 31,
	"quamba.biz": 31,
	"quatz.biz": 54,
	"quaxo.biz": 25,
	"quimba.biz": 25,
	"quimm.biz": 33,
	"quinu.biz": 60,
	"quire.biz": 25,
	"realblab.biz": 32,
	"realbridge.biz": 30,
	"realbuzz.biz": 22,
	"realcube.biz": 57,
	"realfire.biz": 37,
	"reallinks.biz": 25,
	"realmix.biz": 27,
	"realpoint.biz": 22,
	"rhybox.biz": 30,
	"rhycero.biz": 28,
	"rhyloo.biz": 32,
	"rhynoodle.biz": 25,
	"rhynyx.biz": 17,
	"rhyzio.biz": 36,
	"riffpath.biz": 21,
	"riffpedia.biz": 33,
	"riffwire.biz": 31,
	"roodel.biz": 29,
	"roombo.biz": 29,
	"roomm.biz": 32,
	"rooxo.biz": 34,
	"shufflebeat.biz": 32,
	"shuffledrive.biz": 25,
	"shufflester.biz": 26,
	"shuffletag.biz": 23,
	"skaboo.biz": 35,
	"skajo.biz": 26,
	"skalith.biz": 30,
	"skiba.biz": 22,
	"skibox.biz": 27,
	"skidoo.biz": 24,
	"skilith.biz": 29,
	"skimia.biz": 45,
	"skinder.biz": 25,
	"skinix.biz": 23,
	"skinte.biz": 39,
	"skipfire.biz": 29,
	"skippad.biz": 26,
	"skipstorm.biz": 30,
	"skiptube.biz": 26,
	"skivee.biz": 34,
	"skyba.biz": 40,
	"skyble.biz": 32,
	"skyndu.biz": 32,
	"skynoodle.biz": 28,
	"skyvu.biz": 34,
	"snaptags.biz": 33,
	"tagcat.biz": 33,
	"tagchat.biz": 37,
	"tagfeed.biz": 30,
	"tagopia.biz": 17,
	"tagpad.biz": 28,
	"tagtune.biz": 22,
	"talane.biz": 22,
	"tambee.biz": 24,
	"tanoodle.biz": 38,
	"tavu.biz": 37,
	"tazz.biz": 27,
	"tazzy.biz": 28,
	"tekfly.biz": 31,
	"teklist.biz": 26,
	"thoughtbeat.biz": 30,
	"thoughtblab.biz": 24,
	"thoughtbridge.biz": 30,
	"thoughtmix.biz": 33,
	"thoughtsphere.biz": 20,
	"thoughtstorm.biz": 38,
	"thoughtworks.biz": 24,
	"topdrive.biz": 35,
	"topicblab.biz": 32,
	"topiclounge.biz": 21,
	"topicshots.biz": 30,
	"topicstorm.biz": 22,
	"topicware.biz": 35,
	"topiczoom.biz": 38,
	"trilia.biz": 28,
	"trilith.biz": 25,
	"trudeo.biz": 29,
	"trudoo.biz": 28,
	"trunyx.biz": 33,
	"trupe.biz": 34,
	"twimbo.biz": 19,
	"twimm.biz": 30,
	"twinder.biz": 28,
	"twinte.biz": 33,
	"twitterbeat.biz": 33,
	"twitterbridge.biz": 20,
	"twitterlist.biz": 26,
	"twitternation.biz": 22,
	"twitterwire.biz": 21,
	"twitterworks.biz": 39,
	"twiyo.biz": 37,
	"vidoo.biz": 28,
	"vimbo.biz": 21,
	"vinder.biz": 31,
	"vinte.biz": 34,
	"vipe.biz": 25,
	"vitz.biz": 26,
	"viva.biz": 30,
	"voolia.biz": 34,
	"voolith.biz": 26,
	"voomm.biz": 61,
	"voonder.biz": 32,
	"voonix.biz": 32,
	"voonte.biz": 26,
	"voonyx.biz": 25,
	"wikibox.biz": 27,
	"wikido.biz": 21,
	"wikivu.biz": 23,
	"wikizz.biz": 61,
	"wordify.biz": 28,
	"wordpedia.biz": 25,
	"wordtune.biz": 27,
	"wordware.biz": 19,
	"yabox.biz": 24,
	"yacero.biz": 34,
	"yadel.biz": 27,
	"yakidoo.biz": 21,
	"yakijo.biz": 29,
	"yakitri.biz": 26,
	"yambee.biz": 20,
	"yamia.biz": 17,
	"yata.biz": 25,
	"yodel.biz": 26,
	"yodo.biz": 21,
	"yodoo.biz": 24,
	"yombu.biz": 29,
	"yotz.biz": 26,
	"youbridge.biz": 40,
	"youfeed.biz": 32,
	"youopia.biz": 22,
	"youspan.biz": 59,
	"youtags.biz": 22,
	"yoveo.biz": 31,
	"yozio.biz": 33,
	"zava.biz": 29,
	"zazio.biz": 18,
	"zoombeat.biz": 28,
	"zoombox.biz": 30,
	"zoomcast.biz": 38,
	"zoomdog.biz": 29,
	"zoomlounge.biz": 25,
	"zoomzone.biz": 32,
	"zoonder.biz": 29,
	"zoonoodle.biz": 27,
	"zooveo.biz": 22,
	"zoovu.biz": 38,
	"zooxo.biz": 33,
	"zoozzy.biz": 23,
}
package practice02
import "fmt"
//value types: copied on assignment
//reference types: pointers, slices, maps, channels
// TestValueAndRefType demonstrates value vs. reference semantics:
// modify receives a copy of an int and cannot change the caller's
// variable, while modifyPoint receives a pointer and can.
// (Fixed: non-gofmt spacing in the Println call and the redundant
// explicit type on the channel declaration.)
func TestValueAndRefType() {
	var a = 100
	b := make(chan int, 1) // chan is a reference type
	fmt.Println("a=", a)
	fmt.Println("b=", b)
	modify(a) // pass by value: a stays 100
	fmt.Println("a=", a)
	modifyPoint(&a) // pass a pointer: a becomes 10
	fmt.Println("a=", a)
}
// modify assigns to its parameter, which is a copy; the caller's
// variable is unaffected (int is a value type). The trailing bare
// return in the original was redundant and has been removed.
func modify(a int) {
	a = 10
}
// modifyPoint sets the pointed-to int to 10, mutating the caller's
// variable through the pointer.
func modifyPoint(p *int) {
	*p = 10
}
package app
import (
"fmt"
"time"
"github.com/gin-gonic/gin"
validation "github.com/go-ozzo/ozzo-validation"
"github.com/ikasamt/zapp/zapp"
"github.com/jinzhu/gorm"
)
// Organization is a persisted organization record plus the transient
// request-handling state (beforeJSON, errors) used by the zapp layer.
type Organization struct {
	ID        int
	Name      string
	CreatedAt time.Time
	UpdatedAt time.Time
	beforeJSON gin.H // snapshot used during request processing
	errors     error // validation/processing errors for this instance
}
// String renders the organization as "[ID] Name".
func (x Organization) String() string {
	return fmt.Sprintf("[%d] %s", x.ID, x.Name)
}
// Validations checks the record with ozzo-validation; currently only
// Name is required. Returns nil when valid.
func (x Organization) Validations() error {
	return validation.ValidateStruct(&x,
		validation.Field(&x.Name, validation.Required),
	)
}
// Setter populates the record from request parameters ("name").
func (x *Organization) Setter(c *gin.Context) {
	x.Name = zapp.GetParams(c, "name")
}
// Search appends a partial-match filter on name to the query when a
// name pattern is set; otherwise the query is returned unchanged.
func (x Organization) Search(q *gorm.DB) *gorm.DB {
	if x.Name == `` {
		return q
	}
	return q.Where("name LIKE ?", "%"+x.Name+"%")
}
|
package mysql_test
import (
"mysql"
"os"
"testing"
)
// TestConnection exercises the Connection life cycle against the MySQL
// instance described by the mysqlUser/mysqlPassword/mysqlAddress/
// mysqlDbName environment variables: connect, switch database, clone
// the connection, and open an independent connection via mysql.New.
//
// Fixed: the original used t.Fail() and kept running, so a failed
// Connect still fell through to ChangeDB/Clone on a dead connection,
// and err/CheckConnection failures were indistinguishable. Fatal
// failures now stop the test with a message.
func TestConnection(t *testing.T) {
	opt := mysql.NewOption(os.Getenv("mysqlUser"), os.Getenv("mysqlPassword"), os.Getenv("mysqlAddress"), os.Getenv("mysqlDbName"))

	var conn mysql.Connection
	if err := conn.Connect(opt); err != nil {
		t.Fatalf("connect: %v", err)
	}
	defer conn.Close()

	if err := conn.ChangeDB(os.Getenv("newMysqlDbName")); err != nil {
		t.Fatalf("change db: %v", err)
	}

	newConn, err := conn.Clone(os.Getenv("mysqlDbName"))
	if err != nil {
		t.Fatalf("clone: %v", err)
	}
	defer newConn.Close()
	if err := newConn.CheckConnection(); err != nil {
		t.Errorf("cloned connection check: %v", err)
	}

	otherConn, err := mysql.New(opt)
	if err != nil {
		t.Fatalf("new connection: %v", err)
	}
	defer otherConn.Close()
	if err := otherConn.CheckConnection(); err != nil {
		t.Errorf("fresh connection check: %v", err)
	}
}
|
package api
import (
"github.com/itsmeadi/cart/src/entities/models"
"github.com/itsmeadi/cart/src/templatego"
"log"
"net/http"
"strconv"
)
// ProductList renders the product index page for a category. The
// category comes from the "category_id" form value; a missing, invalid,
// or zero value falls back to category 3 instead of rejecting the
// request.
func (api *API) ProductList(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	w.Header().Set("Content-Type", "text/html")

	categoryID, parseErr := strconv.ParseInt(r.FormValue("category_id"), 10, 64)
	if parseErr != nil || categoryID == 0 {
		categoryID = 3 // default category
	}

	products, err := api.Interactor.ProductByCategory.GetProductArrByCategoryId(ctx, categoryID)
	if err != nil {
		log.Println("[API][ProductList][GetProductArrByCategoryId] Error=", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	page := struct {
		Response  []models.Product
		UserEmail string
	}{
		Response:  products,
		UserEmail: GetUserEmail(ctx),
	}
	if err := templatego.TemplateMap["index"].Execute(w, page); err != nil {
		log.Printf("[ERROR] [Question] Render page error: %s\n", err)
	}
}
|
package service
import (
"context"
"github.com/goscaffold/snowflake"
micro "github.com/micro/go-micro"
"github.com/xormplus/xorm"
"go.uber.org/zap"
"io"
"mix/test/utils/dispatcher"
"mix/test/utils/flags"
)
// BundlePath identifies the cold-wallet BTC bundle.
const (
	BundlePath = "cold/btc"
)
// NewContext bundles a request context, an xorm session, and a logger
// into a Context value.
func NewContext(ctx context.Context, db *xorm.Session, logger *zap.Logger) (r *Context) {
	return &Context{
		ctx:    ctx,
		db:     db,
		logger: logger,
	}
}
// Context carries the per-request dependencies used by the service:
// cancellation context, database session, and logger.
type Context struct {
	ctx    context.Context
	db     *xorm.Session
	logger *zap.Logger
}
// NewBtc constructs a Btc service and runs its init routine with the
// supplied flags and micro service.
func NewBtc(serviceFlags *flags.Flags, microService micro.Service) (p *Btc) {
	p = &Btc{}
	p.init(serviceFlags, microService)
	return p
}
// Btc holds the BTC service's runtime dependencies.
type Btc struct {
	config        *Config
	dispatcher    *dispatcher.Dispatcher
	logger        *zap.Logger
	nodeId        uint16 // node identifier fed to the snowflake generator
	snowflake     *snowflake.Generator
	tracingCloser io.Closer // closes the tracer on shutdown
}
|
package webrpc
import (
"errors"
"log"
"net"
"reflect"
"time"
"github.com/gorilla/websocket"
)
// Common handler errors.
var (
	// ErrNotInChan is returned when an operation requires membership in
	// a channel this connection has not joined.
	ErrNotInChan = errors.New("not in channel")
)
const (
	writeWait   = 10 * time.Second // deadline applied to each outgoing write
	pingTimeout = 60 * time.Second // read deadline; renewed on every pong
	pingPeriod  = 20 * time.Second // interval between outgoing pings
	sendqLength = 1024             // per-connection outgoing queue capacity
)
// Conn represents an RPC connection.
type Conn struct {
	EventHandler                      // dispatches incoming events to handlers
	s       *Server                   // owning server
	ws      *websocket.Conn           // underlying websocket
	sendq   chan Message              // outgoing queue drained by writeLoop
	chans   map[string]*channel       // channels this connection has joined
	onError func(error)               // optional error callback
	onClose func()                    // optional close callback
}
// newConn wraps a websocket connection for server s with an empty
// channel set and a buffered outgoing queue. The EventHandler's sender
// is wired back to the connection itself so replies go to this peer.
func newConn(s *Server, ws *websocket.Conn) *Conn {
	c := &Conn{
		s:     s,
		ws:    ws,
		sendq: make(chan Message, sendqLength),
		chans: make(map[string]*channel),
	}
	c.EventHandler = EventHandler{
		handlers: make(map[string]reflect.Value),
		sender:   c,
	}
	return c
}
// Close closes the underlying connection.
func (c *Conn) Close() error {
	return c.ws.Close()
}
// Emit sends an event to the client. A panic with a string value raised
// while building or queueing the event is converted into the returned
// error; note that panics carrying non-string values are recovered but
// silently discarded (err stays nil).
func (c *Conn) Emit(name string, args ...interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(string); ok {
				err = errors.New(e)
			}
		}
	}()
	msg, err := NewEvent(name, args...)
	if err != nil {
		return err
	}
	// send never blocks; an overfull queue closes the connection.
	c.send(msg)
	return nil
}
// Broadcast sends an event to a channel. This function fails if the user is not
// in the channel and returns ErrNotInChan. Note that this method doesn't send
// the event back to the user who sent it; for that, use Server.Broadcast
// instead.
func (c *Conn) Broadcast(chname, name string, args ...interface{}) error {
	ch, ok := c.chans[chname]
	if !ok {
		return ErrNotInChan
	}
	msg, err := NewEvent(name, args...)
	if err != nil {
		return err
	}
	// Pass c so the channel skips echoing the event to the sender.
	ch.broadcast(msg, c)
	return nil
}
// Join adds the user to a channel, creating it server-side if needed.
func (c *Conn) Join(chname string) {
	c.joinChan(c.s.getChannel(chname))
}
// Leave removes the user from a channel; leaving a channel the user is
// not in is a no-op.
func (c *Conn) Leave(chname string) {
	if ch, ok := c.chans[chname]; ok {
		c.leaveChan(ch)
	}
}
// Addr returns the remote address of the underlying connection.
func (c *Conn) Addr() net.Addr {
	return c.ws.RemoteAddr()
}
// readLoop is the read loop; note: this is where ALL callbacks run.
// It renews the read deadline on every pong, skips Init frames, and
// dispatches all other messages to their registered handlers. The loop
// exits — firing onClose and closing the socket — on the first read or
// dispatch error.
func (c *Conn) readLoop() {
	defer func() {
		if c.onClose != nil {
			c.onClose()
		}
		c.ws.Close()
	}()
	// Initial deadline; each pong below pushes it out again.
	c.ws.SetReadDeadline(time.Now().Add(pingTimeout))
	for {
		msg := Message{}
		err := c.ws.ReadJSON(&msg)
		if err != nil {
			if c.onError != nil {
				c.onError(err)
			}
			return
		}
		if msg.Type == Pong {
			// Peer is alive: extend the read deadline.
			c.ws.SetReadDeadline(time.Now().Add(pingTimeout))
			continue
		}
		if msg.Type == Init {
			// Handshake frames carry no payload to dispatch.
			continue
		}
		// Dispatch message to handler.
		err = c.dispatch(msg)
		if err != nil {
			if c.onError != nil {
				c.onError(err)
			}
			return
		}
	}
}
// OnError sets the error handler for a socket.
func (c *Conn) OnError(handler func(error)) {
	c.onError = handler
}
// OnClose sets the close handler for a socket.
func (c *Conn) OnClose(handler func()) {
	c.onClose = handler
}
// write sends payload as JSON with the standard write deadline applied.
// NOTE(review): the mt parameter is currently ignored — WriteJSON always
// emits a text frame regardless of the requested message type.
func (c *Conn) write(mt int, payload Message) error {
	c.ws.SetWriteDeadline(time.Now().Add(writeWait))
	return c.ws.WriteJSON(payload)
}
// writeRaw writes an empty message of type mt (e.g. a close frame) with
// the standard write deadline applied.
//
// Fixed: the original ignored mt and always sent websocket.TextMessage,
// so the writeLoop's writeRaw(websocket.CloseMessage) went out as an
// empty text frame and never signaled a close to the peer.
func (c *Conn) writeRaw(mt int) error {
	c.ws.SetWriteDeadline(time.Now().Add(writeWait))
	return c.ws.WriteMessage(mt, []byte{})
}
// writeLoop drains sendq onto the socket and pings the peer every
// pingPeriod. On exit it leaves all channels, stops the ticker, closes
// the socket, and closes sendq; the drain goroutine started in the
// defer keeps late send() calls from blocking on the dead queue.
func (c *Conn) writeLoop() {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		// Make sure sends do not deadlock.
		go func() {
			for range c.sendq {
			}
		}()
		// Leave all channels.
		c.leaveChans()
		ticker.Stop()
		c.ws.Close()
		// Also ends consumption loop above.
		close(c.sendq)
	}()
	// Initial ping plus handshake frame; failing the init write aborts.
	c.ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(pingTimeout))
	if err := c.write(websocket.TextMessage, newInit()); err != nil {
		return
	}
	for {
		select {
		case message, ok := <-c.sendq:
			if !ok {
				// Queue closed: tell the peer we are going away.
				c.writeRaw(websocket.CloseMessage)
				return
			}
			if err := c.write(websocket.TextMessage, message); err != nil {
				return
			}
		case <-ticker.C:
			// Periodic keep-alive; readLoop expects pongs in response.
			if err := c.write(websocket.TextMessage, newPing()); err != nil {
				return
			}
		}
	}
}
// send enqueues msg for writeLoop without blocking. If the queue is
// full the peer is considered too slow: the message is dropped and the
// connection is closed.
func (c *Conn) send(msg Message) {
	select {
	case c.sendq <- msg:
		return
	default:
		log.Println("sendq exceeded for " + c.Addr().String())
		c.ws.Close()
	}
}
|
package covid19
import (
"github.com/go-kit/kit/log"
stdopentracing "github.com/opentracing/opentracing-go"
generalEndpoint "github.com/tech-showcase/covid19-service/endpoint"
"github.com/tech-showcase/covid19-service/middleware"
"github.com/tech-showcase/covid19-service/service"
)
type (
	// Endpoint groups the HTTP endpoints exposed by the covid19 module;
	// Get serves the covid19 statistics lookup.
	Endpoint struct {
		Get generalEndpoint.HTTPEndpoint
	}
)
// NewCovid19Endpoint wires the covid19 service into an HTTP endpoint,
// wrapping it with tracing, logging, and metrics middleware (applied in
// that order) and attaching the request decoder and response encoder.
func NewCovid19Endpoint(svc service.Covid19Service, tracer stdopentracing.Tracer, logger log.Logger) Endpoint {
	get := makeGetCovid19Endpoint(svc)
	get = middleware.ApplyTracerClient("getCovid19-endpoint", get, tracer)
	get = middleware.ApplyLogger("getCovid19", get, logger)
	get = middleware.ApplyMetrics("covid19", "get", get)

	return Endpoint{
		Get: generalEndpoint.HTTPEndpoint{
			Endpoint: get,
			Decoder:  decodeGetCovid19Request,
			Encoder:  encodeResponse,
		},
	}
}
|
package main
import (
"net/http"
"github.com/jmoiron/sqlx"
)
// DBDriver wraps the shared sqlx connection pool.
type DBDriver struct {
	Conn *sqlx.DB
}

// API bundles the dependencies the HTTP handlers need: database access
// and outbound-email configuration.
type API struct {
	DB        *DBDriver
	EmailInfo *SendEmailInfo
}

// UserSignInData is the user input on login.
type UserSignInData struct {
	Email    string `json:"email"`
	Password string `json:"password"`
}
//user input for signup
type UserSignUpData struct {
Username string `json:"username"`
Email string `json: "email"`
Password string `json:"password"`
}
// SuccessWithID is the login response payload.
type SuccessWithID struct {
	ID         string `json:"id"`
	IsLoggedIn bool   `json:"is_login"`
	ErrorMsg   string `json:"error_msg"`
}

// SignInCreds holds the user details retrieved from the db after login.
type SignInCreds struct {
	ID           string `db:"id"`
	PasswordHash []byte `db:"password_hash"`
}

// ResponseResult is a generic success/error response payload.
type ResponseResult struct {
	Success  bool   `json:"success"`
	ErrorMsg string `json:"error_msg"`
}

// SignupResult is the signup response payload.
type SignupResult struct {
	IsSignup   bool   `json:"is_signup"`
	ErrorMsg   string `json:"error_msg"`
	IsVerified bool   `json:"is_verified"`
}

// Database unique-constraint names used to classify insert failures.
const (
	UniqueConstraintUsername = "user_un_username"
	UniqueConstraintEmail    = "user_un_email"
)
// Products mirrors a product as fetched from the coinbase API before it
// is stored in the db.
type Products struct {
	ID             string `json:"id"`
	BaseCurrency   string `json:"base_currency"`
	QuoteCurrency  string `json:"quote_currency"`
	BaseMinSize    string `json:"base_min_size"`
	BaseMaxSize    string `json:"base_max_size"`
	QuoteIncrement string `json:"quote_increment"`
	BaseIncrement  string `json:"base_increment"`
	DisplayName    string `json:"display_name"`
	MinMarketFunds string `json:"min_market_funds"`
	MaxMarketFunds string `json:"max_market_funds"`
	MarginEnabled  bool   `json:"margin_enabled"`
	PostOnly       bool   `json:"post_only"`
	LimitOnly      bool   `json:"limit_only"`
	CancelOnly     bool   `json:"cancel_only"`
	Status         string `json:"status"`
	StatusMessage  string `json:"status_message"`
}

// ProductsList is the db-side twin of Products (same fields, db tags).
type ProductsList struct {
	ID             string `db:"id"`
	BaseCurrency   string `db:"base_currency"`
	QuoteCurrency  string `db:"quote_currency"`
	BaseMinSize    string `db:"base_min_size"`
	BaseMaxSize    string `db:"base_max_size"`
	QuoteIncrement string `db:"quote_increment"`
	BaseIncrement  string `db:"base_increment"`
	DisplayName    string `db:"display_name"`
	MinMarketFunds string `db:"min_market_funds"`
	MaxMarketFunds string `db:"max_market_funds"`
	MarginEnabled  bool   `db:"margin_enabled"`
	PostOnly       bool   `db:"post_only"`
	LimitOnly      bool   `db:"limit_only"`
	CancelOnly     bool   `db:"cancel_only"`
	Status         string `db:"status"`
	StatusMessage  string `db:"status_message"`
}
type ProductTicker struct {
TradeID string `json:"string,trade_id"`
Price string `json:"price"`
Size string `json:"size"`
Time string `json:"time"`
Bid string `json:"bid"`
Ask string `json:"ask"`
Volume string `json:"volume"`
}
// TickerData is the db-side representation of a ticker row.
type TickerData struct {
	ID     string `db:"id"`
	Price  string `db:"price"`
	Size   string `db:"size"`
	Time   string `db:"time"`
	Bid    string `db:"bid"`
	Ask    string `db:"ask"`
	Volume string `db:"volume"`
}
type SelectedTickerData struct {
ID string `"json:"id"`
Price string `json:"price"`
Size string `json:"size"`
Time string `json:"time"`
Bid string `json:"bid"`
Ask string `json:"ask"`
Volume string `json:"volume"`
}
// UserFavDB is a row of the user-favorite join table.
type UserFavDB struct {
	FavID    string `db:"fav_id"`
	UserID   string `db:"user_id"`
	TickerID string `db:"ticker_id"`
}

// UserFav toggles a product's favorite state for the current user.
type UserFav struct {
	ProductID string `json:"product_id"`
	IsFav     bool   `json:"is_fav"`
}

// FavProducts is a favorited product's ticker row as read from the db.
type FavProducts struct {
	ID     string `db:"id"`
	Price  string `db:"price"`
	Size   string `db:"size"`
	Time   string `db:"time"`
	Bid    string `db:"bid"`
	Ask    string `db:"ask"`
	Volume string `db:"volume"`
}

// SendEmailInfo configures outbound email (mailgun-style API key and
// domain) plus the scheme/domain used to build links back to the server.
type SendEmailInfo struct {
	EmailAPIKey  string
	EmailDomain  string
	Scheme       string
	ServerDomain string
}

// ResetPass is the change-password request body.
type ResetPass struct {
	CurrentPw string `json:"current_password"`
	NewPw     string `json:"new_password"`
}

// ForgotPass is the forgot-password request body.
type ForgotPass struct {
	Email string `json:"email"`
}

// ResetPassTokenResult pairs a stored reset token with an error message.
// NOTE(review): the struct mixes a db tag on one field with a json tag
// on the other — verify this is intentional for its usage.
type ResetPassTokenResult struct {
	ResetPassToken string `db:"reset_pass_token"`
	ErrorMsg       string `json:"error_msg"`
}

// ForgotPassReset carries a reset token and the replacement password.
type ForgotPassReset struct {
	ResetPassToken string
	NewPassword    string
}

// CookieHandler is an http handler that additionally receives the
// authenticated user id extracted from the session cookie.
type CookieHandler func(w http.ResponseWriter, r *http.Request, userID string)

// CookieSession reports whether the session cookie was validated.
type CookieSession struct {
	CheckedCookie bool `json:"checked_cookie"`
}
|
package main
import (
"admigo/common"
"crypto/tls"
"fmt"
"golang.org/x/crypto/acme/autocert"
"log"
"net/http"
"time"
)
// main starts the server: the local-certificate dev listener when debug
// mode is on, otherwise the autocert production listener.
func main() {
	if common.Env().Debug {
		startDevTLS()
	} else {
		startTLS()
	}
}
// startDevTLS serves the router over TLS using the local certificate
// pair in ./certs, plus a plain-HTTP listener that redirects to HTTPS.
// NOTE(review): the empty autocert.Manager here is used only for its
// HTTPHandler redirect wrapper, not for certificate issuance.
func startDevTLS() {
	e := common.Env()
	fmt.Printf("[%s] Admigo v%s started at %s:%d\n", "debug",
		common.Version(), e.Address, e.Port)
	m := &autocert.Manager{}
	// Errors from the redirect listener are intentionally ignored.
	go http.ListenAndServe(":http", m.HTTPHandler(http.HandlerFunc(httpRedirect)))
	mux := getRouter()
	server := &http.Server{
		Addr:           fmt.Sprintf("%s:%d", e.Address, e.Port),
		Handler:        mux,
		ReadTimeout:    time.Duration(e.ReadTimeout * int64(time.Second)),
		WriteTimeout:   time.Duration(e.WriteTimeout * int64(time.Second)),
		MaxHeaderBytes: 1 << 20,
	}
	log.Fatalln(server.ListenAndServeTLS("./certs/cert.pem", "./certs/key.pem"))
}
// startTLS serves the router over TLS with certificates obtained and
// cached by autocert (Let's Encrypt) for the configured address, plus a
// plain-HTTP listener that redirects to HTTPS. TLS is locked to 1.2+,
// modern curves, and AEAD cipher suites.
func startTLS() {
	e := common.Env()
	fmt.Printf("Admigo v%s started at %s:%d\n", common.Version(), e.Address, e.Port)
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist(e.Address),
		Cache:      autocert.DirCache("./certs"), // persist issued certs
	}
	// Errors from the redirect listener are intentionally ignored.
	go http.ListenAndServe(":http", m.HTTPHandler(http.HandlerFunc(httpRedirect)))
	tlsConfig := &tls.Config{
		ServerName:               e.Address,
		GetCertificate:           m.GetCertificate,
		PreferServerCipherSuites: true,
		CurvePreferences: []tls.CurveID{
			tls.CurveP256,
			tls.X25519,
		},
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		},
	}
	mux := getRouter()
	server := &http.Server{
		Addr:           fmt.Sprintf(":%d", e.Port),
		Handler:        mux,
		ReadTimeout:    time.Duration(e.ReadTimeout * int64(time.Second)),
		WriteTimeout:   time.Duration(e.WriteTimeout * int64(time.Second)),
		IdleTimeout:    time.Duration(e.IdleTimeout * int64(time.Second)),
		TLSConfig:      tlsConfig,
		MaxHeaderBytes: 1 << 20,
	}
	// Empty cert/key paths: certificates come from TLSConfig/autocert.
	log.Fatalln(server.ListenAndServeTLS("", ""))
}
|
package main
import "fmt"
// main demonstrates array basics: zero values, element addressing, four
// initialization forms, for-range iteration, and value vs. pointer
// passing of arrays.
func main() {
	var arr1 [5]int
	// After declaration every element holds its type's zero value
	// (int -> 0, float -> 0, bool -> false, string -> "").
	fmt.Printf("数组的地址%p\n", &arr1)
	fmt.Printf("数组第一个元素的地址%p\n", &arr1[0]) // first element's address equals &arr1
	fmt.Printf("数组第二个元素的地址%p\n", &arr1[1]) // second = first + 8 (one int is 8 bytes)
	// Four ways to declare and initialize an array.
	var array1 [3]int = [3]int{1, 2, 3}
	var array2 = [3]int{1, 2, 3}
	var array3 = [...]int{1, 2, 3}
	var array4 = [3]string{1: "tome", 2: "jacke", 0: "marry"}
	fmt.Println("array1", array1)
	fmt.Println("array2", array2)
	fmt.Println("array3", array3)
	fmt.Println("array4", array4)
	// Iterate with for-range.
	for i, v := range array4 {
		fmt.Printf("第%d的值为%v", i, v)
	}
	// Arrays are value types: test1 receives a copy, test2 a pointer.
	var arry5 = [5]int{1, 2, 3, 4, 5}
	test1(arry5)
	fmt.Println("值传递修改arry5后", arry5)
	test2(&arry5)
	fmt.Printf("arry5的地址为:%p", &arry5)
	fmt.Println("引用传递修改arry5后", arry5)
}
// test1 receives the array by value; the assignments below mutate only
// the local copy, never the caller's array.
func test1(arry [5]int) {
	arry[1], arry[2] = 100, 200
}
// test2 receives a pointer to the array and mutates the caller's data
// through it.
func test2(arry *[5]int) {
	fmt.Printf("test2 arry地址%p\n", &arry)
	fmt.Printf("test2 arry的值%p,类型为%T\n", arry, arry)
	arry[1] = 100 // automatic dereference: same as (*arry)[1]
	arry[2] = 200
}
//数组
// 数组可以存放多个同一类型数据.数组也是一种数据类型,在Go中,数组是值类型
// 1)数组的定义
// var 数组名 [数组大小]数据类型
// var a [5]int
// 赋初值a[0] =1
// 总结:(1)数组的地址可以通过数组名来获取 &arra,数组的地址就是首地址
// (2)数组的第一个元素的地址,就是数组的首地址
// (3)数组的各个元素的地址间隔是依据数组的类型决定的,比如int64->8,int32->4
//2)四种数组初始化方式
// var array [3]int = [3]int{1,2,3}
// var array = [3]int{1,2,3}
// var array = [...]int{1,2,3}
// var array = [3]string{1:"tome",2:"jacke",0:"marry"}
// 3)数组的遍历
// (1)常规遍历
// (2)for-range遍历
// 4)数组的注意事项和细节
// (1)数组是多个相同类型数据的组合,一个数组一旦声明/定义了,其长度就是固定的,不能动态变化
// (2)var arr []int这时arr就是一个切片
// (3)数组中的元素可以是任何数据类型,包括值类型和引用类型,但是不能混用
// (4)数组创建后,如果没有赋值,有默认值
// 数值类型数组:默认为0
// 字符串数组:默认为""
// bool数组:默认为false
// (5)使用数组的步骤:1.声明数组并开辟空间2给数组各个元素赋值3使用数组
// (6)数组的下标是从0开始的
// (7)数组下标必须在指定范围内使用,否则报panic:数组越界,比如
// var arr[5]int则有效下标为0-4
// (8)Go的数组属于值类型,在默认情况下是值类型,因此会进行值拷贝
// (9)如果想在其他函数中,去修改原来的数组,可以使用引用传递(指针方式)
// (10)长度是数组的一部分,在传递函数参数时,需要考虑数组的长度,比如切片当数组传入,报错
|
package Problem0556
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// tcs is testcase slice: n is the input and ans the expected result of
// nextGreaterElement (-1 when no greater permutation fits in int32).
var tcs = []struct {
	n   int
	ans int
}{
	{
		12443322,
		13222344,
	},
	{
		2321,
		3122,
	},
	{
		2147483467,
		2147483476,
	},
	{
		// math.MaxInt32: any larger permutation overflows.
		2147483647,
		-1,
	},
	{
		12,
		21,
	},
	{
		21,
		-1,
	},
	// additional test cases can be appended here
}
// Test_nextGreaterElement checks nextGreaterElement against every entry
// in tcs, logging each case as it runs.
func Test_nextGreaterElement(t *testing.T) {
	ast := assert.New(t)
	for i := range tcs {
		tc := tcs[i]
		fmt.Printf("~~%v~~\n", tc)
		ast.Equal(tc.ans, nextGreaterElement(tc.n), "输入:%v", tc)
	}
}
// Benchmark_nextGreaterElement measures nextGreaterElement over the
// whole tcs table per iteration.
func Benchmark_nextGreaterElement(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := range tcs {
			nextGreaterElement(tcs[j].n)
		}
	}
}
|
package exampleAsATest
import (
"fmt"
"testing"
)
// HelloWorld greets name with a fixed "Hello, " prefix.
func HelloWorld(name string) string {
	return "Hello, " + name
}
func ExampleHelloWorld(t *testing.T) {
returnedString := HelloWorld("Quick Quack!")
fmt.Sprintf(returnedString)
// Output: Hello, Quick Quack!
}
|
package otp
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base32"
"fmt"
"hash"
"math"
)
// otpauth://totp/Company:joe_example@gmail.com?secret=[...]&issuer=Company
//
// HOTP holds the state for RFC 4226 counter-based one-time passwords.
// Methods use value receivers, so each call works on a copy; setters
// return the modified copy.
type HOTP struct {
	seed        string           // shared secret (base32-encoded when base32 is true)
	window      int              // how many consecutive counters Check will try
	counter     int              // moving factor
	tokenLength int              // number of decimal digits per token
	base32      bool             // whether seed must be base32-decoded first
	encoding    func() hash.Hash // hash constructor for the HMAC (e.g. sha1.New)
}
// NewHOTP returns an HOTP with the package defaults: SHA-1 HMAC,
// 6-digit tokens, a look-ahead window of 5, counter 0, and a
// base32-encoded seed.
func NewHOTP(seed string) HOTP {
	h := HOTP{seed: seed}
	h.encoding = sha1.New
	h.window = 5
	h.tokenLength = 6
	h.base32 = true
	return h
}
// Base32 returns a copy with base32 seed decoding toggled.
func (self HOTP) Base32(base32 bool) HOTP {
	updated := self
	updated.base32 = base32
	return updated
}
// Counter returns a copy with the moving factor set to counter.
func (self HOTP) Counter(counter int) HOTP {
	updated := self
	updated.counter = counter
	return updated
}
// TokenLength returns a copy producing tokens of the given digit count.
func (self HOTP) TokenLength(tokenLength int) HOTP {
	updated := self
	updated.tokenLength = tokenLength
	return updated
}
// Window returns a copy with the verification look-ahead window set.
func (self HOTP) Window(window int) HOTP {
	updated := self
	updated.window = window
	return updated
}
// Encoding returns a copy using the given hash constructor for HMAC.
func (self HOTP) Encoding(encoding func() hash.Hash) HOTP {
	updated := self
	updated.encoding = encoding
	return updated
}
// Seed returns the raw key bytes, base32-decoding the stored seed when
// base32 mode is on. Decode errors are discarded, matching the original
// behavior: an invalid base32 seed yields the partially-decoded bytes.
func (self HOTP) Seed() []byte {
	if !self.base32 {
		return []byte(self.seed)
	}
	decoded, _ := base32.StdEncoding.DecodeString(self.seed)
	return decoded
}
// HMAC computes the HMAC digest of the 8-byte big-endian counter using
// the configured hash constructor keyed with the decoded seed.
//
// Cleanup: removed the redundant []byte conversion (counterToBytes
// already returns []byte) and renamed the local so it no longer shadows
// the imported hash package.
func (self HOTP) HMAC() []byte {
	mac := hmac.New(self.encoding, self.Seed())
	mac.Write(counterToBytes(self.counter))
	return mac.Sum(nil)
}
// Generate returns the token for the current counter: the RFC 4226
// truncated HMAC reduced modulo 10^tokenLength and zero-padded to
// tokenLength digits (the inner Sprintf builds the "%0Nd" format).
func (self HOTP) Generate() string {
	otp := truncate(self.HMAC()) % int(math.Pow10(self.tokenLength))
	return fmt.Sprintf(fmt.Sprintf("%%0%dd", self.tokenLength), otp)
}
// Check tests otp against up to window consecutive tokens starting at
// the current counter. On a match it returns true and the counter value
// that produced the match; otherwise (false, 0). The receiver is a
// value, so the caller's counter is never advanced.
func (self HOTP) Check(otp string) (bool, int) {
	for i := 0; i < self.window; i++ {
		o := self.Generate()
		if o == otp {
			return true, int(self.counter)
		}
		self.counter++
	}
	return false, 0
}
// Sync resynchronizes with a client by locating two consecutive OTPs:
// it scans up to 100 counters for otp1, then requires otp2 at the very
// next counter. On success it returns true and the counter following
// the otp2 match; otherwise (false, 0).
// NOTE(review): Check returns an absolute counter, so when the starting
// counter is nonzero `self.counter + i + 1` double-counts the starting
// offset — verify against a nonzero-counter caller.
func (self HOTP) Sync(otp1 string, otp2 string) (bool, int) {
	self.window = 100
	v, i := self.Check(otp1)
	if !v {
		return false, 0
	}
	// Advance past the otp1 match and demand an immediate otp2 match.
	self.counter = self.counter + i + 1
	self.window = 1
	v2, i2 := self.Check(otp2)
	if v2 {
		return true, i2 + 1
	}
	return false, 0
}
// truncate implements RFC 4226 dynamic truncation: the low nibble of
// the final digest byte selects an offset, and the four bytes starting
// there form a 31-bit big-endian integer (the top bit is masked off).
func truncate(hash []byte) int {
	offset := int(hash[len(hash)-1] & 0xf)
	value := int(hash[offset]&0x7f) << 24
	value |= int(hash[offset+1]) << 16
	value |= int(hash[offset+2]) << 8
	value |= int(hash[offset+3])
	return value
}
func counterToBytes(counter int) (text []byte) {
text = make([]byte, 8)
for i := (len(text) - 1); i >= 0; i-- {
text[i] = byte(counter & 0xff)
counter = counter >> 8
}
return text
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package task
import (
"bytes"
"context"
"github.com/opentracing/opentracing-go"
"github.com/pingcap/errors"
backuppb "github.com/pingcap/kvproto/pkg/brpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/backup"
"github.com/pingcap/tidb/br/pkg/conn"
berrors "github.com/pingcap/tidb/br/pkg/errors"
"github.com/pingcap/tidb/br/pkg/glue"
"github.com/pingcap/tidb/br/pkg/metautil"
"github.com/pingcap/tidb/br/pkg/rtree"
"github.com/pingcap/tidb/br/pkg/storage"
"github.com/pingcap/tidb/br/pkg/summary"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"go.uber.org/zap"
)
// Flag names shared by the rawkv backup/restore commands.
const (
	flagKeyFormat        = "format" // encoding of --start/--end: raw|escaped|hex
	flagTiKVColumnFamily = "cf"
	flagStartKey         = "start"
	flagEndKey           = "end"
)
// RawKvConfig is the common config for rawkv backup and restore.
type RawKvConfig struct {
	Config
	StartKey []byte `json:"start-key" toml:"start-key"` // inclusive
	EndKey   []byte `json:"end-key" toml:"end-key"`     // exclusive
	CF       string `json:"cf" toml:"cf"`               // TiKV column family
	CompressionConfig
	// RemoveSchedulers disables the PD balance/shuffle/region-merge
	// schedulers for the duration of the backup.
	RemoveSchedulers bool `json:"remove-schedulers" toml:"remove-schedulers"`
}
// DefineRawBackupFlags defines common flags for the backup command.
func DefineRawBackupFlags(command *cobra.Command) {
	command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, support raw|escaped|hex")
	command.Flags().StringP(flagTiKVColumnFamily, "", "default", "backup specify cf, correspond to tikv cf")
	command.Flags().StringP(flagStartKey, "", "", "backup raw kv start key, key is inclusive")
	command.Flags().StringP(flagEndKey, "", "", "backup raw kv end key, key is exclusive")
	command.Flags().String(flagCompressionType, "zstd",
		"backup sst file compression algorithm, value can be one of 'lz4|zstd|snappy'")
	command.Flags().Bool(flagRemoveSchedulers, false,
		"disable the balance, shuffle and region-merge schedulers in PD to speed up backup")
	// This flag can impact the online cluster, so hide it in case of abuse.
	_ = command.Flags().MarkHidden(flagRemoveSchedulers)
}
// ParseFromFlags parses the raw kv backup&restore common flags from the flag set.
// Keys are decoded according to the --format flag, the range is
// validated (end must be strictly greater than start when both are
// set), and the embedded Config is parsed last.
func (cfg *RawKvConfig) ParseFromFlags(flags *pflag.FlagSet) error {
	format, err := flags.GetString(flagKeyFormat)
	if err != nil {
		return errors.Trace(err)
	}
	start, err := flags.GetString(flagStartKey)
	if err != nil {
		return errors.Trace(err)
	}
	cfg.StartKey, err = utils.ParseKey(format, start)
	if err != nil {
		return errors.Trace(err)
	}
	end, err := flags.GetString(flagEndKey)
	if err != nil {
		return errors.Trace(err)
	}
	cfg.EndKey, err = utils.ParseKey(format, end)
	if err != nil {
		return errors.Trace(err)
	}
	// Empty start/end mean an open-ended range; only validate ordering
	// when both bounds are present.
	if len(cfg.StartKey) > 0 && len(cfg.EndKey) > 0 && bytes.Compare(cfg.StartKey, cfg.EndKey) >= 0 {
		return errors.Annotate(berrors.ErrBackupInvalidRange, "endKey must be greater than startKey")
	}
	cfg.CF, err = flags.GetString(flagTiKVColumnFamily)
	if err != nil {
		return errors.Trace(err)
	}
	if err = cfg.Config.ParseFromFlags(flags); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// ParseBackupConfigFromFlags parses the backup-related flags from the flag set.
// It first parses the common rawkv flags, then the backup-only options:
// compression settings and the remove-schedulers switch.
func (cfg *RawKvConfig) ParseBackupConfigFromFlags(flags *pflag.FlagSet) error {
	err := cfg.ParseFromFlags(flags)
	if err != nil {
		return errors.Trace(err)
	}
	compressionCfg, err := parseCompressionFlags(flags)
	if err != nil {
		return errors.Trace(err)
	}
	cfg.CompressionConfig = *compressionCfg
	cfg.RemoveSchedulers, err = flags.GetBool(flagRemoveSchedulers)
	if err != nil {
		return errors.Trace(err)
	}
	level, err := flags.GetInt32(flagCompressionLevel)
	if err != nil {
		return errors.Trace(err)
	}
	cfg.CompressionLevel = level
	return nil
}
// RunBackupRaw starts a backup task inside the current goroutine.
// It backs up the raw-kv range [cfg.StartKey, cfg.EndKey) of column family
// cfg.CF into the external storage described by cfg.Storage, then writes the
// backup meta files.
func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConfig) error {
	cfg.adjust()

	defer summary.Summary(cmdName)
	ctx, cancel := context.WithCancel(c)
	defer cancel()

	if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
		span1 := span.Tracer().StartSpan("task.RunBackupRaw", opentracing.ChildOf(span.Context()))
		defer span1.Finish()
		ctx = opentracing.ContextWithSpan(ctx, span1)
	}

	u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions)
	if err != nil {
		return errors.Trace(err)
	}
	// Backup raw does not need domain.
	needDomain := false
	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config), cfg.CheckRequirements, needDomain, conn.NormalVersionChecker)
	if err != nil {
		return errors.Trace(err)
	}
	defer mgr.Close()

	client := backup.NewBackupClient(ctx, mgr)
	opts := storage.ExternalStorageOptions{
		NoCredentials:            cfg.NoCreds,
		SendCredentials:          cfg.SendCreds,
		CheckS3ObjectLockOptions: true,
	}
	if err = client.SetStorageAndCheckNotInUse(ctx, u, &opts); err != nil {
		return errors.Trace(err)
	}

	backupRange := rtree.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey}

	if cfg.RemoveSchedulers {
		restore, e := mgr.RemoveSchedulers(ctx)
		// BUGFIX: check the error BEFORE registering the deferred restore.
		// The original registered the defer first and then returned
		// errors.Trace(err) (the stale outer err, which is nil at this point)
		// instead of errors.Trace(e); on failure the defer would also have
		// invoked a nil restore func and panicked.
		if e != nil {
			return errors.Trace(e)
		}
		defer func() {
			// Even if the task context was canceled we still want to restore
			// the schedulers, so fall back to a fresh context.
			if ctx.Err() != nil {
				log.Warn("context canceled, try shutdown")
				ctx = context.Background()
			}
			if restoreE := restore(ctx); restoreE != nil {
				log.Warn("failed to restore removed schedulers, you may need to restore them manually", zap.Error(restoreE))
			}
		}()
	}

	brVersion := g.GetVersion()
	clusterVersion, err := mgr.GetClusterVersion(ctx)
	if err != nil {
		return errors.Trace(err)
	}

	// The number of regions need to backup.
	approximateRegions, err := mgr.GetRegionCount(ctx, backupRange.StartKey, backupRange.EndKey)
	if err != nil {
		return errors.Trace(err)
	}

	summary.CollectInt("backup total regions", approximateRegions)

	// Backup
	// Redirect to log if there is no log file to avoid unreadable output.
	updateCh := g.StartProgress(
		ctx, cmdName, int64(approximateRegions), !cfg.LogProgress)

	progressCallBack := func(unit backup.ProgressUnit) {
		if unit == backup.RangeUnit {
			return
		}
		updateCh.Inc()
	}

	// StartVersion/EndVersion are 0 because raw kv has no MVCC timestamps.
	req := backuppb.BackupRequest{
		ClusterId:        client.GetClusterID(),
		StartKey:         backupRange.StartKey,
		EndKey:           backupRange.EndKey,
		StartVersion:     0,
		EndVersion:       0,
		RateLimit:        cfg.RateLimit,
		Concurrency:      cfg.Concurrency,
		StorageBackend:   client.GetStorageBackend(),
		IsRawKv:          true,
		Cf:               cfg.CF,
		CompressionType:  cfg.CompressionType,
		CompressionLevel: cfg.CompressionLevel,
		CipherInfo:       &cfg.CipherInfo,
	}
	rg := rtree.Range{
		StartKey: backupRange.StartKey,
		EndKey:   backupRange.EndKey,
	}
	progressRange := &rtree.ProgressRange{
		Res:        rtree.NewRangeTree(),
		Incomplete: []rtree.Range{rg},
		Origin:     rg,
	}
	metaWriter := metautil.NewMetaWriter(client.GetStorage(), metautil.MetaFileSize, false, metautil.MetaFile, &cfg.CipherInfo)
	metaWriter.StartWriteMetasAsync(ctx, metautil.AppendDataFile)
	err = client.BackupRange(ctx, req, map[string]string{}, progressRange, metaWriter, progressCallBack)
	if err != nil {
		return errors.Trace(err)
	}
	// Backup has finished.
	updateCh.Close()
	rawRanges := []*backuppb.RawRange{{StartKey: backupRange.StartKey, EndKey: backupRange.EndKey, Cf: cfg.CF}}
	metaWriter.Update(func(m *backuppb.BackupMeta) {
		m.StartVersion = req.StartVersion
		m.EndVersion = req.EndVersion
		m.IsRawKv = req.IsRawKv
		m.RawRanges = rawRanges
		m.ClusterId = req.ClusterId
		m.ClusterVersion = clusterVersion
		m.BrVersion = brVersion
		m.ApiVersion = client.GetApiVersion()
	})
	err = metaWriter.FinishWriteMetas(ctx, metautil.AppendDataFile)
	if err != nil {
		return errors.Trace(err)
	}
	err = metaWriter.FlushBackupMeta(ctx)
	if err != nil {
		return errors.Trace(err)
	}

	g.Record(summary.BackupDataSize, metaWriter.ArchiveSize())
	// Set task summary to success status.
	summary.SetSuccessStatus(true)
	return nil
}
|
package binary_search
// BinarySearch returns the index of value in the sorted slice arr,
// or -1 when the value is not present.
func BinarySearch(arr []int, value int) int {
	return binarySearch(arr, 0, len(arr), value)
}

// binarySearch looks for value inside the half-open interval [l, r) of arr.
func binarySearch(arr []int, l int, r int, value int) int {
	if len(arr) == 0 || l >= r {
		return -1
	}
	for l < r {
		mid := l + (r-l)/2
		switch {
		case arr[mid] == value:
			return mid
		case arr[mid] < value:
			l = mid + 1
		default:
			r = mid
		}
	}
	return -1
}
// BinarySearch1 returns the index of the FIRST element equal to value in the
// sorted slice arr, or -1 when value is absent.
func BinarySearch1(arr []int, value int) int {
	return binarySearch1(arr, 0, len(arr), value)
}

// binarySearch1 searches the half-open interval [l, r) for the first
// occurrence of value.
//
// BUGFIX: the original treated r as an inclusive bound (`for l <= r` with
// `r = mid - 1`) while being seeded with the exclusive bound len(arr); when
// value was greater than every element, mid reached len(arr) and arr[mid]
// read out of range (e.g. binarySearch1([]int{5}, 0, 1, 6) panicked).
// r is now consistently exclusive.
func binarySearch1(arr []int, l int, r int, value int) int {
	if len(arr) <= 0 || l >= r {
		return -1
	}
	for l < r {
		mid := l + (r-l)/2
		if arr[mid] == value {
			// A match is the first one iff it is at index 0 or the
			// previous element is strictly smaller.
			if mid == 0 || arr[mid-1] < value {
				return mid
			}
			r = mid
		} else if arr[mid] < value {
			l = mid + 1
		} else {
			r = mid
		}
	}
	return -1
}
// BinarySearch2 returns the index of the LAST element equal to value in the
// sorted slice arr, or -1 when value is absent.
func BinarySearch2(arr []int, value int) int {
	return binarySearch2(arr, 0, len(arr), value)
}

// binarySearch2 searches the half-open interval [l, r) for the last
// occurrence of value.
func binarySearch2(arr []int, l int, r int, value int) int {
	if len(arr) <= 0 || l >= r {
		return -1
	}
	for l < r {
		mid := l + (r-l)/2
		switch {
		case arr[mid] > value:
			r = mid
		case arr[mid] < value:
			l = mid + 1
		default:
			// A match is the last one iff it is the final element or the
			// next element is strictly larger.
			if mid == len(arr)-1 || arr[mid+1] > value {
				return mid
			}
			l = mid + 1
		}
	}
	return -1
}
// BinarySearch3 returns the index of the first element greater than or equal
// to value in the sorted slice arr, or -1 when no such element exists.
func BinarySearch3(arr []int, value int) int {
	return binarySearch3(arr, 0, len(arr), value)
}

// binarySearch3 searches the half-open interval [l, r) for the first element
// >= value.
//
// BUGFIX: the original `arr[mid] > value` branch read arr[mid-1] without a
// `mid == 0` guard, so e.g. BinarySearch3([]int{1, 3, 5}, 0) panicked with an
// out-of-range index. The == and > branches are identical apart from that
// guard, so they are merged here with the guard applied.
func binarySearch3(arr []int, l int, r int, value int) int {
	if len(arr) <= 0 || l >= r {
		return -1
	}
	for l < r {
		mid := l + (r-l)/2
		if arr[mid] >= value {
			// First qualifying element iff at index 0 or the previous
			// element is strictly smaller than value.
			if mid == 0 || arr[mid-1] < value {
				return mid
			}
			r = mid
		} else {
			l = mid + 1
		}
	}
	return -1
}
// BinarySearch4 returns the index of the last element less than or equal to
// value in the sorted slice arr, or -1 when no such element exists.
func BinarySearch4(arr []int, value int) int {
	return binarySearch4(arr, 0, len(arr), value)
}

// binarySearch4 searches the half-open interval [l, r) for the last element
// <= value.
//
// BUGFIX: the original `arr[mid] < value` branch read arr[mid+1] without a
// `mid == len(arr)-1` guard, so e.g. BinarySearch4([]int{1, 3, 5}, 7)
// panicked with an out-of-range index. The == and < branches are identical
// apart from that guard, so they are merged here with the guard applied.
func binarySearch4(arr []int, l int, r int, value int) int {
	if len(arr) <= 0 || l >= r {
		return -1
	}
	for l < r {
		mid := l + (r-l)/2
		if arr[mid] <= value {
			// Last qualifying element iff at the end of the slice or the
			// next element is strictly greater than value.
			if mid == len(arr)-1 || arr[mid+1] > value {
				return mid
			}
			l = mid + 1
		} else {
			r = mid
		}
	}
	return -1
}
|
package responses
// Results is an ordered collection of query results.
type Results []Result

// Result is a single row/record keyed by column or field name; values are
// untyped because the shape depends on the query that produced them.
type Result map[string]interface{}
|
// +build zalandoValidation
package validators
import (
	"errors"
	"strings"

	. "github.com/zalando/chimp/types"
)
type ZalandoValidator struct{}
//Validate returns true if the passed interface is valid, false otherwise.
//If the interface cannot be passed, an error is returned.
func (*ZalandoValidator) Validate(input interface{}) (bool, error) {
	//validate to chimp create/update data structure
	//TODO this validation is only basic and needs improvement
	//validating pierone only url
	// BUGFIX: the unchecked assertion input.(DeployRequest) panicked on any
	// other type, contradicting the doc comment which promises an error.
	dr, ok := input.(DeployRequest)
	if !ok {
		return false, errors.New("input is not a DeployRequest")
	}
	if !strings.HasPrefix(dr.ImageURL, "pierone.stups.zalan.do") {
		return false, nil
	}
	return true, nil
}
// NewZalandoValidator builds the Zalando-specific Validator implementation.
func NewZalandoValidator() Validator {
	var v ZalandoValidator
	return &v
}
// init wires this build-tagged implementation into the package-level
// constructor hook so callers pick it up when built with zalandoValidation.
func init() {
	New = NewZalandoValidator
}
|
package mutual
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Test_resource_occupyAndRelease checks that occupy2 records the occupier
// and logs the request timestamp on a fresh resource.
func Test_resource_occupyAndRelease(t *testing.T) {
	// Silence debugprint output for the duration of the test, restoring the
	// flag afterwards.
	temp := needDebug
	needDebug = false
	defer func() { needDebug = temp }()
	//
	ast := assert.New(t)
	//
	p := 0
	ts := timestamp{time: 0, process: p}
	r := newResource()
	r.occupy2(ts)
	//
	// occupy2 should record ts both as the current occupier and as the
	// first logged timestamp.
	ast.Equal(ts, r.occupiedBy2)
	ast.Equal(ts, r.timestamps[0])
}
// Test_resource_occupy_occupyInvalidResource checks that occupying an
// already-occupied resource panics with the expected message.
func Test_resource_occupy_occupyInvalidResource(t *testing.T) {
	// Silence debugprint output for the duration of the test.
	temp := needDebug
	needDebug = false
	defer func() { needDebug = temp }()
	//
	ast := assert.New(t)
	//
	p0 := 0
	p1 := 1
	ts0 := timestamp{time: 0, process: p0}
	ts1 := timestamp{time: 1, process: p1}
	r := newResource()
	r.occupy2(ts0)
	//
	// Second occupation must panic with exactly this message
	// ("resource is occupied by ts0 while ts1 tries to acquire it").
	expected := fmt.Sprintf("资源正在被 %s 占据,%s 却想获取资源。", ts0, ts1)
	ast.PanicsWithValue(expected, func() { r.occupy2(ts1) })
}
// Test_resource_report checks the utilization percentage in the report:
// with occupation intervals of 100s and 200s inside a 400s window the
// resource was presumably busy 300/400 = 75% of the time — verify against
// the report() implementation.
func Test_resource_report(t *testing.T) {
	// Silence debugprint output for the duration of the test.
	temp := needDebug
	needDebug = false
	defer func() { needDebug = temp }()
	//
	ast := assert.New(t)
	//
	p := 0
	ts := timestamp{time: 0, process: p}
	r := newResource()
	// Two occupy/release cycles produce four recorded instants in r.times.
	r.occupy2(ts)
	r.release2(ts)
	r.occupy2(ts)
	r.release2(ts)
	now := time.Now()
	// Overwrite the recorded instants with deterministic values:
	// busy [0s,100s] and [200s,400s] within the [0s,400s] window.
	r.times[0] = now
	r.times[1] = now.Add(100 * time.Second)
	r.times[2] = now.Add(200 * time.Second)
	r.times[3] = now.Add(400 * time.Second)
	//
	report := r.report()
	ast.True(strings.Contains(report, "75.00%"), report)
	//
	ast.Equal(4, len(r.times), "资源被占用了 2 次,但是 r.times 的长度不等于 4")
}
// Test_resource_timestamps drives many occupy/release cycles with strictly
// increasing timestamps and checks that every instant was recorded and that
// the occupation log stays sorted.
func Test_resource_timestamps(t *testing.T) {
	// Silence debugprint output for the duration of the test.
	temp := needDebug
	needDebug = false
	defer func() { needDebug = temp }()
	//
	ast := assert.New(t)
	//
	p := 0
	ts := timestamp{time: 0, process: p}
	r := newResource()
	times := 100
	//
	// Alternate bumping .time and .process so each iteration's timestamp is
	// strictly greater than the previous one (presumably ordered by
	// (time, process) — see the timestamp type).
	for i := 0; i < times; i++ {
		if i%2 == 0 {
			ts.time++
		} else {
			ts.process++
		}
		r.occupy2(ts)
		r.release2(ts)
	}
	//
	// Each occupy/release pair records two instants.
	expected := times * 2
	actual := len(r.times)
	ast.Equal(expected, actual)
	//
	ast.True(r.isSortedOccupied())
}
|
package main
import (
"encoding/json"
"fmt"
"math/rand"
"os"
"strings"
"time"
"github.com/erbridge/gotwit"
"github.com/erbridge/gotwit/twitter"
)
type (
	// corpus is the word material loaded from data/corpus.json:
	// Words are the base words a tweet is assembled from, and Prefixes maps
	// a word prefix to the candidate strings that may be hyphen-prepended
	// to words starting with it.
	corpus struct {
		Words    []string            `json:"words"`
		Prefixes map[string][]string `json:"prefixes"`
	}
)
// getCorpus loads and decodes data/corpus.json from the working directory.
func getCorpus() (c corpus, err error) {
	corpusFile, err := os.Open("data/corpus.json")
	if err != nil {
		return
	}
	// BUGFIX: the file handle was previously never closed (leaked on every
	// call, on both the success and the decode-error paths).
	defer corpusFile.Close()

	parser := json.NewDecoder(corpusFile)
	if err = parser.Decode(&c); err != nil {
		return
	}
	return
}
// createTweetText assembles a nonsense tweet from the corpus: it picks 1-5
// base words, randomly stretches certain letters (s/k/p/n/a/i/u) up to four
// times, and occasionally prepends a repeated, hyphen-joined prefix.
func createTweetText(c corpus) (text string) {
	// BUGFIX: rand.Intn panics when its argument is 0, so an empty word
	// list previously crashed the bot. An empty corpus now yields "".
	if len(c.Words) == 0 {
		return ""
	}
	count := rand.Intn(3) + rand.Intn(3) + 1
	words := make([]string, count)
	n := rand.Float32()
	for i := 0; i < count; i++ {
		letters := make([]string, 0)
		for _, r := range c.Words[rand.Intn(len(c.Words))] {
			limit := 1
			if r == 's' || r == 'k' || r == 'p' || r == 'n' || r == 'a' || r == 'i' || r == 'u' {
				// n drifts upward; crossing a threshold stretches the letter
				// and re-rolls n into a smaller range.
				n += rand.Float32() / 10
				if n > 1 {
					n = 0
				}
				if n > 0.9 {
					limit = 4
					n = rand.Float32() / 4
				} else if n > 0.75 {
					limit = 3
					n = rand.Float32() / 3
				} else if n > 0.6 {
					limit = 2
					n = rand.Float32() / 2
				}
			}
			// j, not i: the original shadowed the word index here.
			for j := 0; j < limit; j++ {
				letters = append(letters, string(r))
			}
		}
		word := strings.Join(letters, "")
		// BUGFIX: guard the prefix lottery the same way — rand.Intn(0)
		// panicked when the corpus defined no prefixes.
		if len(c.Prefixes) > 0 {
			// Pick a pseudo-random map entry by walking the (randomly
			// ordered) map `index` steps.
			index := rand.Intn(len(c.Prefixes))
			for k, v := range c.Prefixes {
				if index == 0 {
					if strings.HasPrefix(word, k) {
						prefix := v[rand.Intn(len(v))]
						prefixCount := rand.Intn(4) - rand.Intn(3)
						if prefixCount > 0 {
							prefixes := make([]string, prefixCount+1)
							for j := 0; j < prefixCount; j++ {
								prefixes[j] = prefix
							}
							// Trailing empty slot yields "prefix-...-word".
							word = strings.Join(prefixes, "-") + word
						}
					}
					break
				}
				index--
			}
		}
		words[i] = word
	}
	text = strings.Join(words, " ")
	return
}
// postTweet generates one tweet from the corpus and posts it via the bot.
func postTweet(b gotwit.Bot, c corpus) {
	text := createTweetText(c)
	fmt.Println("Posting:", text)
	b.Post(text, false)
}
// main loads Twitter credentials, waits until the top of the next hour, and
// then posts one generated tweet every hour forever.
func main() {
	var (
		con twitter.ConsumerConfig
		acc twitter.AccessConfig
	)
	// Prefer file-based secrets, fall back to environment variables.
	// NOTE(review): both loaders' errors are discarded — a bad config only
	// surfaces later when b.Start() fails. Consider checking them.
	f := "secrets.json"
	if _, err := os.Stat(f); err == nil {
		con, acc, _ = twitter.LoadConfigFile(f)
	} else {
		con, acc, _ = twitter.LoadConfigEnv()
	}
	b := gotwit.NewBot("elbownoises", con, acc)
	if err := b.Start(); err != nil {
		panic(err)
	}
	c, err := getCorpus()
	if err != nil {
		panic(err)
	}
	now := time.Now()
	rand.Seed(now.UnixNano())
	// Align the first tweet to the start of the next hour.
	next := time.Date(
		now.Year(),
		now.Month(),
		now.Day(),
		now.Hour()+1,
		0,
		0,
		0,
		now.Location(),
	)
	sleep := next.Sub(now)
	fmt.Printf("%v until first tweet\n", sleep)
	time.Sleep(sleep)
	postTweet(b, c)
	// Then tweet once per hour until the process is killed.
	ticker := time.NewTicker(time.Hour)
	defer ticker.Stop()
	for range ticker.C {
		postTweet(b, c)
	}
}
|
package cmd
import (
"fmt"
"os"
"os/exec"
"github.com/urfave/cli"
)
// EditCmd describes the "edit" (alias "e") CLI subcommand, which opens the
// emoji commit message file in the user's editor.
func EditCmd() cli.Command {
	cmd := cli.Command{
		Name:    "edit",
		Aliases: []string{"e"},
		Usage:   "Edit emoji commit messages",
		Action:  edit,
	}
	return cmd
}
// edit opens the commit message file in $EDITOR (defaulting to vim), wired
// to the current terminal's stdin/stdout.
func edit(c *cli.Context) error {
	editor := os.Getenv("EDITOR")
	if editor == "" {
		editor = "vim"
	}

	path, err := msgFilePath()
	if err != nil {
		fmt.Println(err)
		return err
	}

	ed := exec.Command(editor, path)
	ed.Stdin = os.Stdin
	ed.Stdout = os.Stdout
	if runErr := ed.Run(); runErr != nil {
		fmt.Println(runErr)
		return runErr
	}
	return nil
}
|
package repository_test
import (
"errors"
"strings"
"testing"
"github.com/goodplayer/Princess/repository"
"github.com/gofrs/uuid"
)
// TestDb_SaveUser saves a freshly generated user and checks that saving the
// same user twice fails on the primary-key constraint.
// NOTE(review): relies on repository.Init() connecting to a real database.
func TestDb_SaveUser(t *testing.T) {
	repository.Init()
	db := repository.NewDb(repository.GlobalDb)
	// UUIDv7 gives a unique user id per test run, so reruns don't collide.
	uid := uuid.Must(uuid.NewV7())
	user := &repository.User{
		UserId:     uid.String(),
		UserName:   strings.ReplaceAll(uid.String(), "-", ""),
		AliasName:  "demoalias01",
		Password:   "password01",
		UserStatus: 1,
	}
	if _, err := db.SaveUser(user); err != nil {
		t.Fatal(err)
	}
	// The second save must conflict on the primary key.
	if _, err := db.SaveUser(user); err == nil {
		t.Fatal(errors.New("should report error due to primary key conflict"))
	}
}
|
package multiple
import (
"errors"
"io"
"log"
"strings"
"time"
)
// UserMgr fans a logical byte stream out over several underlying
// connections: writes are encoded into fragments and sent on every conn,
// reads are reassembled from whichever conns deliver first.
type UserMgr struct {
	conns []localConn
	// Network string
	// Addrs   []string
	Listens                   []string
	dialTimeout, writeTimeout time.Duration
	readTimeout               time.Duration
	encoder                   MultipleEncoder
	done                      chan bool
	// packet 上层调用send发送的完整数据 (packet: the full payload passed to Write)
	// fragment 底层对packet的分片, 按maxmtu调整 (fragment: per-MTU slices of a packet)
	recvque  chan []byte  // raw fragments received from the conns
	readyque chan *Packet // fully reassembled packets ready for Read
	reader   *packetReader
}

// Compile-time check that UserMgr satisfies io.ReadWriteCloser.
var (
	_ io.ReadWriteCloser = new(UserMgr)
)

// connReturn carries one connection's write outcome back to Write.
type connReturn struct {
	n   int
	err error
}
// Write encodes p into fragments and sends them concurrently on every
// connection. It returns as soon as one connection succeeds; only when all
// connections fail does it return their joined errors.
func (w *UserMgr) Write(p []byte) (int, error) {
	// BUGFIX: the channel must be buffered for len(w.conns) results.
	// It was unbuffered, and since Write returns on the FIRST success,
	// every remaining writeConn goroutine blocked forever on `r <-`,
	// leaking one goroutine per extra connection per call.
	r := make(chan connReturn, len(w.conns))
	datas := w.encoder.Encode(p)
	for _, conn := range w.conns {
		go w.writeConn(conn, datas, r)
	}
	var errs []string
	var rr connReturn
	for range w.conns {
		rr = <-r
		if rr.err == nil {
			// First success wins; stragglers finish into the buffer.
			return len(p), rr.err
		}
		errs = append(errs, rr.err.Error())
	}
	return 0, errors.New(strings.Join(errs, "|"))
}
// writeConn writes all fragments to a single connection in order and reports
// the total bytes written plus the first error (if any) on r.
func (w *UserMgr) writeConn(conn localConn, datas [][]byte, r chan<- connReturn) {
	var total, n int
	var err error
	if w.writeTimeout > 0 {
		// NOTE(review): SetWriteDeadline's error is ignored — confirm the
		// underlying conn cannot fail here.
		conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
	}
	for _, data := range datas {
		n, err = conn.Write(data)
		total += n
		if err != nil {
			// Stop at the first failed fragment; partial total is reported.
			break
		}
	}
	r <- connReturn{total, err}
}
// readStart marks the reader as active and kicks off the goroutine that
// refreshes the minimum ready packet id.
func (w *UserMgr) readStart() {
	w.reader.SetReading(true)
	go w.reader.refreshReadyMinid()
}

// readEnd marks the reader as idle again; paired with readStart via defer.
func (w *UserMgr) readEnd() {
	w.reader.SetReading(false)
}
// Read blocks until a reassembled packet is available (or the configured
// read timeout elapses) and copies its payload into p.
func (w *UserMgr) Read(p []byte) (n int, err error) {
	// BUGFIX: the original did `timer := time.NewTimer(0); timer.Stop()` and
	// reused that timer. NewTimer(0) can fire before Stop is called, leaving
	// a value buffered in timer.C — with readTimeout == 0 the select below
	// could then return a spurious "read timeout" immediately. A nil channel
	// (which blocks forever) is the correct "no timeout" case.
	var timeoutC <-chan time.Time
	if w.readTimeout > 0 {
		timer := time.NewTimer(w.readTimeout)
		defer timer.Stop()
		timeoutC = timer.C
	}
	w.readStart()
	defer w.readEnd()
	var packet *Packet
	select {
	case <-timeoutC:
		return 0, errors.New("read timeout")
	case packet = <-w.readyque:
	}
	// Short reads are possible: copy truncates to len(p).
	n = copy(p, packet.ReadyData())
	packet.ReleaseMem()
	return
}
// recvLoop reads raw fragments from one connection and feeds them to the
// packet reader via recvque until the connection errors out.
func (w *UserMgr) recvLoop(conn localConn) {
	for {
		// Buffers come from the package-level pool; the reader side is
		// presumably responsible for returning them — confirm ownership.
		data := pool.Get().([]byte)
		n, err := conn.Read(data)
		if err != nil {
			// Any read error (including timeouts/close) terminates this
			// connection's receive loop.
			log.Println("recvLoop err", n, err)
			return
		}
		log.Println("recvLoop before")
		w.recvque <- data[:n]
		log.Println("recvLoop after")
	}
}
// Close closes every underlying connection and signals the reader
// goroutines to stop. All connections are attempted even if some fail;
// their error messages are joined with "|".
func (w *UserMgr) Close() error {
	var errs []string
	for _, conn := range w.conns {
		if err := conn.Close(); err != nil {
			errs = append(errs, err.Error())
		}
	}
	// BUGFIX: close done unconditionally. The original returned before
	// closing it when any conn.Close failed, so the reader goroutines
	// waiting on done were leaked forever.
	close(w.done)
	if len(errs) != 0 {
		return errors.New(strings.Join(errs, "|"))
	}
	return nil
}
// establishConnection adopts the given connections, starts the decode loop,
// and spawns one receive loop per connection.
func (w *UserMgr) establishConnection(conns []localConn) (err error) {
	w.conns = conns
	// go w.timerClearTimeouReadingPacket()
	go w.reader.decodeRecvDataLoop()
	for _, conn := range w.conns {
		go w.recvLoop(conn)
	}
	// Always succeeds today; the error return is kept for future dial logic.
	return nil
}
// NewUserMgr builds a UserMgr over the given connections. timeout must hold
// four Go duration strings, in order: dial, write, read and buffer timeout.
func NewUserMgr(conns []localConn, timeout []string) (io.ReadWriteCloser, error) {
	// BUGFIX: the original indexed timeout[0..3] unconditionally (panicking
	// on short slices) and discarded every ParseDuration error, silently
	// turning malformed values into zero timeouts.
	if len(timeout) < 4 {
		return nil, errors.New("timeout requires 4 durations: dial, write, read, buffer")
	}
	w := &UserMgr{
		done:     make(chan bool),
		recvque:  make(chan []byte),
		readyque: make(chan *Packet),
	}
	buffertimeout, err := time.ParseDuration(timeout[3])
	if err != nil {
		return nil, err
	}
	w.reader = NewPacketReader(w.recvque, w.readyque, w.done, buffertimeout)
	if w.readTimeout, err = time.ParseDuration(timeout[2]); err != nil {
		return nil, err
	}
	if w.writeTimeout, err = time.ParseDuration(timeout[1]); err != nil {
		return nil, err
	}
	if w.dialTimeout, err = time.ParseDuration(timeout[0]); err != nil {
		return nil, err
	}
	if err := w.establishConnection(conns); err != nil {
		return nil, err
	}
	return w, nil
}
|
package config
import (
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
// AddCommandLine setup Graphite specific cli args and flags.
// Each flag binds directly into the corresponding field of cfg, so cfg must
// outlive the kingpin parse.
func AddCommandLine(app *kingpin.Application, cfg *Config) {
	app.Flag("graphite.default-prefix",
		"The prefix to prepend to all metrics exported to Graphite.").
		StringVar(&cfg.DefaultPrefix)

	// Read path (remote-read from Graphite Web).
	app.Flag("graphite.read.url",
		"The URL of the remote Graphite Web server to send samples to.").
		StringVar(&cfg.Read.URL)
	app.Flag("graphite.read.max-point-delta",
		"If set, interval used to linearly interpolate intermediate points.").
		DurationVar(&cfg.Read.MaxPointDelta)

	// Write path (remote-write to Carbon).
	app.Flag("graphite.write.carbon-address",
		"The host:port of the Graphite server to send samples to.").
		StringVar(&cfg.Write.CarbonAddress)
	app.Flag("graphite.write.carbon-transport",
		"Transport protocol to use to communicate with Graphite.").
		StringVar(&cfg.Write.CarbonTransport)
	app.Flag("graphite.write.enable-paths-cache",
		"Enables a cache to graphite paths lists for written metrics.").
		BoolVar(&cfg.Write.EnablePathsCache)
	app.Flag("graphite.write.paths-cache-ttl",
		"Duration TTL of items within the paths cache.").
		DurationVar(&cfg.Write.PathsCacheTTL)
	app.Flag("graphite.write.paths-cache-purge-interval",
		"Duration between purges for expired items in the paths cache.").
		DurationVar(&cfg.Write.PathsCachePurgeInterval)

	// Tag handling.
	app.Flag("graphite.enable-tags",
		"Use Graphite tags.").
		BoolVar(&cfg.EnableTags)
	app.Flag("graphite.filtered-tags",
		"Use Graphite tags only for given tag names; Multiple names must be separated by a comma. Eg: app_name,job_name").
		StringVar(&cfg.FilteredTags)
}
|
package leetcode
/*X is a good number if after rotating each digit individually by 180 degrees, we get a valid number that is different from X. Each digit must be rotated - we cannot choose to leave it alone.
A number is valid if each digit remains a digit after rotation. 0, 1, and 8 rotate to themselves; 2 and 5 rotate to each other; 6 and 9 rotate to each other, and the rest of the numbers do not rotate to any other number and become invalid.
Now given a positive number N, how many numbers X from 1 to N are good?
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/rotated-digits
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
// rotatedDigits counts the "good" numbers in [1, N]: numbers that stay valid
// and become DIFFERENT after rotating every digit by 180 degrees.
func rotatedDigits(N int) int {
	count := 0
	for x := 1; x <= N; x++ {
		if isGood(x) {
			count++
		}
	}
	return count
}

// isGood reports whether rotating every digit of N yields a valid number
// different from N. Digits 0/1/8 rotate to themselves, 2<->5 and 6<->9 swap,
// and any other digit makes the rotation invalid.
func isGood(N int) bool {
	changed := false
	for ; N > 0; N /= 10 {
		switch N % 10 {
		case 2, 5, 6, 9:
			// At least one digit changes under rotation.
			changed = true
		case 0, 1, 8:
			// Valid but unchanged.
		default:
			// 3, 4, 7 have no valid rotation.
			return false
		}
	}
	return changed
}
|
/*
Write a method that accepts two integer parameters rows and cols.
The output is a 2d array of numbers displayed in column-major order,
meaning the numbers shown increase sequentially down each column and wrap
to the top of the next column to the right once the bottom of the current column is reached.
Examples
printGrid(3, 6) ➞ [
[1, 4, 7, 10, 13, 16],
[2, 5, 8, 11, 14, 17],
[3, 6, 9, 12, 15, 18]
]
printGrid(5, 3) ➞ [
[1, 6, 11],
[2, 7, 12],
[3, 8, 13],
[4, 9, 14],
[5, 10, 15]
]
printGrid(4, 1) ➞ [
[1],
[2],
[3],
[4]
]
Notes
N/A
*/
package main
import (
"fmt"
"reflect"
)
// main exercises grid against the examples from the problem statement plus
// two extra shapes (single column of rows, single row).
func main() {
	test(3, 6, [][]int{{1, 4, 7, 10, 13, 16}, {2, 5, 8, 11, 14, 17}, {3, 6, 9, 12, 15, 18}})
	test(5, 3, [][]int{{1, 6, 11}, {2, 7, 12}, {3, 8, 13}, {4, 9, 14}, {5, 10, 15}})
	test(4, 1, [][]int{{1}, {2}, {3}, {4}})
	test(1, 3, [][]int{{1, 2, 3}})
	test(10, 2, [][]int{{1, 11}, {2, 12}, {3, 13}, {4, 14}, {5, 15}, {6, 16}, {7, 17}, {8, 18}, {9, 19}, {10, 20}})
}
// test prints grid(r, c) and asserts it deep-equals the expected matrix q.
func test(r, c int, q [][]int) {
	p := grid(r, c)
	fmt.Println(p)
	assert(reflect.DeepEqual(p, q))
}
// assert panics when the given condition is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// grid returns an r-by-c matrix holding 1..r*c in column-major order: the
// numbers increase down each column and wrap to the top of the next column.
func grid(r, c int) [][]int {
	// One contiguous backing array, sliced into r rows of c cells each.
	backing := make([]int, r*c)
	rows := make([][]int, r)
	for i := range rows {
		rows[i] = backing[i*c : (i+1)*c]
	}
	val := 1
	for col := 0; col < c; col++ {
		for row := 0; row < r; row++ {
			rows[row][col] = val
			val++
		}
	}
	return rows
}
|
package post
import (
"time"
"yj-app/app/yjgframe/db"
)
// Entity maps one row of the sys_post table (a job position / post in the
// system), persisted through xorm.
type Entity struct {
	PostId     int64     `json:"post_id" xorm:"not null pk autoincr comment('岗位ID') BIGINT(20)"`
	PostCode   string    `json:"post_code" xorm:"not null comment('岗位编码') VARCHAR(64)"`
	PostName   string    `json:"post_name" xorm:"not null comment('岗位名称') VARCHAR(50)"`
	PostSort   int       `json:"post_sort" xorm:"not null comment('显示顺序') INT(4)"`
	Status     string    `json:"status" xorm:"not null comment('状态(0正常 1停用)') CHAR(1)"`
	CreateBy   string    `json:"create_by" xorm:"default '' comment('创建者') VARCHAR(64)"`
	CreateTime time.Time `json:"create_time" xorm:"comment('创建时间') DATETIME"`
	UpdateBy   string    `json:"update_by" xorm:"default '' comment('更新者') VARCHAR(64)"`
	UpdateTime time.Time `json:"update_time" xorm:"comment('更新时间') DATETIME"`
	Remark     string    `json:"remark" xorm:"comment('备注') VARCHAR(500)"`
}
// TableName returns the database table this entity is mapped to.
func TableName() string {
	const name = "sys_post"
	return name
}
// Insert inserts the record and returns the number of affected rows.
func (r *Entity) Insert() (int64, error) {
	return db.Instance().Engine().Table(TableName()).Insert(r)
}

// Update updates the record identified by r.PostId and returns the number
// of affected rows.
func (r *Entity) Update() (int64, error) {
	return db.Instance().Engine().Table(TableName()).ID(r.PostId).Update(r)
}

// Delete removes the record identified by r.PostId and returns the number
// of affected rows.
func (r *Entity) Delete() (int64, error) {
	return db.Instance().Engine().Table(TableName()).ID(r.PostId).Delete(r)
}

// DeleteBatch removes every record whose post_id is in ids.
func DeleteBatch(ids ...int64) (int64, error) {
	return db.Instance().Engine().Table(TableName()).In("post_id", ids).Delete(new(Entity))
}

// FindOne loads a single record matched by the non-zero fields already set
// on r; the bool reports whether a row was found.
func (r *Entity) FindOne() (bool, error) {
	return db.Instance().Engine().Table(TableName()).Get(r)
}

// Find returns all records matching the raw where clause, sorted by order.
// NOTE(review): where/order are interpolated as raw SQL fragments by xorm —
// they must never contain untrusted input.
func Find(where, order string) ([]Entity, error) {
	var list []Entity
	err := db.Instance().Engine().Table(TableName()).Where(where).OrderBy(order).Find(&list)
	return list, err
}

// FindIn returns the records whose column value is in args.
func FindIn(column string, args ...interface{}) ([]Entity, error) {
	var list []Entity
	err := db.Instance().Engine().Table(TableName()).In(column, args).Find(&list)
	return list, err
}

// FindNotIn returns the records whose column value is NOT in args.
func FindNotIn(column string, args ...interface{}) ([]Entity, error) {
	var list []Entity
	err := db.Instance().Engine().Table(TableName()).NotIn(column, args).Find(&list)
	return list, err
}
|
package env
import (
"log"
"os"
"regexp"
"strconv"
"strings"
"sync"
)
// Environment variable names the broadcaster configuration is read from.
const (
	broadcasterHTTPPortEnvName   = "BROADCASTER_HTTP_PORT"
	broadcasterBufferSizeEnvName = "BROADCASTER_BUFFER_SIZE"
	broadcasterHostList          = "BROADCASTER_HOST_LIST"
)

// Defaults used when an env var is unset or invalid; overwritten exactly
// once by ensureInit.
var (
	httpPort   = 7000
	bufferSize = 1024
	hostList   = []string{"localhost:7001", "localhost:7002"}
	once       sync.Once
)
// GetHTTPPort returns the configured HTTP port, loading the environment
// configuration on first use.
func GetHTTPPort() int {
	ensureInit()
	return httpPort
}

// GetBufferSize returns the configured broadcast buffer size.
func GetBufferSize() int {
	ensureInit()
	return bufferSize
}

// GetHostList returns the configured broadcast host list.
// NOTE(review): the shared slice is returned directly, so callers can mutate
// the package-level value — confirm this is acceptable.
func GetHostList() []string {
	ensureInit()
	return hostList
}
// ensureInit lazily loads the configuration from the environment exactly
// once. Unset or invalid values fall back to the package defaults.
func ensureInit() {
	once.Do(func() {
		initHttpPort := func() {
			port := os.Getenv(broadcasterHTTPPortEnvName)
			if port == "" {
				log.Printf("Using default server port: %d", httpPort)
				return
			}
			intPort, err := strconv.Atoi(port)
			if err != nil || intPort <= 0 {
				log.Printf("Invalid server port defined, using default one: %d", httpPort)
				return
			}
			log.Printf("Setting HTTP port: %d", intPort)
			httpPort = intPort
		}
		initBufferSize := func() {
			size := os.Getenv(broadcasterBufferSizeEnvName)
			if size == "" {
				log.Printf("Using default buffer size: %d", bufferSize)
				return
			}
			intBufferSize, err := strconv.Atoi(size)
			if err != nil || intBufferSize <= 0 {
				log.Printf("Invalid buffer size defined, using default one: %d", bufferSize)
				return
			}
			log.Printf("Setting buffer size: %d", intBufferSize)
			bufferSize = intBufferSize
		}
		initHostList := func() {
			list := os.Getenv(broadcasterHostList)
			if list == "" {
				log.Printf("Using default host list: %s", hostList)
				return
			}
			// BUGFIX: the pattern must be anchored. The previous unanchored
			// `\w+(:\d+)?(;...)*` matched a SUBSTRING, so MatchString
			// accepted any value containing a single word character and the
			// validation was a no-op. The host character class also accepts
			// '.' and '-' so dotted hostnames validate.
			r := regexp.MustCompile(`^[\w.-]+(:\d+)?(;[\w.-]+(:\d+)?)*$`)
			if !r.MatchString(list) {
				log.Printf("Invalid host list defined, using default one: %s", hostList)
				return
			}
			log.Printf("Setting host list: %s", list)
			hostList = strings.Split(list, ";")
		}
		initHttpPort()
		initBufferSize()
		initHostList()
	})
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
// Search API where the search will only be executed after specified checkpoints
// are available due to a refresh. This API is designed for internal use by the
// fleet server project.
package search
import (
gobytes "bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/elastic/elastic-transport-go/v8/elastictransport"
"github.com/elastic/go-elasticsearch/v8/typedapi/types"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/expandwildcard"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/operator"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/searchtype"
"github.com/elastic/go-elasticsearch/v8/typedapi/types/enums/suggestmode"
)
const (
	// indexMask marks that the index path parameter has been set on Search.
	indexMask = iota + 1
)

// ErrBuildPath is returned in case of missing parameters within the build of the request.
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")

// Search accumulates the state needed to build and execute a fleet search
// request: the transport, path/query parameters, headers and body.
type Search struct {
	transport elastictransport.Interface

	headers http.Header
	values  url.Values
	path    url.URL

	buf *gobytes.Buffer

	req      *Request
	deferred []func(request *Request) error
	raw      io.Reader

	paramSet int

	index string
}
// NewSearch type alias for index.
type NewSearch func(index string) *Search

// NewSearchFunc returns a new instance of Search with the provided transport.
// Used in the index of the library this allows to retrieve every apis in once place.
func NewSearchFunc(tp elastictransport.Interface) NewSearch {
	return func(index string) *Search {
		n := New(tp)

		n.Index(index)

		return n
	}
}
// Search API where the search will only be executed after specified checkpoints
// are available due to a refresh. This API is designed for internal use by the
// fleet server project.
//
// New returns a Search with empty header/value maps and an empty body buffer;
// the index and other parameters are set through the builder methods.
func New(tp elastictransport.Interface) *Search {
	r := &Search{
		transport: tp,
		values:    make(url.Values),
		headers:   make(http.Header),
		buf:       gobytes.NewBuffer(nil),

		req: NewRequest(),
	}

	return r
}

// Raw takes a json payload as input which is then passed to the http.Request
// If specified Raw takes precedence on Request method.
func (r *Search) Raw(raw io.Reader) *Search {
	r.raw = raw
	return r
}

// Request allows to set the request property with the appropriate payload.
func (r *Search) Request(req *Request) *Search {
	r.req = req
	return r
}
// HttpRequest returns the http.Request object built from the
// given parameters.
func (r *Search) HttpRequest(ctx context.Context) (*http.Request, error) {
	var path strings.Builder
	var method string
	var req *http.Request

	var err error

	// Apply the deferred request mutations before serializing the body.
	if len(r.deferred) > 0 {
		for _, f := range r.deferred {
			deferredErr := f(r.req)
			if deferredErr != nil {
				return nil, deferredErr
			}
		}
	}

	// A raw body, when provided, takes precedence over the typed request.
	if r.raw != nil {
		// NOTE(review): ReadFrom's error is discarded here — confirm a
		// failed raw read should fall through to an empty/partial body.
		r.buf.ReadFrom(r.raw)
	} else if r.req != nil {
		data, err := json.Marshal(r.req)

		if err != nil {
			return nil, fmt.Errorf("could not serialise request for Search: %w", err)
		}

		r.buf.Write(data)
	}

	r.path.Scheme = "http"

	switch {
	case r.paramSet == indexMask:
		path.WriteString("/")
		path.WriteString(r.index)
		path.WriteString("/")
		path.WriteString("_fleet")
		path.WriteString("/")
		path.WriteString("_fleet_search")

		method = http.MethodPost
	}

	r.path.Path = path.String()
	r.path.RawQuery = r.values.Encode()

	if r.path.Path == "" {
		return nil, ErrBuildPath
	}

	if ctx != nil {
		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf)
	} else {
		req, err = http.NewRequest(method, r.path.String(), r.buf)
	}
	// BUGFIX: check the constructor error BEFORE touching req. The original
	// assigned req.Header = r.headers.Clone() first, dereferencing a nil req
	// whenever http.NewRequest(WithContext) failed.
	if err != nil {
		return req, fmt.Errorf("could not build http.Request: %w", err)
	}

	req.Header = r.headers.Clone()

	if req.Header.Get("Content-Type") == "" {
		if r.buf.Len() > 0 {
			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8")
		}
	}

	if req.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8")
	}

	return req, nil
}
// Perform runs the http.Request through the provided transport and returns an http.Response.
// Note the value receiver: Perform works on a shallow copy of the builder.
func (r Search) Perform(ctx context.Context) (*http.Response, error) {
	req, err := r.HttpRequest(ctx)
	if err != nil {
		return nil, err
	}

	res, err := r.transport.Perform(req)
	if err != nil {
		return nil, fmt.Errorf("an error happened during the Search query execution: %w", err)
	}

	return res, nil
}
// Do runs the request through the transport, handle the response and returns a search.Response
func (r Search) Do(ctx context.Context) (*Response, error) {
	response := NewResponse()

	// Typed keys are forced on so aggregation/suggester names carry their
	// type prefix in the decoded response.
	r.TypedKeys(true)

	res, err := r.Perform(ctx)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// NOTE(review): "< 299" treats 299 as an error status; generated as-is
	// upstream, kept for fidelity.
	if res.StatusCode < 299 {
		err = json.NewDecoder(res.Body).Decode(response)
		if err != nil {
			return nil, err
		}
		return response, nil
	}

	// Non-success: decode the error envelope instead.
	errorResponse := types.NewElasticsearchError()
	err = json.NewDecoder(res.Body).Decode(errorResponse)
	if err != nil {
		return nil, err
	}

	// Some error payloads omit the status; backfill it from the HTTP code.
	if errorResponse.Status == 0 {
		errorResponse.Status = res.StatusCode
	}

	return nil, errorResponse
}
// ---------------------------------------------------------------------------
// Builder methods. Each setter records a header, path or query-string
// parameter on the Search and returns the receiver for chaining.
// ---------------------------------------------------------------------------

// Header set a key, value pair in the Search headers map.
func (r *Search) Header(key, value string) *Search {
	r.headers.Set(key, value)

	return r
}

// Index A single target to search. If the target is an index alias, it must resolve
// to a single index.
// API Name: index
func (r *Search) Index(index string) *Search {
	// Path parameter: recorded in paramSet so HttpRequest builds the path.
	r.paramSet |= indexMask
	r.index = index

	return r
}

// API name: allow_no_indices
func (r *Search) AllowNoIndices(allownoindices bool) *Search {
	r.values.Set("allow_no_indices", strconv.FormatBool(allownoindices))

	return r
}

// API name: analyzer
func (r *Search) Analyzer(analyzer string) *Search {
	r.values.Set("analyzer", analyzer)

	return r
}

// API name: analyze_wildcard
func (r *Search) AnalyzeWildcard(analyzewildcard bool) *Search {
	r.values.Set("analyze_wildcard", strconv.FormatBool(analyzewildcard))

	return r
}

// API name: batched_reduce_size
func (r *Search) BatchedReduceSize(batchedreducesize string) *Search {
	r.values.Set("batched_reduce_size", batchedreducesize)

	return r
}

// API name: ccs_minimize_roundtrips
func (r *Search) CcsMinimizeRoundtrips(ccsminimizeroundtrips bool) *Search {
	r.values.Set("ccs_minimize_roundtrips", strconv.FormatBool(ccsminimizeroundtrips))

	return r
}

// API name: default_operator
func (r *Search) DefaultOperator(defaultoperator operator.Operator) *Search {
	r.values.Set("default_operator", defaultoperator.String())

	return r
}

// API name: df
func (r *Search) Df(df string) *Search {
	r.values.Set("df", df)

	return r
}

// ExpandWildcards joins the given wildcard modes into one comma-separated
// query value.
// API name: expand_wildcards
func (r *Search) ExpandWildcards(expandwildcards ...expandwildcard.ExpandWildcard) *Search {
	tmp := []string{}
	for _, item := range expandwildcards {
		tmp = append(tmp, item.String())
	}
	r.values.Set("expand_wildcards", strings.Join(tmp, ","))

	return r
}

// API name: ignore_throttled
func (r *Search) IgnoreThrottled(ignorethrottled bool) *Search {
	r.values.Set("ignore_throttled", strconv.FormatBool(ignorethrottled))

	return r
}

// API name: ignore_unavailable
func (r *Search) IgnoreUnavailable(ignoreunavailable bool) *Search {
	r.values.Set("ignore_unavailable", strconv.FormatBool(ignoreunavailable))

	return r
}

// API name: lenient
func (r *Search) Lenient(lenient bool) *Search {
	r.values.Set("lenient", strconv.FormatBool(lenient))

	return r
}

// API name: max_concurrent_shard_requests
func (r *Search) MaxConcurrentShardRequests(maxconcurrentshardrequests string) *Search {
	r.values.Set("max_concurrent_shard_requests", maxconcurrentshardrequests)

	return r
}

// API name: min_compatible_shard_node
func (r *Search) MinCompatibleShardNode(versionstring string) *Search {
	r.values.Set("min_compatible_shard_node", versionstring)

	return r
}

// API name: preference
func (r *Search) Preference(preference string) *Search {
	r.values.Set("preference", preference)

	return r
}

// API name: pre_filter_shard_size
func (r *Search) PreFilterShardSize(prefiltershardsize string) *Search {
	r.values.Set("pre_filter_shard_size", prefiltershardsize)

	return r
}

// API name: request_cache
func (r *Search) RequestCache(requestcache bool) *Search {
	r.values.Set("request_cache", strconv.FormatBool(requestcache))

	return r
}

// API name: routing
func (r *Search) Routing(routing string) *Search {
	r.values.Set("routing", routing)

	return r
}

// API name: scroll
func (r *Search) Scroll(duration string) *Search {
	r.values.Set("scroll", duration)

	return r
}

// API name: search_type
func (r *Search) SearchType(searchtype searchtype.SearchType) *Search {
	r.values.Set("search_type", searchtype.String())

	return r
}

// SuggestField Specifies which field to use for suggestions.
// API name: suggest_field
func (r *Search) SuggestField(field string) *Search {
	r.values.Set("suggest_field", field)

	return r
}

// API name: suggest_mode
func (r *Search) SuggestMode(suggestmode suggestmode.SuggestMode) *Search {
	r.values.Set("suggest_mode", suggestmode.String())

	return r
}

// API name: suggest_size
func (r *Search) SuggestSize(suggestsize string) *Search {
	r.values.Set("suggest_size", suggestsize)

	return r
}

// SuggestText The source text for which the suggestions should be returned.
// API name: suggest_text
func (r *Search) SuggestText(suggesttext string) *Search {
	r.values.Set("suggest_text", suggesttext)

	return r
}

// API name: typed_keys
func (r *Search) TypedKeys(typedkeys bool) *Search {
	r.values.Set("typed_keys", strconv.FormatBool(typedkeys))

	return r
}

// API name: rest_total_hits_as_int
func (r *Search) RestTotalHitsAsInt(resttotalhitsasint bool) *Search {
	r.values.Set("rest_total_hits_as_int", strconv.FormatBool(resttotalhitsasint))

	return r
}

// API name: _source_excludes
func (r *Search) SourceExcludes_(fields ...string) *Search {
	r.values.Set("_source_excludes", strings.Join(fields, ","))

	return r
}

// API name: _source_includes
func (r *Search) SourceIncludes_(fields ...string) *Search {
	r.values.Set("_source_includes", strings.Join(fields, ","))

	return r
}

// API name: q
func (r *Search) Q(q string) *Search {
	r.values.Set("q", q)

	return r
}
// WaitForCheckpoints A comma separated list of checkpoints. When configured, the search API will
// only be executed on a shard
// after the relevant checkpoint has become visible for search. Defaults to an
// empty list which will cause
// Elasticsearch to immediately execute the search.
// API name: wait_for_checkpoints
func (r *Search) WaitForCheckpoints(waitforcheckpoints ...int64) *Search {
tmp := []string{}
for _, item := range waitforcheckpoints {
tmp = append(tmp, fmt.Sprintf("%v", item))
}
r.values.Set("wait_for_checkpoints", strings.Join(tmp, ","))
return r
}
// AllowPartialSearchResults If true, returns partial results if there are shard request timeouts or
// [shard
// failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures).
// If false, returns
// an error with no partial results. Defaults to the configured cluster setting
// `search.default_allow_partial_results`
// which is true by default.
// API name: allow_partial_search_results
func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search {
	r.values.Set("allow_partial_search_results", strconv.FormatBool(allowpartialsearchresults))
	return r
}

// Aggregations sets the "aggregations" field of the request body.
// API name: aggregations
func (r *Search) Aggregations(aggregations map[string]types.Aggregations) *Search {
	r.req.Aggregations = aggregations
	return r
}

// Collapse sets the "collapse" field of the request body.
// API name: collapse
func (r *Search) Collapse(collapse *types.FieldCollapse) *Search {
	r.req.Collapse = collapse
	return r
}

// DocvalueFields Array of wildcard (*) patterns. The request returns doc values for field
// names matching these patterns in the hits.fields property of the response.
// API name: docvalue_fields
func (r *Search) DocvalueFields(docvaluefields ...types.FieldAndFormat) *Search {
	r.req.DocvalueFields = docvaluefields
	return r
}

// Explain If true, returns detailed information about score computation as part of a
// hit.
// API name: explain
func (r *Search) Explain(explain bool) *Search {
	r.req.Explain = &explain
	return r
}

// Ext Configuration of search extensions defined by Elasticsearch plugins.
// API name: ext
func (r *Search) Ext(ext map[string]json.RawMessage) *Search {
	r.req.Ext = ext
	return r
}

// Fields Array of wildcard (*) patterns. The request returns values for field names
// matching these patterns in the hits.fields property of the response.
// API name: fields
func (r *Search) Fields(fields ...types.FieldAndFormat) *Search {
	r.req.Fields = fields
	return r
}

// From Starting document offset. By default, you cannot page through more than
// 10,000
// hits using the from and size parameters. To page through more hits, use the
// search_after parameter.
// API name: from
func (r *Search) From(from int) *Search {
	r.req.From = &from
	return r
}

// Highlight sets the "highlight" field of the request body.
// API name: highlight
func (r *Search) Highlight(highlight *types.Highlight) *Search {
	r.req.Highlight = highlight
	return r
}

// IndicesBoost Boosts the _score of documents from specified indices.
// API name: indices_boost
func (r *Search) IndicesBoost(indicesboosts ...map[string]types.Float64) *Search {
	r.req.IndicesBoost = indicesboosts
	return r
}

// MinScore Minimum _score for matching documents. Documents with a lower _score are
// not included in the search results.
// API name: min_score
func (r *Search) MinScore(minscore types.Float64) *Search {
	r.req.MinScore = &minscore
	return r
}

// Pit Limits the search to a point in time (PIT). If you provide a PIT, you
// cannot specify an <index> in the request path.
// API name: pit
func (r *Search) Pit(pit *types.PointInTimeReference) *Search {
	r.req.Pit = pit
	return r
}

// PostFilter sets the "post_filter" field of the request body.
// API name: post_filter
func (r *Search) PostFilter(postfilter *types.Query) *Search {
	r.req.PostFilter = postfilter
	return r
}

// Profile sets the "profile" field of the request body.
// API name: profile
func (r *Search) Profile(profile bool) *Search {
	r.req.Profile = &profile
	return r
}

// Query Defines the search definition using the Query DSL.
// API name: query
func (r *Search) Query(query *types.Query) *Search {
	r.req.Query = query
	return r
}

// Rescore sets the "rescore" field of the request body.
// API name: rescore
func (r *Search) Rescore(rescores ...types.Rescore) *Search {
	r.req.Rescore = rescores
	return r
}

// RuntimeMappings Defines one or more runtime fields in the search request. These fields take
// precedence over mapped fields with the same name.
// API name: runtime_mappings
func (r *Search) RuntimeMappings(runtimefields types.RuntimeFields) *Search {
	r.req.RuntimeMappings = runtimefields
	return r
}

// ScriptFields Retrieve a script evaluation (based on different fields) for each hit.
// API name: script_fields
func (r *Search) ScriptFields(scriptfields map[string]types.ScriptField) *Search {
	r.req.ScriptFields = scriptfields
	return r
}

// SearchAfter sets the "search_after" field of the request body.
// API name: search_after
func (r *Search) SearchAfter(sortresults ...types.FieldValue) *Search {
	r.req.SearchAfter = sortresults
	return r
}

// SeqNoPrimaryTerm If true, returns sequence number and primary term of the last modification
// of each hit. See Optimistic concurrency control.
// API name: seq_no_primary_term
func (r *Search) SeqNoPrimaryTerm(seqnoprimaryterm bool) *Search {
	r.req.SeqNoPrimaryTerm = &seqnoprimaryterm
	return r
}

// Size The number of hits to return. By default, you cannot page through more
// than 10,000 hits using the from and size parameters. To page through more
// hits, use the search_after parameter.
// API name: size
func (r *Search) Size(size int) *Search {
	r.req.Size = &size
	return r
}

// Slice sets the "slice" field of the request body.
// API name: slice
func (r *Search) Slice(slice *types.SlicedScroll) *Search {
	r.req.Slice = slice
	return r
}

// Sort sets the "sort" field of the request body.
// API name: sort
func (r *Search) Sort(sorts ...types.SortCombinations) *Search {
	r.req.Sort = sorts
	return r
}

// Source_ Indicates which source fields are returned for matching documents. These
// fields are returned in the hits._source property of the search response.
// API name: _source
func (r *Search) Source_(sourceconfig types.SourceConfig) *Search {
	r.req.Source_ = sourceconfig
	return r
}

// Stats Stats groups to associate with the search. Each group maintains a statistics
// aggregation for its associated searches. You can retrieve these stats using
// the indices stats API.
// API name: stats
func (r *Search) Stats(stats ...string) *Search {
	r.req.Stats = stats
	return r
}

// StoredFields List of stored fields to return as part of a hit. If no fields are specified,
// no stored fields are included in the response. If this field is specified,
// the _source
// parameter defaults to false. You can pass _source: true to return both source
// fields
// and stored fields in the search response.
// API name: stored_fields
func (r *Search) StoredFields(fields ...string) *Search {
	r.req.StoredFields = fields
	return r
}

// Suggest sets the "suggest" field of the request body.
// API name: suggest
func (r *Search) Suggest(suggest *types.Suggester) *Search {
	r.req.Suggest = suggest
	return r
}

// TerminateAfter Maximum number of documents to collect for each shard. If a query reaches
// this
// limit, Elasticsearch terminates the query early. Elasticsearch collects
// documents
// before sorting. Defaults to 0, which does not terminate query execution
// early.
// API name: terminate_after
func (r *Search) TerminateAfter(terminateafter int64) *Search {
	r.req.TerminateAfter = &terminateafter
	return r
}

// Timeout Specifies the period of time to wait for a response from each shard. If no
// response
// is received before the timeout expires, the request fails and returns an
// error.
// Defaults to no timeout.
// API name: timeout
func (r *Search) Timeout(timeout string) *Search {
	r.req.Timeout = &timeout
	return r
}

// TrackScores If true, calculate and return document scores, even if the scores are not
// used for sorting.
// API name: track_scores
func (r *Search) TrackScores(trackscores bool) *Search {
	r.req.TrackScores = &trackscores
	return r
}

// TrackTotalHits Number of hits matching the query to count accurately. If true, the exact
// number of hits is returned at the cost of some performance. If false, the
// response does not include the total number of hits matching the query.
// Defaults to 10,000 hits.
// API name: track_total_hits
func (r *Search) TrackTotalHits(trackhits types.TrackHits) *Search {
	r.req.TrackTotalHits = trackhits
	return r
}

// Version If true, returns document version as part of a hit.
// API name: version
func (r *Search) Version(version bool) *Search {
	r.req.Version = &version
	return r
}
|
//
// This package was written by Paul Schou in Dec 2020
//
// Originally intended to help with linking two apps together and expanded to be a general
// open source software for use to link apps together that usually don't do mTLS (mutual TLS)
//
package main
import (
"crypto/rand"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"github.com/pschou/go-params"
"io"
"io/ioutil"
"log"
"net"
"os"
"sync"
"time"
)
// DNS pairs a network address with the time the entry was recorded.
// NOTE(review): used as the value type of DNSCache; nothing in this chunk
// reads or writes the cache — confirm its use elsewhere.
type DNS struct {
	Addr string    // network address
	Time time.Time // when this entry was stored
}
// target_addr is the HOST:PORT dialed for every accepted connection;
// set once in main from the -target flag.
var target_addr = ""

// DNSCache maps a name to a cached DNS entry (not touched in this chunk).
var DNSCache = make(map[string]DNS, 0)

// Certificate/key/CA file paths, set from flags in main and re-read by
// loadKeys every minute.
var keyFile = ""
var certFile = ""

// keypair is the currently loaded server certificate; keypair_count counts
// consecutive failed reload attempts. Guarded by keypair_mu in loadKeys.
var keypair *tls.Certificate
var keypair_count = 0
var keypair_mu sync.RWMutex

var rootFile = ""

// root_count counts consecutive failed CA-bundle reload attempts.
var root_count = 0
var rootpool *x509.CertPool

// certs_loaded remembers CA certs (keyed by serial number + subject) already
// added to rootpool so periodic reloads do not add duplicates.
var certs_loaded = make(map[string]bool, 0)

// debug enables verbose logging (-debug flag).
var debug = false

// version is printed in the usage banner; "not set" unless assigned elsewhere.
var version = "not set"
// loadKeys (re)loads the TLS keypair (certFile/keyFile) and the CA bundle
// (rootFile) into the package-level state. On a failed reload it keeps the
// previously loaded material and retries on subsequent calls; after more
// than 10 consecutive failures — or on the very first failure when nothing
// was ever loaded — it aborts the process.
func loadKeys() {
	// Bug fix: this function mutates keypair, keypair_count and root_count,
	// so it must take the writer lock; the original took RLock.
	keypair_mu.Lock()
	defer keypair_mu.Unlock()
	tmp_key, err_k := tls.LoadX509KeyPair(certFile, keyFile)
	if err_k != nil {
		if keypair == nil {
			// Bug fix: the original logged `err`, a declared-but-never-assigned
			// variable that was always nil, hiding the real cause.
			log.Fatalf("failed to load keypair: %s", err_k)
		}
		keypair_count++
		log.Println("WARNING: Cannot load keypair (cert/key)", certFile, keyFile, "attempt:", keypair_count)
		if keypair_count > 10 {
			log.Fatalf("failed to load keypair: %s", err_k)
		}
	} else {
		if debug {
			log.Println("Loaded keypair", certFile, keyFile)
		}
		keypair = &tmp_key
		keypair_count = 0
	}
	err_r := LoadCertficatesFromFile(rootFile)
	if err_r != nil {
		if rootpool == nil {
			log.Fatalf("failed to load CA: %s", err_r)
		}
		root_count++
		log.Println("WARNING: Cannot load CA file", rootFile, "attempt:", root_count)
		if root_count > 10 {
			log.Fatalf("failed to load CA: %s", err_r)
		}
	} else {
		if debug {
			log.Println("Loaded CA", rootFile)
		}
		root_count = 0
	}
}
// main parses the command-line flags, loads the TLS material, opens a
// listener (TLS or plain TCP depending on -tls), and forwards every accepted
// connection to the configured target over TLS, copying bytes both ways.
func main() {
	params.Usage = func() {
		fmt.Fprintf(os.Stderr, "Simple SSL forwarder, written by Paul Schou github@paulschou.com in December 2020\nAll rights reserved, personal use only, provided AS-IS -- not responsible for loss.\nUsage implies agreement. Version: %s\n\nUsage: %s [options...]\n\n", version, os.Args[0])
		params.PrintDefaults()
	}
	var tls_enabled = params.Bool("tls", true, "Enable listener TLS", "BOOL")
	var verbose = params.Pres("debug", "Verbose output")
	params.GroupingSet("Listener")
	var listen = params.String("listen", ":7443", "Listen address for forwarder", "HOST:PORT")
	var verify_server = params.Bool("verify-server", true, "Verify server, do certificate checks", "BOOL")
	var secure_server = params.Bool("secure-server", true, "Enforce minimum of TLS 1.2 on server side", "BOOL")
	params.GroupingSet("Target")
	var target = params.String("target", "127.0.0.1:443", "Sending address for forwarder", "HOST:PORT")
	var verify_client = params.Bool("verify-client", true, "Verify client, do certificate checks", "BOOL")
	var secure_client = params.Bool("secure-client", true, "Enforce minimum of TLS 1.2 on client side", "BOOL")
	var tls_host = params.String("host", "", "Hostname to verify outgoing connection with", "FQDN")
	params.GroupingSet("Certificate")
	var cert_file = params.String("cert", "/etc/pki/server.pem", "File to load with CERT - automatically reloaded every minute\n", "FILE")
	var key_file = params.String("key", "/etc/pki/server.pem", "File to load with KEY - automatically reloaded every minute\n", "FILE")
	var root_file = params.String("ca", "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", "File to load with ROOT CAs - reloaded every minute by adding any new entries\n", "FILE")
	params.CommandLine.Indent = 2
	params.Parse()
	var err error
	// Publish the parsed flags into the package-level state that loadKeys
	// and the per-connection goroutine read.
	debug = *verbose
	keyFile = *key_file
	certFile = *cert_file
	rootFile = *root_file
	rootpool = x509.NewCertPool()
	loadKeys()
	// Re-run loadKeys once a minute so rotated cert/key/CA files are picked
	// up without a restart.
	go func() {
		ticker := time.NewTicker(time.Minute)
		for {
			select {
			case <-ticker.C:
				loadKeys()
			}
		}
	}()
	var l net.Listener
	if *tls_enabled {
		var config tls.Config
		if *secure_server {
			// Strict profile: TLS 1.2 minimum and a fixed cipher/curve list.
			config = tls.Config{RootCAs: rootpool,
				Certificates: []tls.Certificate{},
				ClientCAs:    rootpool, InsecureSkipVerify: *verify_server == false,
				MinVersion:               tls.VersionTLS12,
				CurvePreferences:         []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
				PreferServerCipherSuites: true,
				CipherSuites: []uint16{
					tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
					tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
					tls.TLS_RSA_WITH_AES_256_CBC_SHA,
				},
			}
		} else {
			config = tls.Config{RootCAs: rootpool,
				ClientCAs: rootpool, InsecureSkipVerify: *verify_server == false}
		}
		// Serve whatever keypair loadKeys most recently installed.
		// NOTE(review): this reads the package-level keypair without taking
		// keypair_mu — confirm this race with loadKeys is acceptable.
		config.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if debug {
				log.Println(" Get Cert Returning keypair")
			}
			return keypair, nil
		}
		config.Rand = rand.Reader
		if debug {
			fmt.Println("TLS Listening on", *listen)
		}
		if l, err = tls.Listen("tcp", *listen, &config); err != nil {
			log.Fatal(err)
		}
	} else {
		var err error
		if debug {
			fmt.Println("Listening on", *listen)
		}
		if l, err = net.Listen("tcp", *listen); err != nil {
			log.Fatal(err)
		}
	}
	if debug {
		fmt.Println("Target set to", *target)
	}
	target_addr = *target
	defer l.Close()
	// Accept loop: each connection is forwarded by its own goroutine.
	for {
		conn, err := l.Accept() // Wait for a connection.
		if err != nil {
			fmt.Println("Error on accept", err)
			continue
		}
		if debug {
			fmt.Println("New connection from", conn.RemoteAddr())
		}
		go func(c net.Conn) {
			// c and conn are the same connection here; both defers close it.
			defer conn.Close()
			defer c.Close()
			var tlsConfig *tls.Config
			if *secure_client {
				// Same strict profile as the listener side.
				tlsConfig = &tls.Config{Certificates: []tls.Certificate{*keypair}, RootCAs: rootpool,
					ClientCAs: rootpool, InsecureSkipVerify: *verify_client == false, ServerName: *tls_host,
					MinVersion:               tls.VersionTLS12,
					CurvePreferences:         []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
					PreferServerCipherSuites: true,
					CipherSuites: []uint16{
						tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
						tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
						tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
						tls.TLS_RSA_WITH_AES_256_CBC_SHA,
					},
				}
			} else {
				tlsConfig = &tls.Config{Certificates: []tls.Certificate{*keypair}, RootCAs: rootpool,
					ClientCAs: rootpool, InsecureSkipVerify: *verify_client == false, ServerName: *tls_host}
			}
			tlsConfig.Rand = rand.Reader
			if debug {
				log.Println("dialing endpoint:", target_addr)
			}
			remote, err := tls.Dial("tcp", target_addr, tlsConfig)
			if err != nil {
				log.Println("error dialing endpoint:", target_addr, "error:", err)
				return
			}
			if debug {
				log.Println("connected!", target_addr)
			}
			// Pump bytes in both directions; returning closes both ends.
			go io.Copy(remote, c)
			io.Copy(c, remote)
		}(conn)
	}
}
// LoadCertficatesFromFile reads a PEM bundle from path and adds every
// CERTIFICATE block not seen before to the package-level root pool,
// remembering each cert by its serial number + subject so repeated reloads
// do not add duplicates.
func LoadCertficatesFromFile(path string) error {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	for {
		var block *pem.Block
		block, data = pem.Decode(data)
		if block == nil {
			// No further PEM blocks in the remaining bytes.
			return nil
		}
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, parseErr := x509.ParseCertificate(block.Bytes)
		if parseErr != nil {
			fmt.Println("warning: error parsing CA cert", parseErr)
			continue
		}
		fingerprint := fmt.Sprintf("%v%v", cert.SerialNumber, cert.Subject)
		if certs_loaded[fingerprint] {
			continue
		}
		if debug {
			fmt.Println(" Adding CA:", cert.Subject)
		}
		rootpool.AddCert(cert)
		certs_loaded[fingerprint] = true
	}
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
"github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface"
)
// mockLogout is a test double for the Cognito identity provider client:
// it embeds the interface and overrides GlobalSignOut to return Response.
type mockLogout struct {
	cognitoidentityprovideriface.CognitoIdentityProviderAPI
	Response cognitoidentityprovider.GlobalSignOutOutput // canned successful reply
}
// GlobalSignOut mocks the Cognito GlobalSignOut call: the canned Response is
// returned for the token "CorrectToken"; any other token yields a
// ResourceNotFound error.
func (d mockLogout) GlobalSignOut(in *cognitoidentityprovider.GlobalSignOutInput) (*cognitoidentityprovider.GlobalSignOutOutput, error) {
	if *in.AccessToken == "CorrectToken" {
		return &d.Response, nil
	}
	return nil, awserr.New(
		cognitoidentityprovider.ErrCodeResourceNotFoundException,
		"Resources not found",
		errors.New("Resources not found"),
	)
}
// TestHandleRequest exercises deps.HandleRequest against the mocked Cognito
// client: a valid token signs out cleanly; an invalid token must produce an
// error with a friendly message.
func TestHandleRequest(t *testing.T) {
	t.Run("Successfully logout a user", func(t *testing.T) {
		// load test data
		jsonFile, err := os.Open("./testdata/logout-payload.json")
		if err != nil {
			// Bug fix: the original only printed the error and then carried
			// on with an unusable file handle.
			fmt.Println(err)
			t.FailNow()
		}
		defer jsonFile.Close()
		var signOutInput SignOutInput
		byteJSON, err := ioutil.ReadAll(jsonFile)
		if err != nil {
			// Bug fix: the original discarded the read error.
			t.Fatalf("reading test payload: %v", err)
		}
		if err := json.Unmarshal(byteJSON, &signOutInput); err != nil {
			// Bug fix: the original discarded the unmarshal error, so a
			// broken fixture would silently test the zero value.
			t.Fatalf("unmarshalling test payload: %v", err)
		}
		// create mock output
		m := mockLogout{}
		// create dependancy object
		d := deps{
			cognito: m,
		}
		//execute test of function
		_, err = d.HandleRequest(nil, signOutInput)
		if err != nil {
			t.Error("Failed to logout user")
		}
	})
	t.Run("Send incorrect token for logout", func(t *testing.T) {
		// load test data
		signOutInput := SignOutInput{AccessToken: "IncorrectToken"}
		// create mock output
		m := mockLogout{}
		// create dependancy object
		d := deps{
			cognito: m,
		}
		//execute test of function
		result, err := d.HandleRequest(nil, signOutInput)
		if result.Message != "The access token provided is invalid" || err == nil {
			t.Error("Failed to catch and handle and incorrect token exception")
		}
	})
}
|
package chainedhashmap
import "fmt"
// node is a single entry in a bucket's singly linked chain.
type node struct {
	val  interface{}
	next *node
}

// linkedlist is a bucket: a chain of nodes plus its current length.
type linkedlist struct {
	len  int
	next *node
}

// chainedHashMap is a hash table that resolves collisions by chaining:
// each bucket holds a singly linked list of values.
type chainedHashMap struct {
	cap    int
	bucket []*linkedlist
}

// Init initializes the chained hash map with cap buckets, each an empty list.
func (h *chainedHashMap) Init(cap int) {
	h.cap = cap
	if h.cap != 0 {
		h.bucket = make([]*linkedlist, h.cap)
	}
	// Bug fix: the original assigned the new list to the range loop
	// variable (a copy), leaving every bucket entry nil and guaranteeing a
	// nil-pointer dereference on first use. Assign through the index.
	for i := range h.bucket {
		h.bucket[i] = &linkedlist{len: 0, next: nil}
	}
}

// Destroy empties every bucket and releases the bucket slice.
func (h *chainedHashMap) Destroy() {
	for _, v := range h.bucket {
		v.len, v.next = 0, nil
	}
	h.bucket = nil
}

// Hash maps an item to a bucket index using the division method
// (item mod capacity). For now items are assumed to be ints.
// NOTE(review): a negative item yields a negative index — confirm callers
// only pass non-negative keys.
func (h *chainedHashMap) Hash(item int) int {
	return item % h.cap
}

// Lookup reports whether item is already stored in the table.
func (h *chainedHashMap) Lookup(item int) bool {
	key := h.Hash(item)
	if h.bucket[key].len == 0 {
		return false
	}
	for tmp := h.bucket[key].next; tmp != nil; tmp = tmp.next {
		if tmp.val == item {
			return true
		}
	}
	return false
}

// Insert appends item to its bucket's chain; duplicates are rejected.
func (h *chainedHashMap) Insert(item int) {
	if h.Lookup(item) {
		fmt.Println("item already exist!")
		// Bug fix: the original fell through and inserted the duplicate.
		return
	}
	key := h.Hash(item)
	tmp := h.bucket[key].next
	if tmp == nil {
		h.bucket[key].next = &node{item, nil}
		h.bucket[key].len++
		return
	}
	for tmp.next != nil {
		tmp = tmp.next
	}
	tmp.next = &node{item, nil}
	h.bucket[key].len++
}

// Remove deletes item from its bucket's chain if present.
func (h *chainedHashMap) Remove(item int) {
	if !h.Lookup(item) {
		fmt.Println("Can't remove because item not exist!")
		// Bug fix: the original continued and dereferenced nil nodes.
		return
	}
	key := h.Hash(item)
	tmp := h.bucket[key].next
	if tmp.val == item {
		// Item is the chain head: unlink it and stop.
		// Bug fix: the original kept scanning past the removed head, which
		// could walk off the end of the chain (nil dereference).
		h.bucket[key].next = tmp.next
		h.bucket[key].len--
		return
	}
	for tmp.next.val != item {
		tmp = tmp.next
	}
	tmp.next = tmp.next.next
	h.bucket[key].len--
}

// Size returns the total number of items stored across all buckets.
func (h *chainedHashMap) Size() int {
	var sum int
	for _, v := range h.bucket {
		sum += v.len
	}
	return sum
}
|
package auth
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path"
"testing"
"github.com/ubclaunchpad/inertia/common"
"github.com/stretchr/testify/assert"
)
// getTestPermissionsHandler creates dir and returns a PermissionsHandler
// backed by a users.db file inside it, bound to 127.0.0.1:3000 and using
// the fake API-key lookup.
func getTestPermissionsHandler(dir string) (*PermissionsHandler, error) {
	err := os.Mkdir(dir, os.ModePerm)
	if err != nil {
		return nil, err
	}
	return NewPermissionsHandler(
		path.Join(dir, "users.db"),
		"127.0.0.1", 3000,
		getFakeAPIKey,
	)
}
// TestServeHTTPPublicPath verifies that a handler attached as public is
// reachable without any credentials.
func TestServeHTTPPublicPath(t *testing.T) {
	dir := "./test_perm_publicpath"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	ph.AttachPublicHandlerFunc("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	// Unauthenticated request must succeed on a public path.
	req, err := http.NewRequest("POST", ts.URL+"/test", nil)
	assert.Nil(t, err)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}
// TestServeHTTPWithUserReject verifies that a user-restricted handler
// rejects an unauthenticated request with 403.
func TestServeHTTPWithUserReject(t *testing.T) {
	dir := "./test_perm_reject"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	ph.AttachUserRestrictedHandlerFunc("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	// No login, no cookie: expect Forbidden.
	req, err := http.NewRequest("POST", ts.URL+"/test", nil)
	assert.Nil(t, err)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusForbidden, resp.StatusCode)
}
// TestServeHTTPWithUserLoginAndLogout runs the full session lifecycle:
// register, log in (capturing the session cookie), validate the session,
// then log out and check the cookie is expired (MaxAge 0).
func TestServeHTTPWithUserLoginAndLogout(t *testing.T) {
	dir := "./test_perm_loginlogout"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	ph.AttachUserRestrictedHandlerFunc("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	// Register user
	err = ph.users.AddUser("bobheadxi", "wowgreat", false)
	assert.Nil(t, err)
	// Login in as user, use cookiejar to catch cookie
	user := &common.UserRequest{Username: "bobheadxi", Password: "wowgreat"}
	body, err := json.Marshal(user)
	assert.Nil(t, err)
	req, err := http.NewRequest("POST", ts.URL+"/user/login", bytes.NewReader(body))
	assert.Nil(t, err)
	loginResp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer loginResp.Body.Close()
	assert.Equal(t, http.StatusOK, loginResp.StatusCode)
	// Check for cookies
	assert.True(t, len(loginResp.Cookies()) > 0)
	cookie := loginResp.Cookies()[0]
	assert.Equal(t, "ubclaunchpad-inertia", cookie.Name)
	// Attempt to validate
	req, err = http.NewRequest("POST", ts.URL+"/user/validate", nil)
	assert.Nil(t, err)
	req.AddCookie(cookie)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	// Log out
	req, err = http.NewRequest("POST", ts.URL+"/user/logout", nil)
	assert.Nil(t, err)
	req.AddCookie(cookie)
	logoutResp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer logoutResp.Body.Close()
	assert.Equal(t, http.StatusOK, logoutResp.StatusCode)
	// Check for cookies: logout must return an expired session cookie.
	assert.True(t, len(logoutResp.Cookies()) > 0)
	cookie = logoutResp.Cookies()[0]
	assert.Equal(t, "ubclaunchpad-inertia", cookie.Name)
	assert.Equal(t, 0, cookie.MaxAge)
}
// TestServeHTTPWithUserLoginAndAccept verifies that a logged-in user's
// session cookie grants access to a user-restricted endpoint.
func TestServeHTTPWithUserLoginAndAccept(t *testing.T) {
	dir := "./test_perm_loginaccept"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	ph.AttachUserRestrictedHandlerFunc("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	// Register user
	err = ph.users.AddUser("bobheadxi", "wowgreat", false)
	assert.Nil(t, err)
	// Login in as user, use cookiejar to catch cookie
	user := &common.UserRequest{Username: "bobheadxi", Password: "wowgreat"}
	body, err := json.Marshal(user)
	assert.Nil(t, err)
	req, err := http.NewRequest("POST", ts.URL+"/user/login", bytes.NewReader(body))
	assert.Nil(t, err)
	loginResp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer loginResp.Body.Close()
	assert.Equal(t, http.StatusOK, loginResp.StatusCode)
	// Check for cookies
	assert.True(t, len(loginResp.Cookies()) > 0)
	cookie := loginResp.Cookies()[0]
	assert.Equal(t, "ubclaunchpad-inertia", cookie.Name)
	// Attempt to access restricted endpoint with cookie
	req, err = http.NewRequest("POST", ts.URL+"/test", nil)
	assert.Nil(t, err)
	req.AddCookie(cookie)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}
// TestServeHTTPDenyNonAdmin verifies that a non-admin user's session cookie
// is rejected (403) by an admin-restricted endpoint.
func TestServeHTTPDenyNonAdmin(t *testing.T) {
	dir := "./test_perm_denynonadmin"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	ph.AttachAdminRestrictedHandlerFunc("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	// Register user (admin flag false)
	err = ph.users.AddUser("bobheadxi", "wowgreat", false)
	assert.Nil(t, err)
	// Login in as user, use cookiejar to catch cookie
	user := &common.UserRequest{Username: "bobheadxi", Password: "wowgreat"}
	body, err := json.Marshal(user)
	assert.Nil(t, err)
	req, err := http.NewRequest("POST", ts.URL+"/user/login", bytes.NewReader(body))
	assert.Nil(t, err)
	loginResp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer loginResp.Body.Close()
	assert.Equal(t, http.StatusOK, loginResp.StatusCode)
	// Check for cookies
	assert.True(t, len(loginResp.Cookies()) > 0)
	cookie := loginResp.Cookies()[0]
	assert.Equal(t, "ubclaunchpad-inertia", cookie.Name)
	// Attempt to access restricted endpoint with cookie
	req, err = http.NewRequest("POST", ts.URL+"/test", nil)
	assert.Nil(t, err)
	req.AddCookie(cookie)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusForbidden, resp.StatusCode)
}
// TestServeHTTPAllowAdmin verifies that an admin user's session cookie is
// accepted by an admin-restricted endpoint.
func TestServeHTTPAllowAdmin(t *testing.T) {
	dir := "./test_perm_disallowadmin"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	ph.AttachAdminRestrictedHandlerFunc("/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	// Register user (admin flag true)
	err = ph.users.AddUser("bobheadxi", "wowgreat", true)
	assert.Nil(t, err)
	// Login in as user, use cookiejar to catch cookie
	user := &common.UserRequest{Username: "bobheadxi", Password: "wowgreat"}
	body, err := json.Marshal(user)
	assert.Nil(t, err)
	req, err := http.NewRequest("POST", ts.URL+"/user/login", bytes.NewReader(body))
	assert.Nil(t, err)
	loginResp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer loginResp.Body.Close()
	assert.Equal(t, http.StatusOK, loginResp.StatusCode)
	// Check for cookies
	assert.True(t, len(loginResp.Cookies()) > 0)
	cookie := loginResp.Cookies()[0]
	assert.Equal(t, "ubclaunchpad-inertia", cookie.Name)
	// Attempt to access restricted endpoint with cookie
	req, err = http.NewRequest("POST", ts.URL+"/test", nil)
	assert.Nil(t, err)
	req.AddCookie(cookie)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}
// TestUserControlHandlers drives the user-management endpoints (adduser,
// removeuser, listusers, resetusers) using bearer-token authentication.
func TestUserControlHandlers(t *testing.T) {
	dir := "./test_perm_usercontrol"
	ts := httptest.NewServer(nil)
	defer ts.Close()
	// Set up permission handler
	ph, err := getTestPermissionsHandler(dir)
	defer os.RemoveAll(dir)
	assert.Nil(t, err)
	defer ph.Close()
	ts.Config.Handler = ph
	// Test handler uses the getFakeAPIToken keylookup, which
	// will match with the testToken
	bearerTokenString := fmt.Sprintf("Bearer %s", testToken)
	// Add a new user
	body, err := json.Marshal(&common.UserRequest{
		Username: "jimmyneutron",
		Password: "asfasdlfjk",
		Admin:    false,
	})
	assert.Nil(t, err)
	payload := bytes.NewReader(body)
	req, err := http.NewRequest("POST", ts.URL+"/user/adduser", payload)
	assert.Nil(t, err)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", bearerTokenString)
	resp, err := http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusCreated, resp.StatusCode)
	// Remove a user
	body, err = json.Marshal(&common.UserRequest{
		Username: "jimmyneutron",
	})
	assert.Nil(t, err)
	payload = bytes.NewReader(body)
	req, err = http.NewRequest("POST", ts.URL+"/user/removeuser", payload)
	assert.Nil(t, err)
	req.Header.Set("Authorization", bearerTokenString)
	resp, err = http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	// List users
	req, err = http.NewRequest("POST", ts.URL+"/user/listusers", nil)
	assert.Nil(t, err)
	req.Header.Set("Authorization", bearerTokenString)
	resp, err = http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	// Reset all users
	req, err = http.NewRequest("POST", ts.URL+"/user/resetusers", nil)
	assert.Nil(t, err)
	req.Header.Set("Authorization", bearerTokenString)
	resp, err = http.DefaultClient.Do(req)
	assert.Nil(t, err)
	defer resp.Body.Close()
	assert.Equal(t, http.StatusOK, resp.StatusCode)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.