text stringlengths 11 4.05M |
|---|
package Word_Ladder
import "math"
// ladderLength solves LeetCode 127 (Word Ladder): the number of words in the
// shortest transformation sequence from beginWord to endWord, changing one
// letter at a time with every intermediate word taken from wordList, or 0 if
// no such sequence exists.
//
// It builds a virtual graph: every word is connected to "wildcard" nodes made
// by replacing one letter with '*' (hot -> *ot, h*t, ho*), so two words one
// letter apart sit two edges from each other. A bidirectional BFS then
// expands one level from each side per round until the frontiers meet.
func ladderLength(beginWord string, endWord string, wordList []string) int {
	// wordId interns every real word and wildcard pattern to a dense node id.
	wordId := make(map[string]int)
	// graph[id] lists neighbouring node ids (word <-> wildcard edges only).
	graph := make([][]int, 0)
	// addWord allocates a node id on first sight and returns it thereafter.
	addWord := func(word string) int {
		id, has := wordId[word]
		if has {
			return id
		}
		wordId[word] = len(wordId)
		graph = append(graph, []int{})
		return wordId[word]
	}
	// addEdge interns word and wires it to each of its wildcard patterns.
	addEdge := func(word string) int {
		id1 := addWord(word)
		bts := []byte(word)
		for i := 0; i < len(word); i++ {
			bts[i] = '*'
			id2 := addWord(string(bts))
			bts[i] = word[i] // restore the original letter
			graph[id1] = append(graph[id1], id2)
			graph[id2] = append(graph[id2], id1)
		}
		return id1
	}
	for _, word := range wordList {
		addEdge(word)
	}
	// endWord must already appear in wordList; otherwise no ladder exists.
	endId, endExist := wordId[endWord]
	if !endExist {
		return 0
	}
	beginId := addEdge(beginWord)
	// initDist marks "not visited yet" in both distance tables.
	const initDist = math.MaxInt32
	endQueue := []int{endId}
	endDist := make([]int, len(wordId))
	for i := range endDist {
		endDist[i] = initDist
	}
	endDist[endId] = 0
	beginQueue := []int{beginId}
	beginDist := make([]int, len(wordId))
	for i := range beginDist {
		beginDist[i] = initDist
	}
	beginDist[beginId] = 0
	// Expand one BFS level per side per iteration; stop when a frontier
	// empties (no path) or a node has been reached from both sides.
	for len(beginQueue) > 0 && len(endQueue) > 0 {
		queue := beginQueue
		beginQueue = nil
		for _, beginLast := range queue {
			if endDist[beginLast] < initDist {
				// Met the other side: total edge distance / 2 converts
				// edges back to word hops, +1 counts beginWord itself.
				return (beginDist[beginLast]+endDist[beginLast])/2 + 1
			}
			for _, wid := range graph[beginLast] {
				if beginDist[wid] == initDist {
					beginDist[wid] = beginDist[beginLast] + 1
					beginQueue = append(beginQueue, wid)
				}
			}
		}
		queue = endQueue
		endQueue = nil
		for _, endLast := range queue {
			if beginDist[endLast] < initDist {
				return (beginDist[endLast]+endDist[endLast])/2 + 1
			}
			for _, wid := range graph[endLast] {
				if endDist[wid] == initDist {
					endDist[wid] = endDist[endLast] + 1
					endQueue = append(endQueue, wid)
				}
			}
		}
	}
	return 0
}
|
package id_035
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a singly-linked list node (standard LeetCode definition).
type ListNode struct {
	Val  int       // payload value
	Next *ListNode // nil marks the end of the list
}
/*
思路1:双指针法
给定两个链表的指针p1, p2分别标记l1, l2
对p1,p2所指的节点进行比较将小的插入到新的链表中
如果两边都比较完成之后、还有多余的元素没有插入、说明未插入元素比前面的都大、直接追加到后面
*/
// mergeTwoLists merges two sorted lists into one sorted list by splicing the
// existing nodes together (no allocation beyond a sentinel). On equal values
// the node from l1 is taken first, keeping the merge stable.
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
	sentinel := &ListNode{}
	tail := sentinel
	for l1 != nil && l2 != nil {
		if l2.Val < l1.Val {
			tail.Next = l2
			l2 = l2.Next
		} else {
			tail.Next = l1
			l1 = l1.Next
		}
		tail = tail.Next
	}
	// At most one list still has nodes; append its remainder wholesale.
	if l1 != nil {
		tail.Next = l1
	}
	if l2 != nil {
		tail.Next = l2
	}
	return sentinel.Next
}
/*
思路2:递归
返回值:排好序的链表头
终止条件:l1为空或者l2为空
调用单元实现功能:l1与l2进行比较、如果l1.val<l2.val、则l1作为当前子链表表头、再将l1.next.val再与l2.val进行比较
*/
// mergeTwoLists2 merges two sorted lists recursively: the smaller head
// becomes the result head, and its Next is the merge of the remainder. When
// either list is exhausted the other is returned as-is.
//
// Fix: the recursive calls previously invoked the iterative sibling
// mergeTwoLists, so this "recursive approach" silently depended on it; it now
// recurses into itself as the surrounding comment describes. The produced
// list is identical (on equal values the node from l2 is taken first).
func mergeTwoLists2(l1 *ListNode, l2 *ListNode) *ListNode {
	if l1 == nil {
		return l2
	}
	if l2 == nil {
		return l1
	}
	if l1.Val < l2.Val {
		l1.Next = mergeTwoLists2(l1.Next, l2)
		return l1
	}
	l2.Next = mergeTwoLists2(l1, l2.Next)
	return l2
}
/*
思路3:直接遍历
给定一个空节点作为虚拟头节点
在遍历的同时比较l1和l2大小、只要有一个为空、跳出循环
*/
func mergeTwoLists3(l1 *ListNode, l2 *ListNode) *ListNode {
dummy := &ListNode{}
node := dummy
for ;l1 != nil || l2 != nil; node = node.Next {
if l1 == nil {
node.Next = l2
break
}
if l2 == nil {
node.Next = l1
break
}
if l1.Val >= l2.Val {
node.Next = l2
l2 = l2.Next
} else {
node.Next = l1
l1 = l1.Next
}
}
return dummy
} |
package problem0012
// intToRoman converts num (1..3999) to its Roman numeral representation.
// It peels decimal digits off the low end and prepends the numeral for each
// digit, using a symbol table keyed by value (1, 4, 5, 9 at each power of 10).
func intToRoman(num int) string {
	symbols := map[int]string{
		1:    "I",
		4:    "IV",
		5:    "V",
		9:    "IX",
		10:   "X",
		40:   "XL",
		50:   "L",
		90:   "XC",
		100:  "C",
		400:  "CD",
		500:  "D",
		900:  "CM",
		1000: "M",
	}
	res := ""
	for place := 1; num != 0; place *= 10 {
		digit := num % 10
		num /= 10
		if digit == 0 {
			continue
		}
		switch {
		case digit < 4:
			// 1..3: repeat the unit symbol for this place.
			part := ""
			for j := 0; j < digit; j++ {
				part += symbols[place]
			}
			res = part + res
		case digit > 5 && digit < 9:
			// 6..8: the five-symbol followed by repeated unit symbols.
			part := symbols[5*place]
			for j := 5; j < digit; j++ {
				part += symbols[place]
			}
			res = part + res
		default:
			// 4, 5, 9 have dedicated entries in the table.
			res = symbols[digit*place] + res
		}
	}
	return res
}
|
package servermiddleware
import (
"bytes"
"encoding/json"
"github.com/gin-gonic/gin"
"io/ioutil"
"net/http"
redis "xj_web_server/cache"
"xj_web_server/module"
"xj_web_server/util"
"xj_web_server/util/jwt"
//"strconv"
)
//type BaseAuthReq struct {
// BaseReq
// Uid int `form:"uid" json:"uid" binding:"required"`
//}
// BaseAuth returns gin middleware that authenticates a request: it validates
// the JWT carried in the "token" header and cross-checks it against the token
// stored in redis for that uid. On success the uid is stored in the context
// under "uid"; on any failure the chain is aborted with HTTP 200 and an
// ApiResp carrying ErrorNo 403.
func BaseAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		var authReq BaseReq
		req, err := c.GetRawData()
		if err != nil {
			util.Logger.Errorf("BaseAuth 参数绑定 出错 err: %s ", err.Error())
			c.AbortWithStatusJSON(http.StatusOK, module.ApiResp{ErrorNo: http.StatusForbidden, ErrorMsg: err.Error()})
			return
		}
		// Re-wrap the consumed body so later handlers can read it again.
		c.Request.Body = ioutil.NopCloser(bytes.NewBuffer(req))
		// NOTE(review): authReq is unmarshaled only to reject malformed JSON
		// bodies early; its fields are never read afterwards.
		err = json.Unmarshal(req, &authReq)
		if err != nil {
			util.Logger.Errorf("BaseAuth 参数绑定 出错 err: %s ", err.Error())
			c.AbortWithStatusJSON(http.StatusOK, module.ApiResp{ErrorNo: http.StatusForbidden, ErrorMsg: err.Error()})
			return
		}
		token := c.GetHeader("token")
		if token == "" {
			// No token supplied: treat as a permissions failure.
			c.AbortWithStatusJSON(http.StatusOK, module.ApiResp{
				ErrorNo:  http.StatusForbidden,
				ErrorMsg: http.StatusText(http.StatusForbidden),
			})
			return
		}
		// Verify the JWT signature/claims and extract the uid it encodes.
		et := jwt.EasyToken{}
		valid, tokenUid, err := et.ValidateToken(token)
		if !valid {
			if err != nil {
				util.Logger.Errorf("BaseAuth token 验证 出错 err: %s\n%s ", token, err.Error())
			}
			c.AbortWithStatusJSON(http.StatusOK, module.ApiResp{
				ErrorNo:  http.StatusForbidden,
				ErrorMsg: "token validate failed, please login again.",
			})
			return
		}
		// Check the token is the one currently issued for this uid in redis.
		// NOTE(review): the key ends with a trailing ":" — presumably matching
		// the key format used at login time; confirm against the writer side.
		tokenRedis, err := redis.GetRedisDb().Get(util.RedisKeyToken + tokenUid + ":").Result()
		if tokenRedis != token {
			if err != nil {
				util.Logger.Errorf("BaseAuth token 验证 出错 err: 客户端:%s\nredis:%s\nerr:%s ", tokenRedis, token, err.Error())
			} else {
				util.Logger.Errorf("BaseAuth token 验证 出错 err: 客户端:%s\nredis:%s", tokenRedis, token)
			}
			c.AbortWithStatusJSON(http.StatusOK, module.ApiResp{
				ErrorNo:  http.StatusForbidden,
				ErrorMsg: "token failed, please login again.",
			})
			return
		}
		c.Set("uid", tokenUid)
	}
}
|
package http
// WatchRequest is the /watch request model.
type WatchRequest struct {
	Service    string   `json:"service"`     // Service name (to differentiate multiple requestors): 1..64
	PublicKeys []string `json:"public_keys"` // Destination wallet addresses in Base58
	Callback   string   `json:"callback"`    // Callback for notification: 1..256
}
// UnwatchRequest is the /unwatch request model.
type UnwatchRequest struct {
	Service    string   `json:"service"`     // Service name (to differentiate multiple requestors): 1..64
	PublicKeys []string `json:"public_keys"` // Destination wallet addresses in Base58
}
// RefillEvent is the notification model delivered to the watcher's callback.
type RefillEvent struct {
	Service     string `json:"service"`     // Service name (to differentiate multiple requestors): 1..64
	PublicKey   string `json:"public_key"`  // Destination (watching) wallet address in Base58
	From        string `json:"from"`        // Source wallet address in Base58
	Token       string `json:"token"`       // GOLD or MNT
	Amount      string `json:"amount"`      // Token amount in major units: 1.234 (18 decimal places)
	Transaction string `json:"transaction"` // Digest of the refilling tx in Base58
}
|
package main
import (
"fmt"
"io/ioutil"
)
// main prints a banner and dumps the contents of ./bench.out (previously
// captured benchmark output) to stdout, panicking if the file is unreadable.
// NOTE(review): io/ioutil is deprecated since Go 1.16; os.ReadFile is the
// modern equivalent — left as-is to avoid changing this file's imports.
func main() {
	fmt.Println("Largest Power - Ranking!!")
	dat, err := ioutil.ReadFile("./bench.out")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(dat))
}
|
package main
import (
"encoding/hex"
"fmt"
"net"
"os"
"strconv"
)
// stringHex2Binary expands a hex string into its binary-digit string, four
// bits per hex digit ("f0" -> "11110000"). It returns an error if any
// character is not a hex digit.
//
// Fix: the original formatted each digit with "%b", which drops leading
// zeros (0x1 -> "1" instead of "0001"), so digits 0-7 produced fewer than 4
// bits and positional bit indexing by callers would go wrong for masks with
// low digits before the prefix end. "%04b" makes every digit exactly 4 bits.
func stringHex2Binary(strHex string) (string, error) {
	out := make([]byte, 0, len(strHex)*4)
	for _, c := range []byte(strHex) {
		v, err := strconv.ParseUint(string(c), 16, 8)
		if err != nil {
			return "", err
		}
		out = append(out, fmt.Sprintf("%04b", v)...)
	}
	return string(out), nil
}
func FullIPv6(ip net.IP) string {
dst := make([]byte, hex.EncodedLen(len(ip)))
_ = hex.Encode(dst, ip)
tmpRet := string(dst[0:4]) + ":" +
string(dst[4:8]) + ":" +
string(dst[8:12]) + ":" +
string(dst[12:16]) + ":" +
string(dst[16:20]) + ":" +
string(dst[20:24]) + ":" +
string(dst[24:28]) + ":" +
string(dst[28:])
return tmpRet
}
// FullIPv6WithMask truncates a fully-expanded IPv6 string (as produced by
// FullIPv6) to its network prefix and appends the prefix length, e.g.
// "2001:0db8:…" with a /32 mask -> "2001:0db8::/32". mask is the hex form of
// the netmask (net.IPMask.String()); only masks whose run of leading ones
// ends on a 16-bit group boundary are supported, anything else returns
// "ip format not support".
//
// Fix: removed the leftover debug fmt.Println(k, ipk) that printed internal
// counters to stdout on every call.
func FullIPv6WithMask(ipFull string, mask string) (string, error) {
	maskBits, err := stringHex2Binary(mask)
	if err != nil {
		return "", err
	}
	prefix := make([]byte, 0, len(maskBits))
	src := []byte(ipFull)
	srcIdx := 0
	var bit int
	for bit = 1; bit <= len(maskBits); bit++ {
		if maskBits[bit-1] == '0' {
			break
		}
		// Every 4 mask bits consume one hex digit of the expanded address;
		// on a 16-bit group boundary two characters are consumed (the last
		// digit of the group plus the ':' separator that follows it).
		if bit%4 == 0 {
			if bit%16 == 0 {
				prefix = append(prefix, src[srcIdx])
				srcIdx++
			}
			prefix = append(prefix, src[srcIdx])
			srcIdx++
		}
	}
	if (bit-1)%16 != 0 {
		return "", fmt.Errorf("ip format not support")
	}
	return fmt.Sprintf("%s:/%d", string(prefix), bit-1), nil
}
// getIP formats an IP for output: IPv4 addresses use the usual dotted form,
// IPv6 addresses are fully expanded via FullIPv6, and anything else is an
// error.
func getIP(ipOri net.IP) (string, error) {
	if v4 := ipOri.To4(); v4 != nil {
		return v4.String(), nil
	}
	if v6 := ipOri.To16(); v6 != nil {
		return FullIPv6(v6), nil
	}
	return "", fmt.Errorf("ip %s invalid", ipOri.String())
}
// allowMask whitelists the CIDR prefix lengths accepted without a warning:
// /8, /16, /24, /32 for IPv4 and /64, /128 for IPv6. Other lengths are only
// warned about, not rejected (see checkAndGetIP).
var allowMask = map[string]map[int]bool{
	"ipv4": {8: true, 16: true, 24: true, 32: true},
	"ipv6": {64: true, 128: true},
}
// checkAndGetIP normalizes ipOri, which may be a plain IP literal or a CIDR.
// Plain IPs and full-length CIDRs (/32, /128) are formatted via getIP; IPv4
// networks are returned in net.IPNet form; IPv6 networks are fully expanded
// and truncated to the prefix via FullIPv6WithMask. Prefix lengths outside
// allowMask only produce a warning on stdout.
//
// Fixes: the error from getIP is now checked before its result is passed to
// FullIPv6WithMask (it was silently discarded), and the else-after-return
// nesting is flattened. The unreachable trailing "unknow error" return was
// dropped.
func checkAndGetIP(ipOri string) (string, error) {
	// Plain address (no CIDR suffix).
	if ip := net.ParseIP(ipOri); ip != nil {
		return getIP(ip)
	}
	ipSpefic, ipNet, err := net.ParseCIDR(ipOri)
	if err != nil {
		return "", err
	}
	speficMask, maxMask := ipNet.Mask.Size()
	if speficMask == maxMask {
		// /32 (IPv4) or /128 (IPv6) denotes a single host.
		return getIP(ipSpefic)
	}
	if ipSpefic.To4() != nil {
		if !allowMask["ipv4"][speficMask] {
			fmt.Printf("ip mask err %s\n", ipOri)
		}
		return ipNet.String(), nil
	}
	if ipSpefic.To16() != nil {
		speficMasks := ipNet.Mask.String()
		if !allowMask["ipv6"][speficMask] {
			fmt.Printf("ip mask err %s \n", ipOri)
		}
		ipv6Full, err := getIP(ipSpefic)
		if err != nil {
			return "", err
		}
		return FullIPv6WithMask(ipv6Full, speficMasks)
	}
	return ipNet.String(), nil
}
// main is a manual test driver: it takes an IP literal as the first CLI
// argument and prints its 4-byte and 16-byte representations. It panics if
// no argument is supplied; the commented-out lines exercise checkAndGetIP.
func main() {
	ipOri := os.Args[1]
	//retIp, err := checkAndGetIP(ipOri)
	//fmt.Println("output")
	//fmt.Println(retIp, err)
	{
		ip4 := ipOri
		ipv4 := net.ParseIP(ip4)
		fmt.Println(ipv4, ipv4.To4())
		fmt.Println(ipv4, ipv4.To16())
	}
}
|
package main
import (
"github.com/mitchellh/cli"
"github.com/pragkent/aliyun-disk/command"
)
// Commands returns the CLI command factory table for this driver. Every
// factory constructs a fresh command value on each invocation, copying the
// shared *Meta into it; "version" additionally embeds the build metadata.
func Commands(meta *command.Meta) map[string]cli.CommandFactory {
	factories := make(map[string]cli.CommandFactory, 11)
	factories["init"] = func() (cli.Command, error) {
		return &command.InitCommand{Meta: *meta}, nil
	}
	factories["attach"] = func() (cli.Command, error) {
		return &command.AttachCommand{Meta: *meta}, nil
	}
	factories["isattached"] = func() (cli.Command, error) {
		return &command.IsAttachedCommand{Meta: *meta}, nil
	}
	factories["detach"] = func() (cli.Command, error) {
		return &command.DetachCommand{Meta: *meta}, nil
	}
	factories["mountdevice"] = func() (cli.Command, error) {
		return &command.MountDeviceCommand{Meta: *meta}, nil
	}
	factories["unmountdevice"] = func() (cli.Command, error) {
		return &command.UnmountDeviceCommand{Meta: *meta}, nil
	}
	factories["mount"] = func() (cli.Command, error) {
		return &command.MountCommand{Meta: *meta}, nil
	}
	factories["unmount"] = func() (cli.Command, error) {
		return &command.UnmountCommand{Meta: *meta}, nil
	}
	factories["waitforattach"] = func() (cli.Command, error) {
		return &command.WaitForAttachCommand{Meta: *meta}, nil
	}
	factories["getvolumename"] = func() (cli.Command, error) {
		return &command.GetVolumeNameCommand{Meta: *meta}, nil
	}
	factories["version"] = func() (cli.Command, error) {
		return &command.VersionCommand{
			Meta:     *meta,
			Version:  Version,
			Revision: GitCommit,
			Name:     Name,
		}, nil
	}
	return factories
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tracing
import (
"context"
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/docker/compose/v2/internal"
"go.opentelemetry.io/otel/attribute"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/tracing/detect"
_ "github.com/moby/buildkit/util/tracing/detect/delegated" //nolint:blank-imports
_ "github.com/moby/buildkit/util/tracing/env" //nolint:blank-imports
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
)
// init labels BuildKit's trace detection as "compose" and installs a
// swallow-everything OTEL error handler so tracing failures never surface.
func init() {
	detect.ServiceName = "compose"
	// do not log tracing errors to stdio
	otel.SetErrorHandler(skipErrors{}) // skipErrors is defined elsewhere in this package
}
// Tracer is the package-wide tracer used to create compose spans.
var Tracer = otel.Tracer("compose")

// OTLPConfig contains the necessary values to initialize an OTLP client
// manually.
//
// This supports a minimal set of options based on what is necessary for
// automatic OTEL configuration from Docker context metadata.
type OTLPConfig struct {
	Endpoint string
}

// ShutdownFunc flushes and stops an OTEL exporter.
type ShutdownFunc func(ctx context.Context) error

// envMap is a convenience type for OS environment variables.
type envMap map[string]string
// InitTracing initializes tracing when the COMPOSE_EXPERIMENTAL_OTEL
// environment variable parses as true; otherwise it is a no-op that returns
// a nil shutdown function. The W3C trace-context propagator is installed
// globally in either case (the default propagator is a no-op).
func InitTracing(dockerCli command.Cli) (ShutdownFunc, error) {
	otel.SetTextMapPropagator(propagation.TraceContext{})
	enabled, _ := strconv.ParseBool(os.Getenv("COMPOSE_EXPERIMENTAL_OTEL"))
	if !enabled {
		return nil, nil
	}
	return InitProvider(dockerCli)
}
// InitProvider installs the global OTEL tracer provider, combining up to two
// OTLP GRPC exporters: one configured from standard OTEL_* environment
// variables and one derived from the current Docker context. Any client or
// resource construction error aborts setup; the returned ShutdownFunc
// flushes remaining spans.
func InitProvider(dockerCli command.Cli) (ShutdownFunc, error) {
	ctx := context.Background()
	var errs []error
	var exporters []sdktrace.SpanExporter
	// Exporter 1: from OTEL_* env vars (only when an endpoint is set).
	envClient, otelEnv := traceClientFromEnv()
	if envClient != nil {
		if envExporter, err := otlptrace.New(ctx, envClient); err != nil {
			errs = append(errs, err)
		} else if envExporter != nil {
			exporters = append(exporters, envExporter)
		}
	}
	// Exporter 2: from Docker context metadata; traceClientFromDockerContext
	// is defined elsewhere in this package and also receives the captured
	// OTEL env — presumably to avoid double-exporting; confirm there.
	if dcClient, err := traceClientFromDockerContext(dockerCli, otelEnv); err != nil {
		errs = append(errs, err)
	} else if dcClient != nil {
		if dcExporter, err := otlptrace.New(ctx, dcClient); err != nil {
			errs = append(errs, err)
		} else if dcExporter != nil {
			exporters = append(exporters, dcExporter)
		}
	}
	if len(errs) != 0 {
		return nil, errors.Join(errs...)
	}
	// Resource identifies this process: service name/version + Docker context.
	res, err := resource.New(
		ctx,
		resource.WithAttributes(
			semconv.ServiceName("compose"),
			semconv.ServiceVersion(internal.Version),
			attribute.String("docker.context", dockerCli.CurrentContext()),
		),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create resource: %v", err)
	}
	// Fan every span out to all configured exporters synchronously.
	muxExporter := MuxExporter{exporters: exporters}
	sp := sdktrace.NewSimpleSpanProcessor(muxExporter)
	tracerProvider := sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sdktrace.AlwaysSample()),
		sdktrace.WithResource(res),
		sdktrace.WithSpanProcessor(sp),
	)
	otel.SetTracerProvider(tracerProvider)
	// Shutdown will flush any remaining spans and shut down the exporter.
	return tracerProvider.Shutdown, nil
}
// traceClientFromEnv creates a GRPC OTLP client based on OS environment
// variables, returning it together with all captured OTEL_* entries. It
// returns (nil, nil) unless at least one OTEL_* variable ending in ENDPOINT
// is present.
//
// https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
func traceClientFromEnv() (otlptrace.Client, envMap) {
	otelEnv := make(map[string]string)
	endpointSet := false
	for _, entry := range os.Environ() {
		key, value, ok := strings.Cut(entry, "=")
		if !ok || !strings.HasPrefix(key, "OTEL_") {
			continue
		}
		otelEnv[key] = value
		if strings.HasSuffix(key, "ENDPOINT") {
			endpointSet = true
		}
	}
	if !endpointSet {
		return nil, nil
	}
	return otlptracegrpc.NewClient(), otelEnv
}
|
package main
import "fmt"
// TestVersion identifies the exercise test-suite version this solution targets.
const TestVersion = 1
// main prompts for a name on stdin and greets it via HelloWorld; a blank
// entry falls back to the default greeting.
func main() {
	var input string
	fmt.Print("Enter a name! \n> ")
	fmt.Scanln(&input)
	fmt.Println(HelloWorld(input))
}
// HelloWorld returns "Hello, <input>!", substituting "World" when the input
// is empty.
func HelloWorld(input string) string {
	if input == "" {
		return "Hello, " + "World" + "!"
	}
	return "Hello, " + input + "!"
}
|
package tappx
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"text/template"
"time"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/macros"
"github.com/prebid/prebid-server/openrtb_ext"
)
// TAPPX_BIDDER_VERSION is reported to the endpoint via the "v" query param.
const TAPPX_BIDDER_VERSION = "1.5"

// TYPE_CNN identifies the integration type ("type_cnn" query param).
const TYPE_CNN = "prebid"

// TappxAdapter implements the prebid-server bidder for Tappx; the endpoint
// URL is produced from this template plus per-request parameters.
type TappxAdapter struct {
	endpointTemplate *template.Template
}

// Bidder carries the bidder-specific parameters forwarded in request.ext.
type Bidder struct {
	Tappxkey string   `json:"tappxkey"`
	Mktag    string   `json:"mktag,omitempty"`
	Bcid     []string `json:"bcid,omitempty"`
	Bcrid    []string `json:"bcrid,omitempty"`
}

// Ext is the wrapper marshaled into the outgoing bid request's ext.bidder.
type Ext struct {
	Bidder `json:"bidder"`
}
// Builder builds a new instance of the Tappx adapter for the given bidder
// with the given config; it fails if the configured endpoint is not a valid
// text/template.
//
// Fix: the local variable was named `template`, shadowing the text/template
// package inside this function; renamed to tmpl.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	tmpl, err := template.New("endpointTemplate").Parse(config.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("unable to parse endpoint url template: %v", err)
	}
	return &TappxAdapter{
		endpointTemplate: tmpl,
	}, nil
}
// MakeRequests builds the outgoing Tappx HTTP request: it parses the first
// impression's ext for the bidder parameters, copies them to the request-level
// ext, resolves the endpoint URL, optionally overrides the bid floor, and
// serializes the (mutated) request as JSON.
//
// Fixes: the endpoint error is detected via err != nil rather than probing
// for an empty URL string, the redundant `var test int` two-step declaration
// is collapsed, and the local `url` no longer shadows the net/url package.
func (a *TappxAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	if len(request.Imp) == 0 {
		return nil, []error{&errortypes.BadInput{
			Message: "No impression in the bid request",
		}}
	}
	// Only the first impression's ext drives the endpoint/bidder params.
	var bidderExt adapters.ExtImpBidder
	if err := json.Unmarshal(request.Imp[0].Ext, &bidderExt); err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: "Error parsing bidderExt object",
		}}
	}
	var tappxExt openrtb_ext.ExtImpTappx
	if err := json.Unmarshal(bidderExt.Bidder, &tappxExt); err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: "Error parsing tappxExt parameters",
		}}
	}
	// Mirror the bidder params at the top level of the bid request ext.
	ext := Ext{
		Bidder: Bidder{
			Tappxkey: tappxExt.TappxKey,
			Mktag:    tappxExt.Mktag,
			Bcid:     tappxExt.Bcid,
			Bcrid:    tappxExt.Bcrid,
		},
	}
	if jsonext, err := json.Marshal(ext); err == nil {
		request.Ext = jsonext
	} else {
		return nil, []error{&errortypes.FailedToRequestBids{
			Message: "Error marshaling tappxExt parameters",
		}}
	}
	test := int(request.Test)
	uri, err := a.buildEndpointURL(&tappxExt, test)
	if err != nil {
		return nil, []error{err}
	}
	if tappxExt.BidFloor > 0 {
		request.Imp[0].BidFloor = tappxExt.BidFloor
	}
	reqJSON, err := json.Marshal(request)
	if err != nil {
		return nil, []error{&errortypes.BadInput{
			Message: "Error parsing reqJSON object",
		}}
	}
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	return []*adapters.RequestData{{
		Method:  "POST",
		Uri:     uri,
		Body:    reqJSON,
		Headers: headers,
	}}, []error{}
}
// buildEndpointURL builds the endpoint URL from adapter-specific pub settings
// in imp.ext: new-style endpoints (zzNNN.../vzNNN...) get their own subdomain
// host, legacy ones are appended to the shared ssp path; the Tappx key,
// version, type and (for non-test traffic) a millisecond timestamp are added
// as query parameters.
func (a *TappxAdapter) buildEndpointURL(params *openrtb_ext.ExtImpTappx, test int) (string, error) {
	if params.Endpoint == "" {
		return "", &errortypes.BadInput{
			Message: "Tappx endpoint undefined",
		}
	}
	if params.TappxKey == "" {
		return "", &errortypes.BadInput{
			Message: "Tappx key undefined",
		}
	}
	tappxHost := "tappx.com"
	// NOTE(review): the pattern is constant, so the Match error can only be
	// nil here (and is overwritten below); still, compiling the regexp once
	// at package scope would avoid re-parsing it on every request.
	isNewEndpoint, err := regexp.Match(`^(zz|vz)[0-9]{3,}([a-z]{2,3}|test)$`, []byte(params.Endpoint))
	if isNewEndpoint {
		tappxHost = params.Endpoint + ".pub.tappx.com/rtb/"
	} else {
		tappxHost = "ssp.api.tappx.com/rtb/v2/"
	}
	endpointParams := macros.EndpointTemplateParams{Host: tappxHost}
	host, err := macros.ResolveMacros(a.endpointTemplate, endpointParams)
	if err != nil {
		return "", &errortypes.BadInput{
			Message: "Unable to parse endpoint url template: " + err.Error(),
		}
	}
	thisURI, err := url.Parse(host)
	if err != nil {
		return "", &errortypes.BadInput{
			Message: "Malformed URL: " + err.Error(),
		}
	}
	// Legacy endpoints carry the endpoint name as a path segment instead of
	// a dedicated subdomain.
	if !isNewEndpoint {
		thisURI.Path += params.Endpoint
	}
	queryParams := url.Values{}
	queryParams.Add("tappxkey", params.TappxKey)
	if test == 0 {
		// Cache-busting timestamp in milliseconds, only for live traffic.
		t := time.Now().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond))
		queryParams.Add("ts", strconv.Itoa(int(t)))
	}
	queryParams.Add("v", TAPPX_BIDDER_VERSION)
	queryParams.Add("type_cnn", TYPE_CNN)
	thisURI.RawQuery = queryParams.Encode()
	return thisURI.String(), nil
}
// MakeBids parses the bidder's HTTP response into typed bids. 204 yields no
// bids and no errors, 400 is a bad-input error, and any other non-200 status
// is reported as unexpected; otherwise every bid in every seat is returned
// with its media type inferred from the matching impression.
func (a *TappxAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	switch {
	case response.StatusCode == http.StatusNoContent:
		return nil, nil
	case response.StatusCode == http.StatusBadRequest:
		return nil, []error{&errortypes.BadInput{
			Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode),
		}}
	case response.StatusCode != http.StatusOK:
		return nil, []error{fmt.Errorf("Unexpected status code: %d. Run with request.debug = 1 for more info", response.StatusCode)}
	}
	var bidResp openrtb2.BidResponse
	if err := json.Unmarshal(response.Body, &bidResp); err != nil {
		return nil, []error{err}
	}
	bidResponse := adapters.NewBidderResponseWithBidsCapacity(5)
	for _, seat := range bidResp.SeatBid {
		for i := range seat.Bid {
			// Copy before taking the address so each TypedBid owns its bid.
			b := seat.Bid[i]
			bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
				Bid:     &b,
				BidType: getMediaTypeForImp(b.ImpID, internalRequest.Imp),
			})
		}
	}
	return bidResponse, []error{}
}
// getMediaTypeForImp resolves the media type for a bid by locating its
// impression: video when the matching imp has a Video object, banner
// otherwise (including when no impression matches).
func getMediaTypeForImp(impId string, imps []openrtb2.Imp) openrtb_ext.BidType {
	for _, imp := range imps {
		if imp.ID != impId {
			continue
		}
		if imp.Video != nil {
			return openrtb_ext.BidTypeVideo
		}
		return openrtb_ext.BidTypeBanner
	}
	return openrtb_ext.BidTypeBanner
}
|
package main
import "fmt"
//一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
//
// 机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
//
// 现在考虑网格中有障碍物。那么从左上角到右下角将会有多少条不同的路径?
//
//
//
// 网格中的障碍物和空位置分别用 1 和 0 来表示。
//
// 说明:m 和 n 的值均不超过 100。
//
// 示例 1:
//
// 输入:
//[
// [0,0,0],
// [0,1,0],
// [0,0,0]
//]
//输出: 2
//解释:
//3x3 网格的正中间有一个障碍物。
//从左上角到右下角一共有 2 条不同的路径:
//1. 向右 -> 向右 -> 向下 -> 向下
//2. 向下 -> 向下 -> 向右 -> 向右
//
// Related Topics 数组 动态规划
/*
与62思路大致相同、就是需要添加一个为0的判断
采用bottom up的方式处理
注意:因为有了障碍物、所以这里不能再单独判断边界
时间复杂度:O(n^2)
*/
//leetcode submit region begin(Prohibit modification and deletion)
// uniquePathsWithObstacles counts monotone (right/down only) paths from the
// top-left to the bottom-right of obstacleGrid, where 1 marks an obstacle.
// Bottom-up DP: paths[r][c] is the number of paths from (r,c) to the end;
// obstacle cells contribute 0, and each open cell sums its right and down
// neighbours (edge cells have only one).
func uniquePathsWithObstacles(obstacleGrid [][]int) int {
	if len(obstacleGrid) == 0 || len(obstacleGrid[0]) == 0 {
		return 0
	}
	paths := make([][]int, len(obstacleGrid))
	for r := range obstacleGrid {
		paths[r] = make([]int, len(obstacleGrid[r]))
	}
	for r := len(paths) - 1; r >= 0; r-- {
		for c := len(paths[r]) - 1; c >= 0; c-- {
			if obstacleGrid[r][c] == 1 {
				continue // obstacle: zero paths (slice is already zeroed)
			}
			lastRow := r == len(paths)-1
			lastCol := c == len(paths[r])-1
			switch {
			case lastRow && lastCol:
				paths[r][c] = 1 // destination cell
			case lastRow:
				paths[r][c] = paths[r][c+1] // bottom edge: only right
			case lastCol:
				paths[r][c] = paths[r+1][c] // right edge: only down
			default:
				paths[r][c] = paths[r+1][c] + paths[r][c+1]
			}
		}
	}
	return paths[0][0]
}
//leetcode submit region end(Prohibit modification and deletion)
/*
思路2:递归
todo 超时
*/
// uniquePathsWithObstacles2 — approach 2: memoized recursion. Counts the
// monotone paths through obstacleGrid (1 = obstacle) by summing the two
// branches taken from the start cell. The file's original author notes this
// version times out on LeetCode.
func uniquePathsWithObstacles2(obstacleGrid [][]int) int {
	if len(obstacleGrid) == 0 || len(obstacleGrid[0]) == 0 {
		return 0
	}
	// Single row or single column: exactly one candidate path; it exists
	// only when no cell on it is blocked.
	if len(obstacleGrid) == 1 || len(obstacleGrid[0]) == 1 {
		for i := 0; i < len(obstacleGrid); i++ {
			for j := 0; j < len(obstacleGrid[i]); j++ {
				if obstacleGrid[i][j] == 1 {
					return 0
				}
			}
		}
		return 1
	}
	// A blocked start cell means no path at all.
	if obstacleGrid[0][0] == 1 {
		return 0
	}
	// result memoizes sub-path counts; 0 doubles as "not yet computed",
	// which costs recomputation for genuinely-zero cells but stays correct.
	result := make([][]int, len(obstacleGrid))
	for i := 0; i < len(result); i++ {
		result[i] = make([]int, len(obstacleGrid[i]))
	}
	return _paths_with_obs(obstacleGrid, result, 0, 1) + _paths_with_obs(obstacleGrid, result, 1, 0)
}
// _paths_with_obs recursively counts monotone paths from (row, col) to the
// bottom-right of obs, memoizing counts in result (0 = not yet computed).
func _paths_with_obs(obs [][]int, result [][]int, row int, col int) int {
	// Terminal: reached the destination cell (0 if it is itself blocked).
	if row == len(obs)-1 && col == len(obs[row])-1 {
		if obs[row][col] == 1 {
			return 0
		}
		return 1
	}
	// An obstacle contributes no paths.
	if obs[row][col] == 1 {
		result[row][col] = 0
		return 0
	}
	// Memo hit (note: cells whose true count is 0 are recomputed, since 0
	// is also the "unvisited" sentinel).
	if result[row][col] != 0 {
		return result[row][col]
	}
	// Bottom edge: can only move right.
	if row == len(obs)-1 {
		return _paths_with_obs(obs, result, row, col+1)
	}
	// Right edge: can only move down.
	if col == len(obs[row])-1 {
		return _paths_with_obs(obs, result, row+1, col)
	}
	// Interior: sum of the down and right branches, memoized.
	result[row][col] = _paths_with_obs(obs, result, row+1, col) + _paths_with_obs(obs, result, row, col+1)
	return result[row][col]
}
// main runs the memoized-recursion variant on the LeetCode example grid
// (expected output: 2).
func main() {
	obs := [][]int{{0, 0, 0}, {0, 1, 0}, {0, 0, 0}}
	fmt.Println(uniquePathsWithObstacles2(obs))
}
package cmd
import (
"context"
"io"
"os/exec"
"runtime"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tilt-dev/tilt/internal/localexec"
"github.com/tilt-dev/tilt/internal/testutils"
"github.com/tilt-dev/tilt/internal/testutils/bufsync"
"github.com/tilt-dev/tilt/internal/testutils/tempdir"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// TestTrue checks that a command exiting 0 is reported as succeeded.
func TestTrue(t *testing.T) {
	f := newProcessExecFixture(t)
	f.start("exit 0")
	f.assertCmdSucceeds()
}
// TestWorkdir runs the platform's print-working-directory command inside a
// temp dir and checks the output mentions that dir.
func TestWorkdir(t *testing.T) {
	f := newProcessExecFixture(t)
	d := tempdir.NewTempDirFixture(t)
	cmd := "pwd"
	if runtime.GOOS == "windows" {
		cmd = "cd" // bare "cd" prints the current directory on Windows
	}
	f.startWithWorkdir(cmd, d.Path())
	f.assertCmdSucceeds()
	f.assertLogContains(d.Path())
}
// TestSleep checks a short-lived command is observed Running before it
// completes successfully.
func TestSleep(t *testing.T) {
	f := newProcessExecFixture(t)
	cmd := "sleep 1"
	if runtime.GOOS == "windows" {
		// believe it or not, this is the idiomatic way to sleep on windows
		// https://www.ibm.com/support/pages/timeout-command-run-batch-job-exits-immediately-and-returns-error-input-redirection-not-supported-exiting-process-immediately
		cmd = "ping -n 1 127.0.0.1"
	}
	f.start(cmd)
	f.waitForStatus(Running)
	// Give the command time to finish before asserting success.
	time.Sleep(time.Second)
	f.assertCmdSucceeds()
}
// TestShutdownOnCancel verifies that cancelling the context gives the child
// bash process a chance to run its EXIT trap before termination: the trap's
// output must appear in the log and the command must end as Done.
func TestShutdownOnCancel(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("no bash on windows")
	}
	f := newProcessExecFixture(t)
	cmd := `
function cleanup()
{
echo "cleanup time!"
exit 1
}
trap cleanup EXIT
sleep 100
`
	f.start(cmd)
	f.cancel()
	// Allow the graceful-shutdown path (trap) to run before checking.
	time.Sleep(time.Second)
	f.waitForStatus(Done)
	f.assertLogContains("cleanup time")
}
// TestPrintsLogs checks the command's stdout is forwarded to the log writer.
func TestPrintsLogs(t *testing.T) {
	f := newProcessExecFixture(t)
	f.start("echo testing123456")
	f.assertCmdSucceeds()
	f.assertLogContains("testing123456")
}
// TestHandlesExits checks a non-zero exit ends in Error status and the exit
// code is logged.
func TestHandlesExits(t *testing.T) {
	f := newProcessExecFixture(t)
	f.start("exit 1")
	f.waitForError()
	f.assertLogContains("exited with exit code 1")
}
// TestStopsGrandchildren checks that cancelling the context terminates not
// just the shell but its subshell (grandchild) too: after cancel the command
// reaches Done instead of looping for 20 seconds.
func TestStopsGrandchildren(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("no bash on windows")
	}
	f := newProcessExecFixture(t)
	f.start("bash -c '(for i in $(seq 1 20); do echo loop$i; sleep 1; done)'")
	f.waitForStatus(Running)
	// wait until there's log output
	timeout := time.After(time.Second)
	for {
		if strings.Contains(f.testWriter.String(), "loop1") {
			break
		}
		select {
		case <-timeout:
			t.Fatal("never saw any process output")
		case <-time.After(20 * time.Millisecond):
			// poll again
		}
	}
	// cancel the context
	f.cancel()
	f.waitForStatus(Done)
}
// TestHandlesProcessThatFailsToStart checks that an unstartable command is
// surfaced as an Error status with a "failed to start" log entry.
func TestHandlesProcessThatFailsToStart(t *testing.T) {
	f := newProcessExecFixture(t)
	f.startMalformedCommand()
	f.waitForError()
	f.assertLogContains("failed to start: ")
}
// TestExecEmpty checks that starting an empty command string reports an
// error mentioning "empty cmd".
func TestExecEmpty(t *testing.T) {
	f := newProcessExecFixture(t)
	f.start("")
	f.waitForError()
	f.assertLogContains("empty cmd")
}
// TestExecCmd checks localexec.ExecCmd translates each model.Cmd (argv, dir,
// env) faithfully into an *exec.Cmd.
func TestExecCmd(t *testing.T) {
	testCases := execTestCases()
	// Discard all log output; these cases never actually run.
	l := logger.NewLogger(logger.NoneLvl, io.Discard)
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			c, err := localexec.EmptyEnv().ExecCmd(tc.cmd, l)
			require.NoError(t, err)
			assertCommandEqual(t, tc.cmd, c)
		})
	}
}
// execTestCase pairs a human-readable name with the model.Cmd under test.
type execTestCase struct {
	name string
	cmd  model.Cmd
}
// execTestCases enumerates the Cmd shapes exercised by TestExecCmd.
func execTestCases() []execTestCase {
	// these need to appear as actual paths or exec.Command will attempt to resolve them
	// (their actual existence is irrelevant since they won't actually execute; similarly,
	// it won't matter that they're unix paths even on Windows)
	return []execTestCase{
		{"command only", model.Cmd{Argv: []string{"/bin/ls"}}},
		{"command array", model.Cmd{Argv: []string{"/bin/echo", "hi"}}},
		{"current working directory", model.Cmd{Argv: []string{"/bin/echo", "hi"}, Dir: "/foo"}},
		{"env", model.Cmd{Argv: []string{"/bin/echo", "hi"}, Env: []string{"FOO=bar"}}},
	}
}
// assertCommandEqual asserts that an *exec.Cmd carries the same path, argv,
// working dir, and (at least) the expected env entries as the model.Cmd.
func assertCommandEqual(t *testing.T, expected model.Cmd, actual *exec.Cmd) {
	t.Helper()
	assert.Equal(t, expected.Argv[0], actual.Path)
	assert.Equal(t, expected.Argv, actual.Args)
	assert.Equal(t, expected.Dir, actual.Dir)
	// Env may contain extra inherited entries; require only ours.
	for _, e := range expected.Env {
		assert.Contains(t, actual.Env, e)
	}
}
// processExecFixture bundles a processExecer under a cancellable context with
// a thread-safe log buffer and the status channel of the command it started.
type processExecFixture struct {
	t          *testing.T
	ctx        context.Context
	cancel     context.CancelFunc
	execer     *processExecer
	testWriter *bufsync.ThreadSafeBuffer
	statusCh   chan statusAndMetadata
}
// newProcessExecFixture builds a fixture with a 1s grace period and registers
// context cancellation as test cleanup.
func newProcessExecFixture(t *testing.T) *processExecFixture {
	execer := NewProcessExecer(localexec.EmptyEnv())
	execer.gracePeriod = time.Second
	testWriter := bufsync.NewThreadSafeBuffer()
	ctx, _, _ := testutils.ForkedCtxAndAnalyticsForTest(testWriter)
	ctx, cancel := context.WithCancel(ctx)
	ret := &processExecFixture{
		t:          t,
		ctx:        ctx,
		cancel:     cancel,
		execer:     execer,
		testWriter: testWriter,
	}
	t.Cleanup(ret.tearDown)
	return ret
}
// tearDown cancels the fixture context, stopping any still-running command.
func (f *processExecFixture) tearDown() {
	f.cancel()
}
// startMalformedCommand starts an argv that cannot resolve to an executable
// (a bare quote), to exercise the failed-to-start path.
func (f *processExecFixture) startMalformedCommand() {
	c := model.Cmd{Argv: []string{"\""}, Dir: "."}
	f.statusCh = f.execer.Start(f.ctx, c, f.testWriter)
}
// startWithWorkdir starts a shell command with the given working directory
// and records its status channel on the fixture.
func (f *processExecFixture) startWithWorkdir(cmd string, workdir string) {
	c := model.ToHostCmd(cmd)
	c.Dir = workdir
	f.statusCh = f.execer.Start(f.ctx, c, f.testWriter)
}
// start starts a shell command in the current directory.
func (f *processExecFixture) start(cmd string) {
	f.startWithWorkdir(cmd, ".")
}
// assertCmdSucceeds waits for the command to finish with Done status.
func (f *processExecFixture) assertCmdSucceeds() {
	f.waitForStatus(Done)
}
// waitForStatus drains statusCh until the expected status arrives, failing
// the test on an unexpected terminal status (Error/Done), a closed channel,
// or a 2-second timeout.
func (f *processExecFixture) waitForStatus(expectedStatus status) {
	deadlineCh := time.After(2 * time.Second)
	for {
		select {
		case sm, ok := <-f.statusCh:
			if !ok {
				f.t.Fatal("statusCh closed")
			}
			if expectedStatus == sm.status {
				return
			}
			// Any terminal status other than the expected one is a failure.
			if sm.status == Error {
				f.t.Error("Unexpected Error")
				return
			}
			if sm.status == Done {
				f.t.Error("Unexpected Done")
				return
			}
		case <-deadlineCh:
			f.t.Fatal("Timed out waiting for cmd sm")
		}
	}
}
// assertLogContains polls the captured log (up to 1s) for the substring s.
func (f *processExecFixture) assertLogContains(s string) {
	require.Eventuallyf(f.t, func() bool {
		return strings.Contains(f.testWriter.String(), s)
	}, time.Second, 5*time.Millisecond, "log contains %q", s)
}
// waitForError waits for the command to finish with Error status.
func (f *processExecFixture) waitForError() {
	f.waitForStatus(Error)
}
|
// Code generated; DANGER ZONE FOR EDITS
package data
import (
"bytes"
"encoding/json"
"fmt"
"gopkg.in/yaml.v2"
)
// PresentationNodeDefinitionName identifies this definition table by name.
const PresentationNodeDefinitionName = "presentation-node"

// PresentationNodeDefinitions maps definition hashes (as decimal strings) to
// their definitions. NOTE: this file is code-generated — change the
// generator, not this file.
type PresentationNodeDefinitions map[string]PresentationNodeDefinition
// Keys returns all definition keys in unspecified (map-iteration) order.
func (d PresentationNodeDefinitions) Keys() (out []string) {
	for k := range d {
		out = append(out, k)
	}
	return out
}
// Values returns all definitions in unspecified (map-iteration) order.
func (d PresentationNodeDefinitions) Values() (out []PresentationNodeDefinition) {
	for _, v := range d {
		out = append(out, v)
	}
	return out
}
// Find returns the definition stored under the decimal string form of id,
// or the zero value when absent.
//
// Fix: the generated code linearly scanned the map comparing every key
// against fmt.Sprint(id); a direct map lookup is O(1) and behaves
// identically. (This file is code-generated — mirror the fix in the
// generator.)
func (d PresentationNodeDefinitions) Find(id int) (out PresentationNodeDefinition) {
	if v, ok := d[fmt.Sprint(id)]; ok {
		return v
	}
	return PresentationNodeDefinition{}
}
// Name returns the canonical table name for this definition collection.
func (d PresentationNodeDefinitions) Name() string {
	return PresentationNodeDefinitionName
}
// PresentationNodeDefinition mirrors one presentation-node record as exposed
// by the upstream API; JSON tags match the API field names, YAML tags use
// omitempty for compact exports. (Code-generated — do not hand-edit fields.)
type PresentationNodeDefinition struct {
	Blacklisted                     bool                     `json:"blacklisted" yaml:"blacklisted,omitempty"`
	Children                        Children                 `json:"children" yaml:"children,omitempty"`
	DisableChildSubscreenNavigation bool                     `json:"disableChildSubscreenNavigation" yaml:"disableChildSubscreenNavigation,omitempty"`
	DisplayProperties               DisplayProperties        `json:"displayProperties" yaml:"displayProperties,omitempty"`
	DisplayStyle                    int                      `json:"displayStyle" yaml:"displayStyle,omitempty"`
	Hash                            int                      `json:"hash" yaml:"hash,omitempty"`
	Index                           int                      `json:"index" yaml:"index,omitempty"`
	NodeType                        int                      `json:"nodeType" yaml:"nodeType,omitempty"`
	ObjectiveHash                   int                      `json:"objectiveHash" yaml:"objectiveHash,omitempty"`
	OriginalIcon                    string                   `json:"originalIcon" yaml:"originalIcon,omitempty"`
	ParentNodeHashes                []int64                  `json:"parentNodeHashes" yaml:"parentNodeHashes,omitempty"`
	Redacted                        bool                     `json:"redacted" yaml:"redacted,omitempty"`
	Requirements                    PresentationRequirements `json:"requirements" yaml:"requirements,omitempty"`
	RootViewIcon                    string                   `json:"rootViewIcon" yaml:"rootViewIcon,omitempty"`
	Scope                           int                      `json:"scope" yaml:"scope,omitempty"`
	ScreenStyle                     int                      `json:"screenStyle" yaml:"screenStyle,omitempty"`
}
// PresentationNodes is a child reference to another presentation node by hash.
type PresentationNodes struct {
	PresentationNodeHash int64 `json:"presentationNodeHash" yaml:"presentationNodeHash,omitempty"`
}
// Children groups the child entries of a presentation node; collectibles and
// records are left untyped by the generator.
type Children struct {
	Collectibles      []interface{}       `json:"collectibles" yaml:"collectibles,omitempty"`
	PresentationNodes []PresentationNodes `json:"presentationNodes" yaml:"presentationNodes,omitempty"`
	Records           []interface{}       `json:"records" yaml:"records,omitempty"`
}
// PresentationRequirements carries the message shown when the entitlement
// backing a node is unavailable.
type PresentationRequirements struct {
	EntitlementUnavailableMessage string `json:"entitlementUnavailableMessage" yaml:"entitlementUnavailableMessage,omitempty"`
}
// Name returns the canonical manifest table name for this definition.
func (d PresentationNodeDefinition) Name() string {
	return PresentationNodeDefinitionName
}
// Json serializes the definition to compact JSON.
func (d PresentationNodeDefinition) Json() ([]byte, error) {
	return json.Marshal(d)
}
// PrettyJson serializes the definition to indented, human-readable JSON.
func (d PresentationNodeDefinition) PrettyJson() ([]byte, error) {
	compact, err := d.Json()
	if err != nil {
		return nil, err
	}
	var indented bytes.Buffer
	if err = json.Indent(&indented, compact, "", " "); err != nil {
		return nil, err
	}
	return indented.Bytes(), nil
}
// Yaml serializes the definition to YAML.
func (d PresentationNodeDefinition) Yaml() ([]byte, error) {
	return yaml.Marshal(d)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"bytes"
"os"
)
// Writer is a function that writes out a formatted log message at the given
// severity. Implementations decide where the text goes (stdout, a buffer, …).
type Writer func(text string, severity Severity)
// Std returns a Writer that writes to stdout if the message severity is less
// than an error, otherwise it writes to stderr. Every message is terminated
// with a newline.
func Std() Writer {
	return func(text string, severity Severity) {
		target := os.Stdout
		if severity >= Error {
			target = os.Stderr
		}
		target.WriteString(text + "\n")
	}
}
// Stdout returns a Writer that writes every message to stdout, regardless of
// severity, terminated with a newline.
func Stdout() Writer {
	return func(text string, severity Severity) {
		os.Stdout.WriteString(text + "\n")
	}
}
// Buffer returns a Writer that accumulates messages into the returned
// buffer, with a newline separating consecutive messages.
func Buffer() (Writer, *bytes.Buffer) {
	buf, nl := &bytes.Buffer{}, false
	return func(text string, severity Severity) {
		// Write the separator before the message. The original wrote it
		// after the message and only set nl afterwards, which fused the
		// first two messages together ("ab\nc\n" instead of "a\nb\nc").
		if nl {
			buf.WriteString("\n")
		}
		buf.WriteString(text)
		nl = true
	}, buf
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"os"
"time"
)
// main runs either an HTTP echo server ("server") or a busy-loop worker
// ("worker"), selected by the first positional argument.
func main() {
	var (
		// Port comes from the PORT environment variable, overridable with
		// the -port flag; defaults to 8080.
		port = flag.String("port", env("PORT", "8080"), "The port")
	)
	flag.Parse()
	args := flag.Args()
	if len(args) < 1 {
		log.Fatal("You must specify the `server` or `worker` subcommand.")
	}
	cmd := args[0]
	switch cmd {
	case "server":
		log.Printf("Starting on %s", *port)
		// ListenAndServe blocks; log.Fatal exits on listener failure.
		log.Fatal(http.ListenAndServe(":"+*port, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log.Printf("%s - %s", r.Method, r.URL)
			w.WriteHeader(200)
			io.WriteString(w, "Ok\n")
		})))
	case "worker":
		// Simulated work: print a random number once per second, forever.
		for {
			<-time.After(1 * time.Second)
			fmt.Printf("Hard work %d...\n", rand.Int())
		}
	default:
		log.Fatalf("Unknown subcommand: %s", cmd)
	}
}
func env(key string, fallback string) string {
value := os.Getenv(key)
if value == "" {
value = fallback
}
return value
}
|
package main
import "fmt"
// node is a single element of the singly linked list.
type node struct {
	data int
	next *node
}
// list is a singly linked list with a cached element count.
type list struct {
	root *node
	count int
}
// insert prepends a new node carrying data to the front of the list.
func (l *list) insert(data int) {
	// A nil l.root is itself a valid next pointer, so the empty and
	// non-empty cases collapse into a single prepend.
	l.root = &node{data: data, next: l.root}
	l.count += 1
}
// print writes the list contents to stdout, followed by a newline.
func (l *list) print() {
	printUtil(l.root)
	fmt.Printf("\n")
}
// len returns the number of elements currently in the list.
func (l *list) len() int {
	return l.count
}
// search returns the first node carrying data, or nil when absent.
func (l *list) search(data int) *node {
	return searchUtil(l.root, data)
}
// delete removes the first node carrying data, if any. The count is only
// decremented when a node is actually removed.
func (l *list) delete(data int) {
	if l.root == nil {
		return
	}
	// Head removal has no predecessor node.
	if l.root.data == data {
		l.root = l.root.next
		l.count -= 1
		return
	}
	// Single traversal; the original searched the list twice (once for the
	// node, once for its predecessor).
	prev := searchPrevUtil(l.root, data)
	if prev == nil {
		return // data not present
	}
	prev.next = prev.next.next
	l.count -= 1
}
// searchUtil walks the chain starting at n and returns the first node whose
// data equals data, or nil when no such node exists.
func searchUtil(n *node, data int) *node {
	for cur := n; cur != nil; cur = cur.next {
		if cur.data == data {
			return cur
		}
	}
	return nil
}
// searchPrevUtil returns the node immediately preceding the first node whose
// data equals data, or nil when data is absent or sits at the head.
func searchPrevUtil(n *node, data int) *node {
	for cur := n; cur != nil && cur.next != nil; cur = cur.next {
		if cur.next.data == data {
			return cur
		}
	}
	return nil
}
// printUtil prints the chain starting at n as "a -> b -> c" (no trailing
// separator, no newline).
func printUtil(n *node) {
	for cur := n; cur != nil; cur = cur.next {
		fmt.Printf("%v", cur.data)
		if cur.next != nil {
			fmt.Printf(" -> ")
		}
	}
}
// main demonstrates the list: build 3 -> 2 -> 1, then delete elements one by
// one, printing the list and its length after each step.
func main() {
	l := &list{
		root: nil,
		count: 0,
	}
	l.insert(1)
	l.insert(2)
	l.insert(3)
	l.print()
	fmt.Printf("len: %v \n", l.len())
	l.delete(2)
	l.print()
	fmt.Printf("len: %v \n", l.len())
	l.delete(3)
	l.print()
	fmt.Printf("len: %v \n", l.len())
	l.delete(1)
	l.print()
	fmt.Printf("len: %v \n", l.len())
}
|
package easyws
//====================================================================
// hub maintains the set of active connections and broadcasts messages to connections.
type wshub struct {
	// Registered connections, keyed by the talker's id.
	connections map[int64]WebsocketTalker
	server *WsServer
	// Inbound messages from the connections, fanned out to every connection.
	broadcast chan []byte
	// send message directly to some connection (addressed by Receiverid).
	send chan *MessageToSend
	// Register requests from the connections.
	register chan WebsocketTalker
	// Unregister requests from connections.
	unregister chan WebsocketTalker
}
// MakeHub constructs a hub bound to the given server, with all of its
// channels allocated.
func MakeHub(s *WsServer) *wshub {
	var h = wshub{
		broadcast: make(chan []byte),
		// BUG FIX: send was never initialized. Receiving from (and sending
		// to) a nil channel blocks forever, so the direct-send case in
		// run() could never fire and senders would deadlock.
		send:        make(chan *MessageToSend),
		register:    make(chan WebsocketTalker),
		unregister:  make(chan WebsocketTalker),
		connections: make(map[int64]WebsocketTalker),
		server:      s,
	}
	return &h
}
// _deleteFromHubAndShutdown removes c from the connection map and shuts it
// down; a connection not present in the map is left untouched.
func (h *wshub) _deleteFromHubAndShutdown(c WebsocketTalker) {
	if _, ok := h.connections[c.GetId()]; ok {
		delete(h.connections, c.GetId())
		c.shutdown()
	}
}
// run is the hub's event loop: it serializes registration, unregistration,
// broadcast and direct-send requests, and evicts connections whose send
// failed. Intended to run in its own goroutine; it never returns.
// NOTE(review): receives from h.send block forever if that channel was never
// allocated — verify MakeHub initializes it.
func (h *wshub) run() {
	// Connections whose send failed during this iteration; evicted after
	// the select so we never mutate h.connections while ranging over it.
	var todel = []WebsocketTalker{}
	for {
		select {
		case c := <-h.register:
			// TODO unregister prev connection if exists or change id :)
			h.connections[c.GetId()] = c
			go h.server.conf.OnConnectionStatusChanged(c, STATUS_CONNECTED)
		case c := <-h.unregister:
			h._deleteFromHubAndShutdown(c)
		case m := <-h.broadcast:
			for _, c := range h.connections {
				if !c.sendOrFalse(m) {
					todel = append(todel, c)
				}
			}
		case m := <-h.send:
			if c, ok := h.connections[m.Receiverid]; ok {
				if !c.sendOrFalse(m.Bytes) {
					todel = append(todel, c)
				}
			}
		}
		if len(todel) > 0 {
			for _, v := range todel {
				h._deleteFromHubAndShutdown(v)
			}
			todel = []WebsocketTalker{}
		}
	}
}
|
package repository
import (
"encoding/json"
"log"
"time"
"va_test_a/internal/model"
"va_test_a/pkg/database"
)
// ToDoRepository is the persistence contract for todo tasks.
type ToDoRepository interface {
	// CreateTask stores a task (user name, task text, due date); returns
	// false when the user does not exist.
	CreateTask(string,string, time.Time) bool
	// GetTasks returns the stored tasks for a user name.
	GetTasks(string) []model.ToDo
}
// ToDoRepo is a file-backed implementation of ToDoRepository.
type ToDoRepo struct {
}
// CreateTask appends a task for userName to the todo file. It returns false
// when the user does not exist; any file or JSON error aborts the process
// (matching the existing log.Fatal error style of this repository).
func (t ToDoRepo) CreateTask(userName string, task string, date time.Time) bool {
	userRepo := UserRepo{}
	if !userRepo.IfExist(userName) {
		return false
	}
	db := database.DB{}
	todoData := db.OpenTodoFile()
	err := json.Unmarshal(todoData, &model.ToDoList)
	if err != nil {
		log.Fatal(err)
	}
	todo := model.ToDo{UserName: userName, Task: task, Date: date}
	model.ToDoList = append(model.ToDoList, todo)
	todoData, err = json.MarshalIndent(model.ToDoList, "", "")
	if err != nil {
		// BUG FIX: this error was previously discarded with `_`, so a
		// marshal failure would silently truncate the todo file.
		log.Fatal(err)
	}
	db.WriteTodoFile(todoData)
	return true
}
// GetTasks returns the tasks stored for userName whose due date has not yet
// passed; nil is returned when the user does not exist.
func (t ToDoRepo) GetTasks(userName string) []model.ToDo {
	var tasks []model.ToDo
	userRepo := UserRepo{}
	if !userRepo.IfExist(userName){
		return nil
	}
	db := database.DB{}
	todoData := db.OpenTodoFile()
	err := json.Unmarshal(todoData, &model.ToDoList)
	if err != nil {
		log.Fatal(err)
	}
	// Keep only this user's tasks that are still in the future.
	for _,value := range model.ToDoList{
		if value.UserName==userName&&value.Date.After(time.Now()) {
			tasks=append(tasks,value)
		}
	}
	return tasks
}
|
package bfs
import "github.com/victorfernandesraton/bfs-and-dfs/node"
// Execution runs a breadth-first traversal starting at vertex n, appending
// visited vertices to out.Queue and tracking the deepest level seen in
// out.LastLevel. (Comments translated from Portuguese.)
func Execution(n *node.Node, out *node.Output) *node.Output {
	// Mark the root vertex as visited.
	if n.Used == false {
		n.Used = true
		out.Queue = append(out.Queue, n) // add the root vertex to the queue
	}
	// Track the vertex level relative to its parent.
	if out.LastLevel < n.Index {
		out.LastLevel = n.Index
	}
	if len(n.Children) > 0 {
		// The current vertex has adjacent vertices; walk them.
		out.LastLevel = n.Index // record the level marker
		for _, item := range n.Children { // mark each unvisited child
			if item.Used == false {
				// Mark as visited.
				item.Used = true
				out.Queue = append(out.Queue, item) // enqueue the child
			}
		}
		// Recurse, continuing the traversal from each child.
		for _, item := range n.Children {
			Execution(item, out)
		}
	}
	// The accumulated queue is returned as the result.
	return out
}
|
package algo_test
import (
"testing"
"github.com/bpatel85/learn-go/pkg/algo"
)
// FindPathTestStructs pairs a grid (0 = open cell, 1 = blocked) with the
// expected number of monotone paths through it.
type FindPathTestStructs struct {
	input [][]int
	expected int
}
// TestNumPaths drives algo.FindNumPaths.CountNumPaths over open grids,
// single-column grids, a nil grid, and a grid with an obstacle.
func TestNumPaths(t *testing.T) {
	testRuns := []FindPathTestStructs{
		{
			input: [][]int{
				{0, 0, 0},
				{0, 0, 0},
				{0, 0, 0},
			},
			expected: 6,
		},
		{
			input: [][]int{
				{0},
				{0},
				{0},
			},
			expected: 1,
		},
		{
			input: [][]int{
				{0},
			},
			expected: 1,
		},
		{
			// nil grid: no paths at all.
			input: nil,
			expected: 0,
		},
		{
			// Center cell blocked: only the two perimeter paths remain.
			input: [][]int{
				{0, 0, 0},
				{0, 1, 0},
				{0, 0, 0},
			},
			expected: 2,
		},
	}
	fp := algo.FindNumPaths{}
	for _, tc := range testRuns {
		actual := fp.CountNumPaths(tc.input)
		if actual != tc.expected {
			t.Errorf("not right number of path. Expected: %d, Actual: %d", tc.expected, actual)
		}
	}
}
|
package foo
// NOTE(review): the before/inline/after comments below look like deliberate
// fixtures exercising comment placement (e.g. for a formatter or parser
// test); they are preserved verbatim.
// Comment for struct
type Foo struct {
	// comment before
	a string // comment at same line
	// comment after
	b []string
	// comment after without fields following
}
// newFoo builds a sample Foo value.
func newFoo() Foo {
	return Foo{
		// comment before
		a: "a", // comment at same line
		// comment after
		b: []string{
			// comment before
			"a", // comment at same line
			// comment after
		},
	}
}
|
package controllers
import (
"github.com/astaxie/beego"
)
// AddUpController serves the statistics ("tongji") page.
type AddUpController struct {
	beego.Controller
}
// AddUp selects the statistics template for rendering.
// (Original comment: article update / data validation, route /api/article/update.)
func (c *AddUpController) AddUp() {
	// Idiom fix: receiver renamed from `this` to `c` per Go convention.
	//c.Layout = layout
	//c.TplName = theme + "/tongji.html"
	c.TplName = theme + "/tongji.html"
}
|
package extractpublicfiles
import (
"context"
"errors"
"fmt"
"github.com/function61/gokit/ezhttp"
"github.com/function61/gokit/fileexists"
"github.com/function61/passitron/pkg/tarextract"
"io"
"log"
"net/url"
"os"
)
const (
	// PublicFilesArchiveFilename is the archive fetched/extracted by Run.
	PublicFilesArchiveFilename = "public.tar.gz"
	// publicFilesDirectory is the directory whose presence means extraction
	// has already happened.
	publicFilesDirectory = "public"
)
// errDownloadWithDevVersion is returned when there is no download URL to
// fall back to (development builds).
var errDownloadWithDevVersion = errors.New("public files dir not exists and not using released version - don't know how to fix this")
// BintrayDownloadUrl builds the Bintray download URL for filePath in the
// given user's repository; filePath is query-escaped.
func BintrayDownloadUrl(user string, repo string, filePath string) string {
	escaped := url.QueryEscape(filePath)
	return "https://bintray.com/" + user + "/" + repo + "/download_file?file_path=" + escaped
}
// downloadPublicFiles fetches the archive at downloadUrl into destination,
// writing to a temporary file first so a partial download never appears at
// the final path. An empty downloadUrl means a dev build with nothing to
// fetch.
func downloadPublicFiles(downloadUrl string, destination string, logger *log.Logger) error {
	if downloadUrl == "" {
		return errDownloadWithDevVersion
	}
	logger.Printf(
		"downloadPublicFiles: %s missing; downloading from %s",
		destination,
		downloadUrl)
	tempFilename := destination + ".dltemp"
	tempFile, err := os.Create(tempFilename)
	if err != nil {
		return err
	}
	defer tempFile.Close()
	// FIX: remove the temp file on any failure path; previously it was
	// leaked. After a successful rename it no longer exists and Remove is a
	// harmless no-op error.
	defer os.Remove(tempFilename)
	ctx, cancel := context.WithTimeout(context.TODO(), ezhttp.DefaultTimeout10s)
	defer cancel()
	resp, errHttp := ezhttp.Get(ctx, downloadUrl)
	if errHttp != nil {
		return errHttp
	}
	defer resp.Body.Close()
	if _, err := io.Copy(tempFile, resp.Body); err != nil {
		return err
	}
	if err := tempFile.Close(); err != nil { // double close is intentional
		return err
	}
	if err := os.Rename(tempFilename, destination); err != nil {
		return err
	}
	// FIX: "succesfully" typo corrected in the log message.
	logger.Printf("downloadPublicFiles: %s successfully downloaded", destination)
	return nil
}
// Run ensures the public files directory exists: if it is already present
// nothing happens; otherwise the archive is downloaded (when missing) and
// extracted into the working directory.
func Run(downloadUrl string, archiveFilename string, logger *log.Logger) error {
	dirExists, err := fileexists.Exists(publicFilesDirectory)
	if err != nil {
		return err
	}
	if dirExists { // our job here is done
		return nil
	}
	archiveExists, err := fileexists.Exists(archiveFilename)
	if err != nil {
		return err
	}
	if !archiveExists {
		if err := downloadPublicFiles(downloadUrl, archiveFilename, logger); err != nil {
			return err
		}
	}
	logger.Printf("extractPublicFiles: extracting public files from %s", archiveFilename)
	f, err := os.Open(archiveFilename)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := tarextract.ExtractTarGz(f); err != nil {
		return err
	}
	return nil
}
|
package acrostic
import (
"errors"
"github.com/noyuno/lgo/runes"
)
// CaseElement : the case-element side (not the Predicate's CaseElementGroup).
// (Comments translated from Japanese.)
type CaseElement struct {
	// BasicPhrase is the knp basic-phrase output line.
	BasicPhrase []rune
	// AnalysisCase : the analyzed case (other than adnominal modifiee).
	AnalysisCase []rune
	// HasAnalysisCase : whether an analyzed case is present.
	HasAnalysisCase bool
	// AnalysisConnection : the analyzed connection (adnominal modifiee).
	AnalysisConnection []rune
	// HasAnalysisConnection : whether an analyzed connection is present.
	HasAnalysisConnection bool
}
// NewCaseElement : constructor; wraps a basic-phrase line for analysis.
func NewCaseElement(bp []rune) *CaseElement {
	ret := new(CaseElement)
	ret.BasicPhrase = bp
	return ret
}
// Copy returns a deep copy of the CaseElement (rune slices are cloned).
func (c *CaseElement) Copy() *CaseElement {
	r := NewCaseElement(c.BasicPhrase)
	r.AnalysisCase = runes.Copy(c.AnalysisCase)
	r.HasAnalysisCase = c.HasAnalysisCase
	r.AnalysisConnection = runes.Copy(c.AnalysisConnection)
	// BUG FIX: this line previously re-assigned HasAnalysisCase, leaving
	// HasAnalysisConnection always false on the copy.
	r.HasAnalysisConnection = c.HasAnalysisConnection
	return r
}
// Analyze : parses the case-element side. It scans BasicPhrase for the
// "<解析格:...>" (analyzed case) and "<解析連絡:...>" (analyzed connection)
// tags, filling the corresponding fields/flags; an error is returned when a
// tag is unterminated or neither tag is present. (Comments translated.)
func (ce *CaseElement) Analyze() error {
	begincase := []rune("<解析格:")
	beginconnection := []rune("<解析連絡:")
	end := []rune(">")
	acb := runes.Index(ce.BasicPhrase, begincase, 0)
	if acb != -1 {
		ace := runes.Index(ce.BasicPhrase, end, acb)
		if ace == -1 {
			return errors.New("CaseElement.Analyze found begin of AnalysisCase, but not found end of.")
		}
		// Slice out the payload between the tag prefix and ">".
		ce.AnalysisCase = ce.BasicPhrase[acb+len(begincase) : ace]
		ce.HasAnalysisCase = true
		//return errors.New("CaseElement.Analyze cannot find begin of tag")
	}
	acb = runes.Index(ce.BasicPhrase, beginconnection, 0)
	if acb != -1 {
		ace := runes.Index(ce.BasicPhrase, end, acb)
		if ace == -1 {
			return errors.New("CaseElement.Analyze found begin of AnalysisConnection, but not found end of.")
		}
		ce.AnalysisConnection = ce.BasicPhrase[acb+len(beginconnection) : ace]
		ce.HasAnalysisConnection = true
	}
	if !ce.HasAnalysisCase && !ce.HasAnalysisConnection {
		return errors.New("CaseElement.Text has not contain any of case element tags")
	}
	//log.WithFields(log.Fields{
	//	"AnalysisCase":       string(ce.AnalysisCase),
	//	"AnalysisConnection": string(ce.AnalysisConnection)}).Debug("[CaseElement]")
	return nil
}
|
/*
* @Author: CJ Ting
* @Date: 2016-06-02 20:53:54
* @Last Modified by: dingxijin
* @Last Modified time: 2016-06-02 23:18:47
*/
package main
import (
"flag"
"fmt"
"io"
"log"
"net"
"time"
)
// main runs a TCP clock server on localhost: each accepted connection
// receives the current time once per second (see handleConn).
func main() {
	port := flag.Int("port", 5000, "specify the port")
	flag.Parse()
	listener, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Server is running on port %d\n", *port)
	for {
		conn, err := listener.Accept()
		if err != nil {
			// Accept failures are transient; log and keep serving.
			log.Println(err)
			continue
		}
		// One goroutine per connection.
		go handleConn(conn)
	}
}
func handleConn(conn net.Conn) {
defer conn.Close()
for {
_, err := io.WriteString(conn, time.Now().Format("15:04:05\n"))
if err != nil {
return
}
time.Sleep(1 * time.Second)
}
}
|
package main
import (
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/golib/pkg/loadbalancing/dnslb/config"
)
// rollingReplaceSubcommand performs a rolling replacement in each region
// named in args, stopping at the first failure.
func rollingReplaceSubcommand(args []string, logger log.DebugLogger) error {
	for i := range args {
		err := config.RollingReplace(cfgData, args[i], logger)
		if err != nil {
			return err
		}
	}
	return nil
}
|
package template
import "github.com/spf13/cobra"
// RootCMD is the parent "template" command; subcommands are attached in init.
var RootCMD = &cobra.Command{
	Use:   "template",
	Short: "Commands to create pre-filled templates for jobs",
	Long:  ``,
}
// init wires the customer and application template subcommands onto RootCMD.
func init() {
	RootCMD.AddCommand(customerCMD)
	RootCMD.AddCommand(applicationCMD)
}
|
package model
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/olivere/elastic/v7"
"lhc.go.game.center/libs/es"
"lhc.go.game.center/logs"
)
// NetbianImg is an image document stored in the Elasticsearch "gallery"
// index; Id mirrors the ES document id.
type NetbianImg struct {
	Id           string   `json:"id"`
	Name         string   `json:"name"`
	Alt          string   `json:"alt" form:"alt"`
	Details      string   `json:"details"`
	Src          string   `json:"src"`
	DownloadsUrl string   `json:"downloads_url"`
	Sort         int      `json:"sort"`
	Mark         string   `json:"mark"`
	Type         string   `json:"type"`
	Origin       string   `json:"origin"`
	TickTime     string   `json:"tick_time"`
	Lists        []string `json:"lists"`
}
// NewNetbianImg returns an empty NetbianImg ready to be filled.
func NewNetbianImg() *NetbianImg {
	return &NetbianImg{}
}
// GetList pages through the "gallery" index, optionally filtered by a query
// on the alt field, returning the decoded documents and the total hit count.
func (this *NetbianImg) GetList(param Params)(data []*NetbianImg,total int64,err error) {
	client := es.Client.Search("gallery")
	// Filter by alt text only when a filter value was supplied.
	if this.Alt!="" {
		query := elastic.NewQueryStringQuery("alt:"+this.Alt)
		client = client.Query(query)
	}
	result, err := client.From(param.Satrt).Size(param.Length).Do(context.Background())
	if err!=nil {
		return nil,0,err
	}
	total = result.Hits.TotalHits.Value
	if total > 0 {
		for _,v := range result.Hits.Hits {
			var tmp NetbianImg
			err := json.Unmarshal(v.Source, &tmp)
			if err != nil {
				logs.Error.Error(err)
				return nil,0,err
			}
			// The document id lives on the hit, not in _source.
			tmp.Id = v.Id
			data = append(data, &tmp)
		}
	}
	return
}
func (this *NetbianImg) GetOneById()(err error) {
if this.Id=="" {
return errors.New("id 不能为空")
}
result, err := es.Client.Index().Index("gallery").Id(this.Id).Do(context.Background())
fmt.Printf("%#v\n",result.Shards.Failures)
return
} |
package main
import (
"./models"
//"github.com/gorilla/websocket"
"net/http"
"html/template"
"github.com/gorilla/mux"
)
var templates *template.Template
// main initializes the models layer and starts the HTTP server (blocks in
// setupRoutes).
func main() {
	models.New()
	setupRoutes()
}
// setupRoutes registers all HTTP routes, parses the page templates and
// serves on :8080 (blocks until the server stops).
// NOTE(review): the http.ListenAndServe error is silently discarded —
// consider logging it so startup failures are visible.
func setupRoutes() {
	r := mux.NewRouter()
	bal := models.GetBalance()
	templates = template.Must(template.ParseGlob("pages/*.html"))
	r.HandleFunc("/", indexHandler).Methods("GET")
	r.HandleFunc("/", indexHandler).Methods("POST")
	r.HandleFunc("/upgrade-click/", models.BuildCUpgradeHandler(bal)).Methods("POST")
	r.HandleFunc("/add/", models.BalanceHandler).Methods("POST")
	r.HandleFunc("/auto-inc-one/", models.BuildAutoIncOneHandler(bal)).Methods("POST")
	r.PathPrefix("/static/").Handler(http.StripPrefix("/static", http.FileServer(http.Dir("./static/"))))
	http.Handle("/", r)
	http.ListenAndServe(":8080", nil)
}
// indexHandler renders the main clicker page. The four upgrade tiers only
// differ in two display strings, so the previously duplicated render
// branches are collapsed into a single switch + one template execution.
func indexHandler(w http.ResponseWriter, r *http.Request) {
	go models.AutoIncOne()
	bal := models.GetBalance()
	status := models.GetStatus()
	/*clickStatus := models.GetClickStatus()
	autoIncOneString := models.GetAutoIncOneString()
	CoinPerSecond := models.GetCoinPerSecond()*/
	// Per-tier upgrade label and click power.
	var clickUpString, clickPower string
	switch status.ClickStatus {
	case 0:
		clickUpString, clickPower = "20 coins to upgrade", "1"
	case 1:
		clickUpString, clickPower = "100 coins to upgrade", "2"
	case 2:
		clickUpString, clickPower = "275 coins to upgrade", "4"
	case 3:
		clickUpString, clickPower = "No more upgrades", "6"
	default:
		// Unknown tier: render nothing, matching the original behavior.
		return
	}
	err := templates.ExecuteTemplate(w, "index.html", map[string]interface{}{
		"Balance":       bal,
		"ClickUpString": clickUpString,
		"ClickPower":    clickPower,
		"IncOneString":  status.AutoIncOneString,
		"CoinPerSecond": status.CoinPerSecond,
	})
	if err != nil {
		return
	}
}
|
package authorization
import (
"context"
"github.com/G-Research/armada/internal/armada/authorization/permissions"
)
// Owned is implemented by objects that know which users and groups own them.
type Owned interface {
	GetUserOwners() []string
	GetGroupOwners() []string
}
// PermissionChecker decides whether the principal in ctx holds a permission
// or owns an object.
type PermissionChecker interface {
	UserHasPermission(ctx context.Context, perm permissions.Permission) bool
	UserOwns(ctx context.Context, obj Owned) bool
}
// PrincipalPermissionChecker grants permissions via either group membership
// or token scopes.
type PrincipalPermissionChecker struct {
	permissionGroupMap map[permissions.Permission][]string
	permissionScopeMap map[permissions.Permission][]string
}
// NewPrincipalPermissionChecker builds a checker from the permission→groups
// and permission→scopes maps.
func NewPrincipalPermissionChecker(
	permissionGroupMap map[permissions.Permission][]string,
	permissionScopeMap map[permissions.Permission][]string) *PrincipalPermissionChecker {
	return &PrincipalPermissionChecker{
		permissionGroupMap: permissionGroupMap,
		permissionScopeMap: permissionScopeMap}
}
// UserHasPermission reports whether the principal in ctx holds perm, either
// through one of its scopes or through one of its groups.
func (checker *PrincipalPermissionChecker) UserHasPermission(ctx context.Context, perm permissions.Permission) bool {
	principal := GetPrincipal(ctx)
	return hasPermission(perm, checker.permissionScopeMap, func(scope string) bool { return principal.HasScope(scope) }) ||
		hasPermission(perm, checker.permissionGroupMap, func(group string) bool { return principal.IsInGroup(group) })
}
// UserOwns reports whether the principal in ctx owns obj, either directly by
// user name or through membership in one of the owning groups.
func (checker *PrincipalPermissionChecker) UserOwns(ctx context.Context, obj Owned) bool {
	principal := GetPrincipal(ctx)
	name := principal.GetName()
	for _, owner := range obj.GetUserOwners() {
		if owner == name {
			return true
		}
	}
	for _, g := range obj.GetGroupOwners() {
		if principal.IsInGroup(g) {
			return true
		}
	}
	return false
}
// hasPermission reports whether any value configured for perm in permMap
// satisfies the assert predicate.
func hasPermission(perm permissions.Permission, permMap map[permissions.Permission][]string, assert func(string) bool) bool {
	// A missing key yields a nil slice, which ranges zero times — the
	// explicit ok-check of the original is unnecessary.
	for _, candidate := range permMap[perm] {
		if assert(candidate) {
			return true
		}
	}
	return false
}
|
package swagger2gql
import (
"github.com/pkg/errors"
"github.com/EGT-Ukraine/go2gql/generator/plugins/graphql"
"github.com/EGT-Ukraine/go2gql/generator/plugins/swagger2gql/parser"
)
// scalarsResolvers maps each swagger scalar kind to its GraphQL type
// resolver; date-times are surfaced as plain strings.
var scalarsResolvers = map[parser.Kind]graphql.TypeResolver{
	parser.KindBoolean:  graphql.GqlBoolTypeResolver,
	parser.KindFloat64:  graphql.GqlFloat64TypeResolver,
	parser.KindFloat32:  graphql.GqlFloat32TypeResolver,
	parser.KindInt64:    graphql.GqlInt64TypeResolver,
	parser.KindInt32:    graphql.GqlInt32TypeResolver,
	parser.KindString:   graphql.GqlStringTypeResolver,
	parser.KindNull:     graphql.GqlNoDataTypeResolver,
	parser.KindFile:     graphql.GqlMultipartFileTypeResolver,
	parser.KindDateTime: graphql.GqlStringTypeResolver,
}
// TypeOutputTypeResolver returns a GraphQL output-type resolver for the
// given swagger type; when required is true the result is wrapped non-null.
func (p *Plugin) TypeOutputTypeResolver(typeFile *parsedFile, typ parser.Type, required bool) (graphql.TypeResolver, error) {
	var res graphql.TypeResolver
	switch t := typ.(type) {
	case *parser.Scalar:
		resolver, ok := scalarsResolvers[typ.Kind()]
		if !ok {
			// BUG FIX: the message was `": %s"` — no text before the kind,
			// yielding an unreadable error. Now consistent with
			// TypeInputTypeResolver.
			return nil, errors.Errorf("unimplemented scalar type: %s", typ.Kind())
		}
		res = resolver
	case *parser.Object:
		res = p.outputMessageTypeResolver(typeFile, t)
	case *parser.Array:
		elemResolver, err := p.TypeOutputTypeResolver(typeFile, t.ElemType, true)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get array element type resolver")
		}
		res = graphql.GqlListTypeResolver(elemResolver)
	case *parser.Map:
		// Maps are rendered as a list of non-null generated entry objects.
		res = func(ctx graphql.BodyContext) string {
			return p.mapOutputObjectVariable(typeFile, t)
		}
		res = graphql.GqlListTypeResolver(graphql.GqlNonNullTypeResolver(res))
	default:
		return nil, errors.Errorf("not implemented %v", typ.Kind())
	}
	if required {
		res = graphql.GqlNonNullTypeResolver(res)
	}
	return res, nil
}
// TypeInputTypeResolver returns a GraphQL input-type resolver for the given
// swagger type (scalars, objects, arrays and maps).
func (p *Plugin) TypeInputTypeResolver(typeFile *parsedFile, typ parser.Type) (graphql.TypeResolver, error) {
	switch t := typ.(type) {
	case *parser.Scalar:
		resolver, ok := scalarsResolvers[t.Kind()]
		if !ok {
			return nil, errors.Errorf("unimplemented scalar type: %s", t.Kind())
		}
		return resolver, nil
	case *parser.Object:
		return p.inputObjectTypeResolver(typeFile, t), nil
	case *parser.Array:
		elemResolver, err := p.TypeInputTypeResolver(typeFile, t.ElemType)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get array element type resolver")
		}
		return graphql.GqlListTypeResolver(elemResolver), nil
	case *parser.Map:
		// Maps become a list of non-null generated input entry objects.
		res := func(ctx graphql.BodyContext) string {
			return p.mapInputObjectVariable(typeFile, t)
		}
		return graphql.GqlListTypeResolver(graphql.GqlNonNullTypeResolver(res)), nil
	}
	return nil, errors.New("not implemented " + typ.Kind().String())
}
// TypeValueResolver returns a code-generating resolver that converts a raw
// GraphQL argument (or a context value, when ctxKey is set) into the Go
// value for the given swagger type. withErr reports whether the generated
// expression also yields an error; fromArgs reports whether the value is
// taken from resolver arguments rather than the request context.
func (p *Plugin) TypeValueResolver(
	file *parsedFile,
	typ parser.Type,
	required bool,
	ctxKey string) (_ graphql.ValueResolver, withErr, fromArgs bool, err error) {
	if ctxKey != "" {
		// Context-sourced values: emit an inline closure that fetches and
		// type-asserts ctx.Value(ctxKey), optional vs required differing
		// only in pointer-ness of the result.
		goType, err := p.goTypeByParserType(file, typ, true)
		if err != nil {
			return nil, false, false, errors.Wrap(err, "failed to resolve go type")
		}
		return func(arg string, ctx graphql.BodyContext) string {
			valueType := goType.String(ctx.Importer)
			if !required {
				return `func() (val ` + valueType + `, err error) {
					contextValue := ctx.Value("` + ctxKey + `")
					if contextValue == nil {
						err = errors.New("Can't find key '` + ctxKey + `' in context")
						return
					}
					val, ok := contextValue.(` + valueType + `)
					if !ok {
						err = errors.New("Incompatible '` + ctxKey + `' key type in context. Expected ` + valueType + `")
						return
					}
					return
				}()`
			}
			return `func() (*` + valueType + `, error) {
				contextValue := ctx.Value("` + ctxKey + `")
				if contextValue == nil {
					return nil, errors.New("Can't find key '` + ctxKey + `' in context")
				}
				val, ok := contextValue.(` + valueType + `)
				if !ok {
					return nil, errors.New("Incompatible '` + ctxKey + `' key type in context. Expected ` + valueType + `")
				}
				return &val, nil
			}()`
		}, true, false, nil
	}
	switch t := typ.(type) {
	case *parser.Scalar:
		// Files are always passed as *MultipartFile.
		if t.Kind() == parser.KindFile {
			return func(arg string, ctx graphql.BodyContext) string {
				return "(" + arg + ").(*" + ctx.Importer.Prefix(graphql.MultipartFilePkgPath) + "MultipartFile)"
			}, false, true, nil
		}
		goTyp, ok := scalarsGoTypesNames[typ.Kind()]
		if !ok {
			return nil, false, false, errors.Errorf("scalar %s is not implemented", typ.Kind())
		}
		return func(arg string, ctx graphql.BodyContext) string {
			// Note: `required` here inverts pointer-ness of the emitted
			// expression — the required form produces a pointer via an
			// inline helper closure.
			if !required {
				return arg + ".(" + goTyp + ")"
			}
			return "func(arg interface{}) *" + goTyp + "{\n" +
				"val := arg.(" + goTyp + ")\n" +
				"return &val\n" +
				"}(" + arg + ")"
		}, false, true, nil
	case *parser.Object:
		// Date-times get dedicated rendered resolvers; other objects call
		// the generated per-route Resolve function.
		if t == parser.ObjDateTime {
			return func(arg string, ctx graphql.BodyContext) string {
				if required {
					res, err := p.renderPtrDatetimeResolver(arg, ctx)
					if err != nil {
						panic(errors.Wrap(err, "failed to render ptr datetime resolver"))
					}
					return res
				}
				res, err := p.renderDatetimeValueResolverTemplate(arg, ctx)
				if err != nil {
					panic(errors.Wrap(err, "failed to render ptr datetime resolver"))
				}
				return res
			}, true, true, nil
		}
		return graphql.ResolverCall(file.OutputPkg, "Resolve"+snakeCamelCaseSlice(t.Route)), true, true, nil
	case *parser.Array:
		// Arrays resolve each element with the element resolver, rendered
		// into a loop by the array template.
		elemResolver, elemResolverWithErr, _, err := p.TypeValueResolver(file, t.ElemType, false, "")
		if err != nil {
			return nil, false, false, errors.Wrap(err, "failed to get array element type value resolver")
		}
		goTyp, err := p.goTypeByParserType(file, typ, true)
		if err != nil {
			return nil, false, false, errors.Wrap(err, "failed to resolve go type by parser type")
		}
		return func(arg string, ctx graphql.BodyContext) string {
			res, err := p.renderArrayValueResolver(arg, goTyp, ctx, elemResolver, elemResolverWithErr)
			if err != nil {
				panic(err)
			}
			return res
		}, true, true, nil
	case *parser.Map:
		return graphql.ResolverCall(file.OutputPkg, p.mapResolverFunctionName(file, t)), true, true, nil
	}
	return nil, false, true, errors.Errorf("unknown type: %v", typ.Kind().String())
}
|
package main
import (
"algogrit.com/fib-grpc/pkg/auth"
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
)
// withServerUnaryInterceptor builds the unary interceptor chain: logging and
// Prometheus metrics always, with basic-auth appended when enableAuth is set.
func withServerUnaryInterceptor(enableAuth bool) grpc.ServerOption {
	uInterceptorChain := grpcMiddleware.ChainUnaryServer(
		logUnaryInterceptor,
		grpc_prometheus.UnaryServerInterceptor,
	)
	if enableAuth {
		basic := auth.NewBasicAuth()
		// Re-chain with auth last so it runs after logging and metrics.
		uInterceptorChain = grpcMiddleware.ChainUnaryServer(
			uInterceptorChain,
			grpc_auth.UnaryServerInterceptor(basic.Interceptor),
		)
	}
	return grpc.UnaryInterceptor(uInterceptorChain)
}
// withServerStreamInterceptor mirrors withServerUnaryInterceptor for
// streaming RPCs: logging + metrics, plus basic-auth when enabled.
func withServerStreamInterceptor(enableAuth bool) grpc.ServerOption {
	sInterceptorChain := grpcMiddleware.ChainStreamServer(
		logStreamInterceptor,
		grpc_prometheus.StreamServerInterceptor,
	)
	if enableAuth {
		basic := auth.NewBasicAuth()
		sInterceptorChain = grpcMiddleware.ChainStreamServer(
			sInterceptorChain,
			grpc_auth.StreamServerInterceptor(basic.Interceptor),
		)
	}
	return grpc.StreamInterceptor(sInterceptorChain)
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package p9
import (
"os"
"testing"
)
// TestFileModeHelpers verifies that each FileMode Is* predicate matches its
// own mode and no other.
func TestFileModeHelpers(t *testing.T) {
	fns := map[FileMode]struct {
		// name identifies the file mode.
		name string
		// function is the function that should return true given the
		// right FileMode.
		function func(m FileMode) bool
	}{
		ModeRegular: {
			name:     "regular",
			function: FileMode.IsRegular,
		},
		ModeDirectory: {
			name:     "directory",
			function: FileMode.IsDir,
		},
		ModeNamedPipe: {
			name:     "named pipe",
			function: FileMode.IsNamedPipe,
		},
		ModeCharacterDevice: {
			name:     "character device",
			function: FileMode.IsCharacterDevice,
		},
		ModeBlockDevice: {
			name:     "block device",
			function: FileMode.IsBlockDevice,
		},
		ModeSymlink: {
			name:     "symlink",
			function: FileMode.IsSymlink,
		},
		ModeSocket: {
			name:     "socket",
			function: FileMode.IsSocket,
		},
	}
	for mode, info := range fns {
		// Make sure the mode doesn't identify as anything but itself.
		for testMode, testfns := range fns {
			if mode != testMode && testfns.function(mode) {
				t.Errorf("Mode %s returned true when asked if it was mode %s", info.name, testfns.name)
			}
		}
		// Make sure mode identifies as itself.
		if !info.function(mode) {
			t.Errorf("Mode %s returned false when asked if it was itself", info.name)
		}
	}
}
// TestFileModeToQID verifies the FileMode → QIDType mapping for every mode.
func TestFileModeToQID(t *testing.T) {
	for _, test := range []struct {
		// name identifies the test.
		name string
		// mode is the FileMode we start out with.
		mode FileMode
		// want is the corresponding QIDType we expect.
		want QIDType
	}{
		{
			name: "Directories are of type directory",
			mode: ModeDirectory,
			want: TypeDir,
		},
		{
			name: "Sockets are append-only files",
			mode: ModeSocket,
			want: TypeAppendOnly,
		},
		{
			name: "Named pipes are append-only files",
			mode: ModeNamedPipe,
			want: TypeAppendOnly,
		},
		{
			name: "Character devices are append-only files",
			mode: ModeCharacterDevice,
			want: TypeAppendOnly,
		},
		{
			name: "Symlinks are of type symlink",
			mode: ModeSymlink,
			want: TypeSymlink,
		},
		{
			name: "Regular files are of type regular",
			mode: ModeRegular,
			want: TypeRegular,
		},
		{
			name: "Block devices are regular files",
			mode: ModeBlockDevice,
			want: TypeRegular,
		},
	} {
		if qidType := test.mode.QIDType(); qidType != test.want {
			t.Errorf("ModeToQID test %s failed: got %o, wanted %o", test.name, qidType, test.want)
		}
	}
}
// TestP9ModeConverters round-trips every p9 FileMode through os.FileMode and
// back, expecting the original mode.
func TestP9ModeConverters(t *testing.T) {
	for _, m := range []FileMode{
		ModeRegular,
		ModeDirectory,
		ModeCharacterDevice,
		ModeBlockDevice,
		ModeSocket,
		ModeSymlink,
		ModeNamedPipe,
	} {
		if mb := ModeFromOS(m.OSMode()); mb != m {
			t.Errorf("Converting %o to OS.FileMode gives %o and is converted back as %o", m, m.OSMode(), mb)
		}
	}
}
// TestOSModeConverters round-trips representable os.FileModes through p9 and
// back, and checks that modes p9 cannot express collapse to ModeRegular.
func TestOSModeConverters(t *testing.T) {
	// Modes that can be converted back and forth.
	for _, m := range []os.FileMode{
		0, // Regular file.
		os.ModeDir,
		os.ModeCharDevice | os.ModeDevice,
		os.ModeDevice,
		os.ModeSocket,
		os.ModeSymlink,
		os.ModeNamedPipe,
	} {
		if mb := ModeFromOS(m).OSMode(); mb != m {
			t.Errorf("Converting %o to p9.FileMode gives %o and is converted back as %o", m, ModeFromOS(m), mb)
		}
	}
	// Modes that will be converted to a regular file since p9 cannot
	// express these.
	for _, m := range []os.FileMode{
		os.ModeAppend,
		os.ModeExclusive,
		os.ModeTemporary,
	} {
		if p9Mode := ModeFromOS(m); p9Mode != ModeRegular {
			t.Errorf("Converting %o to p9.FileMode should have given ModeRegular, but yielded %o", m, p9Mode)
		}
	}
}
// TestAttrMaskContains verifies AttrMask.Contains superset semantics: a mask
// contains a request only when every requested bit is set (extra bits ok).
func TestAttrMaskContains(t *testing.T) {
	req := AttrMask{Mode: true, Size: true}
	have := AttrMask{}
	if have.Contains(req) {
		t.Fatalf("AttrMask %v should not be a superset of %v", have, req)
	}
	have.Mode = true
	if have.Contains(req) {
		t.Fatalf("AttrMask %v should not be a superset of %v", have, req)
	}
	have.Size = true
	have.MTime = true
	if !have.Contains(req) {
		t.Fatalf("AttrMask %v should be a superset of %v", have, req)
	}
}
|
package generator
import (
"testing"
)
var aliasTests = []struct {
alias string
result bool
}{
{"simplealias", true},
{"simple-alias", true},
{"simple.alias", true},
{"simple/alias", true},
{"simple@alias", true},
{"simple@alias.com", true},
{".simplealias", false},
{"/simplealias", false},
}
func TestIsAlias(t *testing.T) {
for _, at := range aliasTests {
b := IsAlias(at.alias)
if b != at.result {
t.Errorf("is alias (%s) should be %t", at.alias, at.result)
}
}
}
// fileTests is the table for TestIsFile: inputs that look like local paths
// (relative, absolute, or file:// URLs) are file paths; bare names and
// remote-looking inputs are not.
var fileTests = []struct {
	alias  string
	result bool
}{
	{"simplefile", false},
	{"git@simplefile", false},
	{"http://simplefile", false},
	{".simplefile", true},
	{"./simplefile", true},
	{"/simplefile", true},
	{"../simplefile", true},
	{"../testdata/generator/simple", true},
	{"file://simplefile", true},
}
// TestIsFile exercises IsFilePath against the fileTests table.
func TestIsFile(t *testing.T) {
	for _, tc := range fileTests {
		if got := IsFilePath(tc.alias); got != tc.result {
			t.Errorf("is file (%s) should be %t", tc.alias, tc.result)
		}
	}
}
// repoTests is the table for TestIsRepo: only remote repository URLs
// (http(s) or git@host:path forms) qualify; local paths and bare names do
// not.
var repoTests = []struct {
	alias  string
	result bool
}{
	{"simplerepo", false},
	{".simplerepo", false},
	{"./simplerepo", false},
	{"/simplerepo", false},
	{"../simplerepo", false},
	{"file://simplerepo", false},
	// A bare git@host with no path is not a usable repo URL.
	{"git@simplerepo", false},
	{"http://simplerepo", true},
	{"http://simplerepo.com", true},
	{"git@simplerepo.org/slug", true},
	{"git@github.com:brainicorn/skelp.git", true},
}
// TestIsRepo exercises IsRepoURL against the repoTests table.
func TestIsRepo(t *testing.T) {
	for _, tc := range repoTests {
		if got := IsRepoURL(tc.alias); got != tc.result {
			t.Errorf("is repo (%s) should be %t", tc.alias, tc.result)
		}
	}
}
// TestFilepathFromURLNotAURL checks that a plain relative path is rejected.
func TestFilepathFromURLNotAURL(t *testing.T) {
	if _, err := FilepathFromURL("../somedir"); err == nil {
		t.Errorf("Expected error but was nil")
	}
}
|
package main
import (
"time"
)
// CommandQueue buffers VK commands per access token and delivers them in
// chunks, throttled by a requests-per-second budget (see Run/deliver).
type CommandQueue struct {
	RPS       int                  // max chunk sends per one-second tick
	ChunkSize int                  // max commands per chunk (25 by default)
	CommandsCh chan VKCommand      // inbound commands to enqueue
	ChunksCh  chan VKCommandsChunk // outbound batched chunks for consumers
}
// NewCommandsQueue returns a CommandQueue with unbuffered command and chunk
// channels and a default chunk size of 25 commands per batch.
func NewCommandsQueue(rps int) *CommandQueue {
	queue := &CommandQueue{
		RPS:       rps,
		ChunkSize: 25,
	}
	queue.CommandsCh = make(chan VKCommand)
	queue.ChunksCh = make(chan VKCommandsChunk)
	return queue
}
// Run starts the queue's background goroutine. It accumulates incoming
// commands per access token and, once per second, tries to deliver each
// token's backlog; a token's buffer entry is dropped only after a full
// delivery.
//
// NOTE(review): there is no shutdown path — the goroutine and its ticker
// live for the lifetime of the process. Confirm that is intended.
func (queue *CommandQueue) Run() {
	go func() {
		// buffer groups pending commands by access token.
		buffer := make(map[string]VKCommands)
		ticker := time.NewTicker(time.Second)
		for {
			select {
			case command := <-queue.CommandsCh:
				logger.Debugf("append command to queue: %+v", command)
				buffer[command.AccessToken] = append(
					buffer[command.AccessToken],
					command,
				)
			case <-ticker.C:
				for accessToken, commands := range buffer {
					logger.Debugf(
						"delivering %d commands for access token %s",
						len(commands),
						accessToken,
					)
					// deliver reports true only when the whole backlog went
					// out; otherwise the remainder stays for the next tick.
					// (Deleting during range is safe in Go.)
					if queue.deliver(commands, accessToken) {
						delete(buffer, accessToken)
					}
				}
			}
		}
	}()
}
// deliver pushes up to RPS chunks of at most ChunkSize commands each onto
// ChunksCh and reports whether the entire backlog was delivered.
//
// NOTE(review): if nothing was consumed in a tick, the same (whole) slice is
// retried next tick — commands are never lost, only delayed.
func (queue *CommandQueue) deliver(
	commands VKCommands,
	accessToken string,
) bool {
	total := len(commands)
	if total == 0 {
		return true
	}
	delivered := 0
	i := 0
	// Send at most RPS chunks per call; stop early once everything went out.
	for ; i < queue.RPS && delivered < total; i++ {
		size := len(commands)
		if size > queue.ChunkSize {
			size = queue.ChunkSize
		}
		// Blocking send: backpressure from the chunk consumer throttles us.
		queue.ChunksCh <- VKCommandsChunk{
			AccessToken: accessToken,
			Commands:    commands[:size],
		}
		commands = commands[size:]
		delivered += size
	}
	logger.Infof(
		"delivered %d of %d commands in %d batches",
		delivered,
		total,
		i,
	)
	return total == delivered
}
|
package controller
import (
"github.com/kataras/iris"
"go-iris-mv/model"
)
// CreteUser decodes a user from the JSON request body and persists it.
// NOTE(review): "CreteUser" looks like a typo for "CreateUser", but the name
// is kept so existing route registrations keep working.
func (idb *InDB) CreteUser(ctx iris.Context) {
	var user model.User
	// Bug fix: the ReadJSON error was ignored, so malformed bodies silently
	// inserted a zero-value user row.
	if err := ctx.ReadJSON(&user); err != nil {
		ctx.JSON(iris.Map{
			"error":  "true",
			"status": iris.StatusBadRequest,
			"result": err.Error(),
		})
		return
	}
	// Bug fix: surface DB insert failures instead of reporting success.
	if err := idb.DB.Create(&user).Error; err != nil {
		ctx.JSON(iris.Map{
			"error":  "true",
			"status": iris.StatusBadRequest,
			"result": err.Error(),
		})
		return
	}
	ctx.JSON(iris.Map{
		"error":  "false",
		"status": iris.StatusOK,
		"result": user,
	})
}
// GetAll returns every user. The response always carries a "count" field;
// "result" is null when the table is empty.
func (idb *InDB) GetAll(ctx iris.Context) {
	var (
		users  []model.User // slice: Find fills it with every row
		result iris.Map
	)
	// Bug fix: the original ctx.ReadJSON(&users) call was dead work — a list
	// endpoint takes no body and Find immediately overwrote the value — and
	// its error was ignored. It has been removed, and Find errors are now
	// reported instead of masquerading as an empty table.
	if err := idb.DB.Find(&users).Error; err != nil {
		ctx.JSON(iris.Map{
			"error":  "true",
			"status": iris.StatusBadRequest,
			"result": err.Error(),
			"count":  0,
		})
		return
	}
	if len(users) == 0 {
		result = iris.Map{
			"error":  "false",
			"status": iris.StatusOK,
			"result": nil,
			"count":  0,
		}
	} else {
		result = iris.Map{
			"error":  "false",
			"status": iris.StatusOK,
			"result": users,
			"count":  len(users),
		}
	}
	ctx.JSON(result)
}
// GetById looks up a single user by the :id route parameter and reports
// either the record (count 1) or the lookup error (count 0).
func (idb *InDB) GetById(ctx iris.Context) {
	var user model.User
	id := ctx.Params().Get("id")
	var result iris.Map
	if err := idb.DB.Where("id = ?", id).First(&user).Error; err != nil {
		result = iris.Map{
			"error":  "true",
			"status": iris.StatusBadRequest,
			"result": err.Error(),
			"count":  0,
		}
	} else {
		result = iris.Map{
			"error":  "false",
			"status": iris.StatusOK,
			"result": user,
			"count":  1,
		}
	}
	ctx.JSON(result)
}
// UpdateUser loads the user addressed by :id, applies the JSON body as a
// partial update, and reports the outcome.
func (idb *InDB) UpdateUser(ctx iris.Context) {
	var (
		user    model.User
		newUser model.User
		result  iris.Map
	)
	id := ctx.Params().Get("id")
	if err := idb.DB.First(&user, id).Error; err != nil {
		// Bug fix: the original built the "not found" response but never
		// returned, so it fell through and updated a zero-value record.
		ctx.JSON(iris.Map{
			"error":   "true",
			"status":  iris.StatusBadRequest,
			"message": "user not found",
			"result":  nil,
		})
		return
	}
	// Bug fix: reject malformed JSON instead of applying an empty update.
	if err := ctx.ReadJSON(&newUser); err != nil {
		ctx.JSON(iris.Map{
			"error":   "true",
			"status":  iris.StatusBadRequest,
			"message": "error when update user",
			"result":  err.Error(),
		})
		return
	}
	if err := idb.DB.Model(&user).Updates(newUser).Error; err != nil {
		result = iris.Map{
			"error":   "true",
			"status":  iris.StatusBadRequest,
			"message": "error when update user",
			"result":  err.Error(),
		}
	} else {
		result = iris.Map{
			"error":   "false",
			"status":  iris.StatusOK,
			"message": "success update user",
			"result":  newUser,
		}
	}
	ctx.JSON(result)
}
func (idb* InDB) DeleteUser(ctx iris.Context) {
var(
user model.User
result iris.Map
)
id := ctx.Params().Get("id")
err := idb.DB.First(&user, id).Error
if err != nil {
result = iris.Map{
"error" : "true",
"status" : iris.StatusBadRequest,
"message" : "User not found",
"result" : nil,
}
}
err = idb.DB.Where("id = ?", id).Delete(&user, id).Error
if err != nil {
result = iris.Map{
"error" : "true",
"status" : iris.StatusBadRequest,
"message" : "Failed Delete user",
"result" : err.Error(),
}
} else {
result = iris.Map{
"error" : "false",
"status" : iris.StatusOK,
"message" : "Failed Delete user",
"result" : nil,
}
}
ctx.JSON(result)
} |
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package store
import (
"testing"
"time"
"github.com/mattermost/mattermost-cloud/internal/testlib"
"github.com/mattermost/mattermost-cloud/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestCountSubscriptionsForEvent creates two subscriptions for the
// ResourceStateChange event type (one with headers, one without) plus one
// for an unrelated type, and verifies the count only includes the first two.
func TestCountSubscriptionsForEvent(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	value := "bar"
	sub1 := &model.Subscription{
		EventType: model.ResourceStateChangeEventType,
		Headers: model.Headers{
			{
				Key:   "foo",
				Value: &value,
			},
		},
	}
	err := sqlStore.CreateSubscription(sub1)
	require.NoError(t, err)
	sub2 := &model.Subscription{
		EventType: model.ResourceStateChangeEventType,
	}
	err = sqlStore.CreateSubscription(sub2)
	require.NoError(t, err)
	// sub3 uses a different event type and must not be counted.
	sub3 := &model.Subscription{
		EventType: "different",
	}
	err = sqlStore.CreateSubscription(sub3)
	require.NoError(t, err)
	subsCount, err := sqlStore.CountSubscriptionsForEvent(model.ResourceStateChangeEventType)
	require.NoError(t, err)
	assert.Equal(t, int64(2), subsCount)
}
// TestGetCreateUpdateSubscription covers the create/get/update-status/delete
// lifecycle of a subscription, including that UpdateSubscriptionStatus only
// touches delivery fields (Name must not change) and that deletion is soft
// (DeleteAt is set, the row remains fetchable).
func TestGetCreateUpdateSubscription(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	sub := &model.Subscription{
		Name:                  "test",
		URL:                   "http://test",
		OwnerID:               "tester",
		EventType:             model.ResourceStateChangeEventType,
		LastDeliveryStatus:    model.SubscriptionDeliverySucceeded,
		LastDeliveryAttemptAt: 100,
		FailureThreshold:      2 * time.Minute,
	}
	err := sqlStore.CreateSubscription(sub)
	require.NoError(t, err)
	// Create must assign an ID.
	assert.NotEmpty(t, sub.ID)
	fetchedSub, err := sqlStore.GetSubscription(sub.ID)
	require.NoError(t, err)
	assert.Equal(t, "test", fetchedSub.Name)
	assert.Equal(t, "http://test", fetchedSub.URL)
	assert.Equal(t, "tester", fetchedSub.OwnerID)
	assert.Equal(t, model.ResourceStateChangeEventType, fetchedSub.EventType)
	assert.Equal(t, model.SubscriptionDeliverySucceeded, fetchedSub.LastDeliveryStatus)
	assert.Equal(t, int64(100), fetchedSub.LastDeliveryAttemptAt)
	assert.Equal(t, 2*time.Minute, fetchedSub.FailureThreshold)
	t.Run("unknown ID", func(t *testing.T) {
		// An unknown ID yields (nil, nil), not an error.
		s, errTest := sqlStore.GetSubscription(model.NewID())
		require.NoError(t, errTest)
		assert.Nil(t, s)
	})
	sub.LastDeliveryStatus = model.SubscriptionDeliveryFailed
	sub.LastDeliveryAttemptAt = 10000
	sub.Name = "should not change"
	err = sqlStore.UpdateSubscriptionStatus(sub)
	require.NoError(t, err)
	fetchedSub, err = sqlStore.GetSubscription(sub.ID)
	require.NoError(t, err)
	assert.Equal(t, model.SubscriptionDeliveryFailed, fetchedSub.LastDeliveryStatus)
	assert.Equal(t, int64(10000), fetchedSub.LastDeliveryAttemptAt)
	// Name is untouched by the status-only update.
	assert.Equal(t, "test", fetchedSub.Name)
	err = sqlStore.DeleteSubscription(sub.ID)
	require.NoError(t, err)
	fetchedSub, err = sqlStore.GetSubscription(sub.ID)
	require.NoError(t, err)
	// Soft delete: the record still exists with DeleteAt stamped.
	assert.True(t, fetchedSub.DeleteAt > 0)
}
// TestGetSubscriptions verifies listing with paging/owner/event-type filters.
// Results are expected newest-first (fetchedIds are listed in reverse
// creation order), and deleted rows only appear with AllPagesWithDeleted.
func TestGetSubscriptions(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	subs := []*model.Subscription{
		{OwnerID: "tester1", EventType: model.ResourceStateChangeEventType},
		{OwnerID: "tester1", EventType: "test"},
		{OwnerID: "tester2", EventType: model.ResourceStateChangeEventType},
		{OwnerID: "tester3", EventType: "test2"},
	}
	for i := range subs {
		err := sqlStore.CreateSubscription(subs[i])
		require.NoError(t, err)
		// Spread creation timestamps so ordering is deterministic.
		time.Sleep(1 * time.Millisecond)
	}
	// subs[3] is deleted so the with/without-deleted filters differ.
	err := sqlStore.DeleteSubscription(subs[3].ID)
	require.NoError(t, err)
	for _, testCase := range []struct {
		description string
		filter      *model.SubscriptionsFilter
		fetchedIds  []string
	}{
		{
			description: "fetch all not deleted",
			filter:      &model.SubscriptionsFilter{Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{subs[2].ID, subs[1].ID, subs[0].ID},
		},
		{
			description: "fetch all with deleted",
			filter:      &model.SubscriptionsFilter{Paging: model.AllPagesWithDeleted()},
			fetchedIds:  []string{subs[3].ID, subs[2].ID, subs[1].ID, subs[0].ID},
		},
		{
			description: "fetch by owner",
			filter:      &model.SubscriptionsFilter{Owner: "tester1", Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{subs[1].ID, subs[0].ID},
		},
		{
			description: "fetch by event type",
			filter:      &model.SubscriptionsFilter{EventType: model.ResourceStateChangeEventType, Paging: model.AllPagesNotDeleted()},
			fetchedIds:  []string{subs[2].ID, subs[0].ID},
		},
	} {
		t.Run(testCase.description, func(t *testing.T) {
			fetchedSubs, err := sqlStore.GetSubscriptions(testCase.filter)
			require.NoError(t, err)
			assert.Equal(t, len(testCase.fetchedIds), len(fetchedSubs))
			for i, b := range fetchedSubs {
				assert.Equal(t, testCase.fetchedIds[i], b.ID)
			}
		})
	}
}
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
import (
"time"
)
// SmPolicyContextData is the OpenAPI-generated request body used when
// creating an SM policy association (Npcf_SMPolicyControl, TS 29.512).
// Judging by the JSON tags, fields without omitempty (Supi, PduSessionId,
// PduSessionType, Dnn, NotificationUri, SliceInfo) are mandatory.
type SmPolicyContextData struct {
	AccNetChId AccNetChId `json:"accNetChId,omitempty"`
	ChargEntityAddr AccNetChargingAddress `json:"chargEntityAddr,omitempty"`
	Gpsi string `json:"gpsi,omitempty"`
	Supi string `json:"supi"`
	InterGrpIds []string `json:"interGrpIds,omitempty"`
	PduSessionId int32 `json:"pduSessionId"`
	PduSessionType PduSessionType `json:"pduSessionType"`
	Chargingcharacteristics string `json:"chargingcharacteristics,omitempty"`
	Dnn string `json:"dnn"`
	// NotificationUri is where the PCF sends policy update notifications.
	NotificationUri string `json:"notificationUri"`
	AccessType AccessType `json:"accessType,omitempty"`
	RatType RatType `json:"ratType,omitempty"`
	ServingNetwork NetworkId `json:"servingNetwork,omitempty"`
	UserLocationInfo UserLocation `json:"userLocationInfo,omitempty"`
	UeTimeZone string `json:"ueTimeZone,omitempty"`
	Pei string `json:"pei,omitempty"`
	Ipv4Address string `json:"ipv4Address,omitempty"`
	Ipv6AddressPrefix Ipv6Prefix `json:"ipv6AddressPrefix,omitempty"`
	// Indicates the IPv4 address domain
	IpDomain string `json:"ipDomain,omitempty"`
	SubsSessAmbr Ambr `json:"subsSessAmbr,omitempty"`
	SubsDefQos SubscribedDefaultQos `json:"subsDefQos,omitempty"`
	// Contains the number of supported packet filter for signalled QoS rules.
	NumOfPackFilter int32 `json:"numOfPackFilter,omitempty"`
	// If it is included and set to true, the online charging is applied to the PDU session.
	Online bool `json:"online,omitempty"`
	// If it is included and set to true, the offline charging is applied to the PDU session.
	Offline bool `json:"offline,omitempty"`
	// If it is included and set to true, the 3GPP PS Data Off is activated by the UE.
	Var3gppPsDataOffStatus bool `json:"3gppPsDataOffStatus,omitempty"`
	// If it is included and set to true, the reflective QoS is supported by the UE.
	RefQosIndication bool `json:"refQosIndication,omitempty"`
	TraceReq *TraceData `json:"traceReq,omitempty"`
	SliceInfo Snssai `json:"sliceInfo"`
	QosFlowUsage QosFlowUsage `json:"qosFlowUsage,omitempty"`
	ServNfId ServingNfIdentity `json:"servNfId,omitempty"`
	SuppFeat string `json:"suppFeat,omitempty"`
	SmfId string `json:"smfId,omitempty"`
	RecoveryTime time.Time `json:"recoveryTime,omitempty"`
}
|
package syncutils
import (
"fmt"
"sync"
"time"
"github.com/iotaledger/hive.go/ds/types"
"github.com/iotaledger/hive.go/runtime/debug"
"github.com/iotaledger/hive.go/runtime/timeutil"
"github.com/iotaledger/hive.go/stringify"
)
// A StarvingMutex is a reader/writer mutual exclusion lock that allows for starvation of readers or writers by first
// prioritizing any outstanding reader or writer depending on the current mode (continue reading or continue writing).
// The lock can be held by an arbitrary number of readers or a single writer.
// The zero value for a StarvingMutex is an unlocked mutex.
//
// A StarvingMutex must not be copied after first use.
//
// If a goroutine holds a StarvingMutex for reading and another goroutine might
// call Lock, other goroutines can acquire a read lock. This allows
// recursive read locking. However, this can result in starvation of goroutines
// that tried to acquire write lock on the mutex.
// A blocked Lock call does not exclude new readers from acquiring the lock.
type StarvingMutex struct {
	readersActive  int  // number of readers currently holding the lock (guarded by mutex)
	writerActive   bool // true while a writer holds the lock (guarded by mutex)
	pendingWriters int  // writers blocked in Lock waiting their turn (guarded by mutex)

	mutex      sync.Mutex // protects the three counters above
	readerCond sync.Cond  // broadcast when readers may proceed (writer released)
	writerCond sync.Cond  // signalled when one writer may proceed
}
// NewStarvingMutex creates a new StarvingMutex whose condition variables
// share the internal mutex. (The zero value works too; this constructor
// merely pre-wires the cond locks.)
func NewStarvingMutex() *StarvingMutex {
	sm := &StarvingMutex{}
	sm.readerCond.L = &sm.mutex
	sm.writerCond.L = &sm.mutex
	return sm
}
// RLock locks starving mutex for reading.
//
// It should not be used for recursive read locking.
// A blocked Lock call DOES NOT exclude new readers from acquiring the lock. Hence, it is starving.
func (f *StarvingMutex) RLock() {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	var doneChan chan types.Empty
	if debug.GetEnabled() {
		// In debug builds, watch for this call blocking past the deadlock
		// detection timeout; doneChan stops the watchdog on success.
		doneChan = make(chan types.Empty, 1)
		go f.detectDeadlock("RLock", debug.CallerStackTrace(), doneChan)
	}
	// Wait only while a writer holds the lock; pendingWriters is ignored
	// here, which is what lets readers starve writers.
	for f.writerActive {
		f.readerCond.Wait()
	}
	if debug.GetEnabled() {
		close(doneChan)
	}
	f.readersActive++
}
// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if mutex is not locked for reading
// on entry to RUnlock.
func (f *StarvingMutex) RUnlock() {
	f.mutex.Lock()
	if f.readersActive == 0 {
		panic("RUnlock called without RLock")
	}
	if f.writerActive {
		panic("RUnlock called while writer active")
	}
	f.readersActive--
	// Last reader out hands the lock to a waiting writer, if any.
	if f.readersActive == 0 && f.pendingWriters > 0 {
		f.mutex.Unlock()
		f.writerCond.Signal()
		return
	}
	f.mutex.Unlock()
}
// Lock locks starving mutex for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
//
// If there are waiting writers these will be served first before ANY reader can read again. Hence, it is starving.
func (f *StarvingMutex) Lock() {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	var doneChan chan types.Empty
	if debug.GetEnabled() {
		// Debug watchdog: reports if this acquisition blocks suspiciously long.
		doneChan = make(chan types.Empty, 1)
		go f.detectDeadlock("Lock", debug.CallerStackTrace(), doneChan)
	}
	// Register as pending BEFORE waiting so RUnlock/Unlock know a writer is
	// queued and route the signal here instead of waking readers.
	f.pendingWriters++
	for !f.canWrite() {
		f.writerCond.Wait()
	}
	if debug.GetEnabled() {
		close(doneChan)
	}
	f.pendingWriters--
	f.writerActive = true
}
// Unlock unlocks starving mutex for writing. It is a run-time error if mutex is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked StarvingMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) a StarvingMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (f *StarvingMutex) Unlock() {
	f.mutex.Lock()
	if f.readersActive > 0 {
		panic("Unlock called while readers active")
	}
	f.writerActive = false
	// No writer queued: wake ALL blocked readers.
	if f.pendingWriters == 0 {
		f.mutex.Unlock()
		f.readerCond.Broadcast()
		return
	}
	f.mutex.Unlock()
	// Writers queued: hand off to exactly one of them; readers keep waiting.
	f.writerCond.Signal()
}
// String returns a string representation of the StarvingMutex.
//
// NOTE(review): the fields are read without holding f.mutex, so the snapshot
// is racy under the race detector; confirm this is acceptable for a
// debug-only stringer (locking here could deadlock if called while f.mutex
// is held).
func (f *StarvingMutex) String() (humanReadable string) {
	return stringify.Struct("StarvingMutex",
		stringify.NewStructField("WriterActive", f.writerActive),
		stringify.NewStructField("ReadersActive", f.readersActive),
		stringify.NewStructField("PendingWriters", f.pendingWriters),
	)
}
// canWrite reports whether a writer may take the lock right now: no active
// writer and no active readers. Callers must hold f.mutex.
func (f *StarvingMutex) canWrite() bool {
	return !f.writerActive && f.readersActive == 0
}
// detectDeadlock is the debug-mode watchdog spawned by RLock/Lock: if the
// acquisition (signalled via done) does not finish within
// debug.DeadlockDetectionTimeout, it prints a warning with the caller's
// stack trace. It never interrupts the acquisition itself.
func (f *StarvingMutex) detectDeadlock(lockType string, trace string, done chan types.Empty) {
	timer := time.NewTimer(debug.DeadlockDetectionTimeout)
	defer timeutil.CleanupTimer(timer)
	select {
	case <-done:
		return
	case <-timer.C:
		fmt.Println("possible deadlock while trying to acquire " + lockType + " (" + debug.DeadlockDetectionTimeout.String() + ") ...")
		fmt.Println("\n" + trace)
	}
}
|
package main
import (
"io"
"os"
"fmt"
"github.com/xiaq/sxed"
)
const (
	// READ_BLOCK is the chunk size, in bytes, used by slurp when reading
	// standard input.
	READ_BLOCK = 4 * 1024
)

// usage is printed when the program is invoked with the wrong argument count.
var usage = `Usage: sxed PROGRAM`
func slurp(f *os.File) ([]byte, error) {
bs := make([]byte, 0, READ_BLOCK)
for {
b := make([]byte, READ_BLOCK)
_, err := f.Read(b)
switch err {
case nil:
bs = append(bs, b...)
case io.EOF:
return bs, nil
default:
return nil, err
}
}
}
// main parses the single PROGRAM argument with sxed.Parse, slurps stdin,
// and applies each chain of the program to the text in order.
//
// Exit codes: 1 bad usage, 2 parse error, 3 read error.
//
// NOTE(review): the transformed text is never written to stdout here —
// confirm whether sxed.Eval emits output itself or a final print is missing.
func main() {
	// Parse args.
	if len(os.Args) != 2 {
		fmt.Println(usage)
		os.Exit(1)
	}
	progText := os.Args[1]
	program, err := sxed.Parse(progText)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
	// Read input.
	text, err := slurp(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(3)
	}
	// Evaluate.
	for _, chain := range program {
		text = sxed.Eval(text, chain)
	}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package common
import (
"sync/atomic"
"testing"
"time"
)
// TestSemaphoreWake verifies that Wait on a zero-count semaphore blocks
// until the posting goroutine has run.
func TestSemaphoreWake(t *testing.T) {
	var done uint32
	sem := NewSemaphore(0)
	go func() {
		time.Sleep(10 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	sem.Wait()
	if atomic.LoadUint32(&done) != 1 {
		t.Fatalf("sem.Wait did not wait for sem.Post")
	}
}
// TestSemaphoreCount exercises the internal count: these sequences must not
// block (the test passes by completing).
func TestSemaphoreCount(t *testing.T) {
	// Initial count 1 plus one Post allows exactly two Waits.
	positive := NewSemaphore(1)
	positive.Post()
	positive.Wait()
	positive.Wait()
	// A negative initial count must be paid off by Posts before Wait passes.
	negative := NewSemaphore(-1)
	negative.Post()
	negative.Post()
	negative.Wait()
}
// TestSemaphoreMultipleGoroutines coordinates four goroutines through two
// semaphores: three posters wake the main goroutine's Waits, and a separate
// waiter goroutine (first func) is released by one of those Posts and
// reports completion via sem2. All four increments must be visible at the end.
func TestSemaphoreMultipleGoroutines(t *testing.T) {
	var done uint32
	sem := NewSemaphore(0)
	sem2 := NewSemaphore(0)
	go func() {
		// Consumes one Post, then signals the main goroutine via sem2.
		sem.Wait()
		atomic.AddUint32(&done, 1)
		sem2.Post()
	}()
	go func() {
		time.Sleep(10 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	go func() {
		time.Sleep(20 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	sem.Wait()
	go func() {
		time.Sleep(10 * time.Nanosecond)
		atomic.AddUint32(&done, 1)
		sem.Post()
	}()
	sem.Wait()
	// Ensure the waiter goroutine has finished before checking the counter.
	sem2.Wait()
	doneVal := atomic.LoadUint32(&done)
	if doneVal != 4 {
		t.Fatalf("sem.Wait did not wait for sem.Posts")
	}
}
|
package stack
// MinValueStackOpt is a placeholder for an optimized min-value stack.
// NOTE(review): every method below is an unimplemented stub returning the
// zero value — confirm whether this type is still meant to be filled in.
type MinValueStackOpt struct {
}

// Pop is an unimplemented stub; it always returns 0.
func (stack *MinValueStackOpt) Pop() int {
	return 0
}

// Push is an unimplemented stub; it always returns false.
func (stack *MinValueStackOpt) Push(value int) bool {
	return false
}

// Peek is an unimplemented stub; it always returns 0.
func (stack *MinValueStackOpt) Peek() int {
	return 0
}

// Empty is an unimplemented stub; it always returns false.
func (stack *MinValueStackOpt) Empty() bool {
	return false
}

// Search is an unimplemented stub; it always returns false.
func (stack *MinValueStackOpt) Search(value int) bool {
	return false
}

// GetRealSize is an unimplemented stub; it always returns 0.
func (stack *MinValueStackOpt) GetRealSize() int {
	return 0
}

// GetMaxSize is an unimplemented stub; it always returns 0.
func (stack *MinValueStackOpt) GetMaxSize() int {
	return 0
}

// Resize is an unimplemented stub; it does nothing.
func (stack *MinValueStackOpt) Resize() {
}
|
package main
import "fmt"
// removeDuplicates compacts a sorted slice in place so each value appears
// once, and returns the deduplicated prefix. The input's backing array is
// mutated, matching the classic two-pointer LeetCode-26 contract.
func removeDuplicates(nums []int) []int {
	if len(nums) == 0 {
		return []int{}
	}
	// last indexes the final element of the deduplicated prefix.
	last := 0
	for _, v := range nums[1:] {
		if v != nums[last] {
			last++
			nums[last] = v
		}
	}
	return nums[:last+1]
}
// main demonstrates removeDuplicates on the sample input {1, 1, 2}.
func main() {
	input := []int{1, 1, 2}
	fmt.Println(removeDuplicates(input))
}
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s3blob_test
import (
"bytes"
"context"
"fmt"
"io"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/google/go-cloud/blob"
"github.com/google/go-cloud/blob/s3blob"
"github.com/google/go-cloud/internal/testing/setup"
"github.com/google/go-cmp/cmp"
)
const (
bucketPrefix = "go-cloud"
region = "us-east-2"
)
// TestNewBucketNaming tests if buckets can be created with incorrect characters.
// Requires live AWS credentials via setup.NewAWSSession; buckets created
// successfully are force-deleted afterwards.
func TestNewBucketNaming(t *testing.T) {
	tests := []struct {
		name, bucketName string
		wantErr          bool
	}{
		{
			name:       "A good bucket name should pass",
			bucketName: "good-bucket",
		},
		{
			name:       "A name with leading digits should pass",
			bucketName: "8ucket-nam3",
		},
		{
			name:       "A name with leading underscores should fail",
			bucketName: "_bucketname_",
			wantErr:    true,
		},
		{
			name:       "A name with upper case letters should fail",
			bucketName: "bucketnameUpper",
			wantErr:    true,
		},
		{
			name:       "A name with an invalid character should fail",
			bucketName: "bucketname?invalidchar",
			wantErr:    true,
		},
		{
			name:       "A name that's too long should fail",
			bucketName: strings.Repeat("a", 64),
			wantErr:    true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Fresh session per subtest; done releases recorder resources.
			sess, done := setup.NewAWSSession(t, region)
			defer done()
			svc := s3.New(sess)
			bkt := fmt.Sprintf("%s.%s", bucketPrefix, tc.bucketName)
			_, err := svc.CreateBucket(&s3.CreateBucketInput{
				Bucket: &bkt,
				CreateBucketConfiguration: &s3.CreateBucketConfiguration{LocationConstraint: aws.String(region)},
			})
			switch {
			case err != nil && !tc.wantErr:
				t.Errorf("got %q; want nil", err)
			case err == nil && tc.wantErr:
				t.Errorf("got nil error; want error")
			case !tc.wantErr:
				// Creation succeeded as expected: clean up the bucket.
				forceDeleteBucket(svc, bkt)
			}
		})
	}
}
// TestNewWriterObjectNaming writes a small payload under a variety of object
// keys (ASCII, Unicode, empty, invalid UTF-8, at and over the 1024-char key
// limit) and checks whether Close reports an error as expected.
func TestNewWriterObjectNaming(t *testing.T) {
	tests := []struct {
		name, objName string
		wantErr       bool
	}{
		{
			name:    "An ASCII name should pass",
			objName: "object-name",
		},
		{
			name:    "A Unicode name should pass",
			objName: "文件名",
		},
		{
			name:    "An empty name should fail",
			wantErr: true,
		},
		{
			name:    "A name of escaped chars should fail",
			objName: "\xF4\x90\x80\x80",
			wantErr: true,
		},
		{
			name:    "A name of 1024 chars should succeed",
			objName: strings.Repeat("a", 1024),
		},
		{
			name:    "A name of 1025 chars should fail",
			objName: strings.Repeat("a", 1025),
			wantErr: true,
		},
		{
			name:    "A long name of Unicode chars should fail",
			objName: strings.Repeat("☺", 342),
			wantErr: true,
		},
	}
	// One shared bucket for all subtests, removed at the end.
	sess, done := setup.NewAWSSession(t, region)
	defer done()
	svc := s3.New(sess)
	bkt := fmt.Sprintf("%s.%s", bucketPrefix, "test-obj-naming")
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: &bkt,
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{LocationConstraint: aws.String(region)},
	})
	defer forceDeleteBucket(svc, bkt)
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			sess, done := setup.NewAWSSession(t, region)
			defer done()
			b, err := s3blob.OpenBucket(ctx, sess, bkt)
			if err != nil {
				t.Fatal(err)
			}
			w, err := b.NewWriter(ctx, tc.objName, nil)
			if err != nil {
				t.Fatal(err)
			}
			_, err = io.WriteString(w, "foo")
			if err != nil {
				t.Fatal(err)
			}
			// Key validation errors surface on Close, not on NewWriter/Write.
			err = w.Close()
			switch {
			case err != nil && !tc.wantErr:
				t.Errorf("got %q; want nil", err)
			case err == nil && tc.wantErr:
				t.Errorf("got nil; want error")
			}
		})
	}
}
// TestRead uploads a fixed object and exercises NewRangeReader with various
// offset/length combinations: negative offset (error), zero length
// (metadata-only), mid-range, tail, and full reads.
func TestRead(t *testing.T) {
	content := []byte("something worth reading")
	contentSize := int64(len(content))
	tests := []struct {
		name           string
		offset, length int64
		want           []byte
		got            []byte
		wantSize       int64
		wantError      bool
	}{
		{
			name:      "negative offset",
			offset:    -1,
			wantError: true,
		},
		{
			// length 0 returns no bytes but still reports the object size.
			name:     "read metadata",
			length:   0,
			want:     make([]byte, 0),
			got:      make([]byte, 0),
			wantSize: contentSize,
		},
		{
			name:     "read from positive offset to end",
			offset:   10,
			length:   -1,
			want:     content[10:],
			got:      make([]byte, contentSize-10),
			wantSize: contentSize - 10,
		},
		{
			name:     "read a part in middle",
			offset:   10,
			length:   5,
			want:     content[10:15],
			got:      make([]byte, 5),
			wantSize: 5,
		},
		{
			name:     "read in full",
			offset:   0,
			length:   -1,
			want:     content,
			got:      make([]byte, contentSize),
			wantSize: contentSize,
		},
	}
	// Shared bucket plus one uploaded object used by all subtests.
	sess, done := setup.NewAWSSession(t, region)
	defer done()
	svc := s3.New(sess)
	bkt := fmt.Sprintf("%s.%s", bucketPrefix, "test-read")
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: &bkt,
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{LocationConstraint: aws.String(region)},
	})
	defer forceDeleteBucket(svc, bkt)
	if err != nil {
		t.Fatal(err)
	}
	obj := "test_read"
	uploader := s3manager.NewUploader(sess)
	if _, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bkt),
		Key:    aws.String(obj),
		Body:   bytes.NewReader(content),
	}); err != nil {
		t.Fatalf("error uploading test object: %v", err)
	}
	ctx := context.Background()
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			sess, done := setup.NewAWSSession(t, region)
			defer done()
			b, err := s3blob.OpenBucket(ctx, sess, bkt)
			if err != nil {
				t.Fatal(err)
			}
			r, err := b.NewRangeReader(context.Background(), obj, tc.offset, tc.length)
			switch {
			case err != nil && !tc.wantError:
				t.Fatalf("cannot create new reader: %v", err)
			case err == nil && tc.wantError:
				t.Fatal("got nil error; want error")
			case tc.wantError:
				// Expected failure already observed; nothing to read.
				return
			}
			if _, err := r.Read(tc.got); err != nil && err != io.EOF {
				t.Fatalf("error during read: %v", err)
			}
			if !cmp.Equal(tc.got, tc.want) || r.Size() != tc.wantSize {
				t.Errorf("got %s of size %d; want %s of size %d", tc.got, r.Size(), tc.want, tc.wantSize)
			}
			r.Close()
		})
	}
}
// TestWrite writes objects through the blob.Writer with explicit content
// types, then reads them back via the raw S3 API to verify both the bytes
// and the stored Content-Type.
func TestWrite(t *testing.T) {
	tests := []struct {
		name, obj   string
		want        []byte
		wantErr     bool
		contentType string
		wantSize    int64
	}{
		{
			name:        "write HTML",
			obj:         "write_html",
			want:        []byte("Hello, HTML!"),
			contentType: "text/html",
			wantSize:    12,
		},
		{
			name:        "write JSON",
			obj:         "write_json",
			want:        []byte("Hello, JSON!"),
			contentType: "application/json",
			wantSize:    12,
		},
	}
	// Shared bucket for both subtests, removed at the end.
	sess, done := setup.NewAWSSession(t, region)
	defer done()
	svc := s3.New(sess)
	bkt := fmt.Sprintf("%s.%s", bucketPrefix, "test-write")
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: &bkt,
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{LocationConstraint: aws.String(region)},
	})
	defer forceDeleteBucket(svc, bkt)
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			sess, done := setup.NewAWSSession(t, region)
			defer done()
			svc := s3.New(sess)
			b, err := s3blob.OpenBucket(ctx, sess, bkt)
			if err != nil {
				t.Fatal(err)
			}
			opts := &blob.WriterOptions{
				ContentType: tc.contentType,
			}
			w, err := b.NewWriter(ctx, tc.obj, opts)
			if err != nil {
				t.Errorf("error creating writer: %v", err)
			}
			n, err := w.Write(tc.want)
			if n != len(tc.want) || err != nil {
				t.Errorf("writing object: %d written, got error %v", n, err)
			}
			// The upload is committed on Close.
			if err := w.Close(); err != nil {
				t.Fatalf("error closing writer: %v", err)
			}
			// Fetch via the raw S3 API to check what was actually stored.
			req, resp := svc.GetObjectRequest(&s3.GetObjectInput{
				Bucket: aws.String(bkt),
				Key:    aws.String(tc.obj),
			})
			if err := req.Send(); err != nil {
				t.Fatalf("error getting object: %v", err)
			}
			body := resp.Body
			got := make([]byte, tc.wantSize)
			n, err = body.Read(got)
			if err != nil && err != io.EOF {
				t.Errorf("reading object: %d read, got error %v", n, err)
			}
			defer body.Close()
			if !cmp.Equal(got, tc.want) || int64(n) != tc.wantSize || *resp.ContentType != tc.contentType {
				t.Errorf("got %s, size %d, content-type %s, want %s, size %d, content-type %s",
					got, n, *resp.ContentType, tc.want, tc.wantSize, tc.contentType)
			}
		})
	}
}
// TestCloseWithoutWrite checks that closing a writer without writing any
// bytes creates an empty (zero-length) object rather than failing.
func TestCloseWithoutWrite(t *testing.T) {
	sess, done := setup.NewAWSSession(t, region)
	defer done()
	svc := s3.New(sess)
	bkt := fmt.Sprintf("%s.%s", bucketPrefix, "test-close-without-write")
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: &bkt,
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{LocationConstraint: aws.String(region)},
	})
	defer forceDeleteBucket(svc, bkt)
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	b, err := s3blob.OpenBucket(ctx, sess, bkt)
	if err != nil {
		t.Fatal(err)
	}
	obj := "test_close_without_write"
	w, err := b.NewWriter(ctx, obj, nil)
	if err != nil {
		t.Errorf("error creating new writer: %v", err)
	}
	if err := w.Close(); err != nil {
		t.Errorf("error closing writer without writing: %v", err)
	}
	// HEAD the object directly to confirm it exists with zero length.
	req, resp := svc.HeadObjectRequest(&s3.HeadObjectInput{
		Bucket: aws.String(bkt),
		Key:    aws.String(obj),
	})
	err = req.Send()
	size := aws.Int64Value(resp.ContentLength)
	if err != nil || size != 0 {
		t.Errorf("want 0 bytes written, got %d bytes written, error %v", size, err)
	}
	if err := b.Delete(ctx, obj); err != nil {
		t.Errorf("error deleting object: %v", err)
	}
}
// TestDelete uploads an object, deletes it through the blob API, confirms a
// direct HEAD now fails, and verifies a second Delete reports not-exist.
func TestDelete(t *testing.T) {
	sess, done := setup.NewAWSSession(t, region)
	defer done()
	svc := s3.New(sess)
	bkt := fmt.Sprintf("%s.%s", bucketPrefix, "test-delete")
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: &bkt,
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{LocationConstraint: aws.String(region)},
	})
	defer forceDeleteBucket(svc, bkt)
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	obj := "test_delete"
	uploader := s3manager.NewUploader(sess)
	if _, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bkt),
		Key:    aws.String(obj),
		Body:   bytes.NewReader([]byte("something obsolete")),
	}); err != nil {
		t.Fatalf("error uploading test object: %v", err)
	}
	b, err := s3blob.OpenBucket(ctx, sess, bkt)
	if err != nil {
		t.Fatal(err)
	}
	if err := b.Delete(ctx, obj); err != nil {
		t.Errorf("error occurs when deleting an existing object: %v", err)
	}
	// A direct HEAD must now fail, proving the object is gone server-side.
	req, _ := svc.HeadObjectRequest(&s3.HeadObjectInput{
		Bucket: aws.String(bkt),
		Key:    aws.String(obj),
	})
	if err := req.Send(); err == nil {
		t.Errorf("object deleted, got err %v, want NotFound error", err)
	}
	// Deleting a missing object must map to blob's not-exist error.
	if err := b.Delete(ctx, obj); err == nil || !blob.IsNotExist(err) {
		t.Errorf("Delete: got %#v, want not exist error", err)
	}
}
// forceDeleteBucket empties a bucket (S3 refuses to delete non-empty
// buckets), deletes it, and waits until it is gone.
//
// This function doesn't report errors back because they're not really useful.
// If the bucket can't be deleted it'll become obvious later.
func forceDeleteBucket(svc *s3.S3, bucket string) {
	resp, _ := svc.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
	var objs []*s3.ObjectIdentifier
	for _, o := range resp.Contents {
		objs = append(objs, &s3.ObjectIdentifier{Key: aws.String(*o.Key)})
	}
	var items s3.Delete
	items.SetObjects(objs)
	_, _ = svc.DeleteObjects(&s3.DeleteObjectsInput{Bucket: &bucket, Delete: &items})
	_, _ = svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: &bucket})
	_ = svc.WaitUntilBucketNotExists(&s3.HeadBucketInput{Bucket: &bucket})
}
|
package main
import "github.com/codingXiang/gecko/cmd"
//go:generate go run main.go general model -s ./example -f user.go -d ./output/model
//go:generate go run main.go general repo -s ./output/model -f user.go -d ./output/module -p user
//go:generate go run main.go general svc -s ./output/model -f user.go -d ./output/module -p user
//go:generate go run main.go general delivery http -s ./output/module/user -f service.go -d ./output/module -p user
// main hands control to the gecko CLI root command (cobra-style Execute).
func main() {
	cmd.Execute()
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"sort"
"strconv"
)
// main wires solve to the process's stdin/stdout.
func main() {
	solve(os.Stdin, os.Stdout)
}
// solve reads the problem input and writes the greedy answer.
//
// Input layout (one token per line, since the Scanner uses its default
// line-based split): n, then r, then n point coordinates.
// NOTE(review): strconv.Atoi errors are silently ignored, so malformed input
// degenerates to zeros — presumably acceptable for contest-judge input.
func solve(stdin io.Reader, stdout io.Writer) {
	sc := bufio.NewScanner(stdin)
	sc.Scan()
	n, _ := strconv.Atoi(sc.Text())
	sc.Scan()
	r, _ := strconv.Atoi(sc.Text())
	x := []int{}
	for i := 0; i < n; i++ {
		sc.Scan()
		xi, _ := strconv.Atoi(sc.Text())
		x = append(x, xi)
	}
	a := greedySearch(n, r, x)
	fmt.Fprintln(stdout, a)
}
// greedySearch returns the minimum number of marks needed so that every one
// of the n points in x lies within distance r of some marked point, where
// marks are placed on points.
//
// Greedy: scan points in ascending order; for the leftmost uncovered point,
// mark the rightmost point within r of it, covering everything up to
// marked+r. Note that x is sorted in place (as before).
func greedySearch(n, r int, x []int) (count int) {
	sort.Ints(x)
	covered := -1 // rightmost coordinate already covered
	for i := 0; i < n; i++ {
		if x[i] <= covered {
			continue
		}
		// Pick the rightmost point within x[i]+r. Fix: since x is sorted we
		// can break at the first point beyond the radius — the original
		// scanned every remaining point, making the loop unconditionally
		// quadratic.
		marked := x[i]
		for j := i + 1; j < n; j++ {
			if x[j] > x[i]+r {
				break
			}
			marked = x[j]
		}
		covered = marked + r
		count++
	}
	return
}
|
package sshttp
import (
"bytes"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
const (
	// sftpNoSuchFile is the error code returned by SFTP if access is attempted
	// to a file which does not exist (SSH_FX_NO_SUCH_FILE in the SFTP drafts).
	sftpNoSuchFile = 2
)
// RoundTripper implements http.RoundTripper, and handles performing a HTTP
// request over SSH, using SFTP to send a file in response. A RoundTripper can
// automatically dial SSH hosts when RoundTrip is called, assuming the correct
// default credentials are provided. If more control is needed, use the Dial
// method to configure each host on an individual basis.
type RoundTripper struct {
	// config is the default SSH client configuration, used when a host is
	// dialed lazily rather than pre-configured via Dial.
	config *ssh.ClientConfig
	// conn maps a host address to its open SSH+SFTP client pair.
	conn map[string]*clientPair
}
// NewRoundTripper accepts a ssh.ClientConfig struct and returns a
// RoundTripper which can be used by net/http. The configuration parameter
// is used as the default for any SSH hosts which are not explicitly
// configured using the Dial method.
func NewRoundTripper(config *ssh.ClientConfig) *RoundTripper {
	rt := &RoundTripper{config: config}
	rt.conn = make(map[string]*clientPair)
	return rt
}
// Dial attempts to dial a SSH connection to the specified host, using the
// specified SSH client configuration. If the config parameter is nil,
// the default set by NewRoundTripper will be used.
//
// Dial should be used if more than a single host is being dialed by
// RoundTripper, so that various SSH client configurations may be used, if
// needed. For a single host, allowing RoundTripper to lazily dial a host
// using the default SSH client configuration is typically acceptable.
func (rt *RoundTripper) Dial(host string, config *ssh.ClientConfig) error {
	// Fall back to the default configuration when none is given.
	if config == nil {
		config = rt.config
	}
	// Establish the SSH connection and the SFTP session on top of it,
	// then remember the pair for this host.
	pair, err := dialSSHSFTP(host, config)
	if err != nil {
		return err
	}
	rt.conn[host] = pair
	return nil
}
// Close closes all open SFTP and SSH connections for this RoundTripper.
// Every connection is attempted even if an earlier one fails to close
// (the original returned on the first error, leaking the rest); the first
// error encountered, if any, is returned.
func (rt *RoundTripper) Close() error {
	var first error
	// Map iteration order is undefined in Go, but this is okay for our
	// purposes. Deleting during range is safe.
	for k, pair := range rt.conn {
		if err := pair.sftpc.Close(); err != nil && first == nil {
			first = err
		}
		if err := pair.sshc.Close(); err != nil && first == nil {
			first = err
		}
		delete(rt.conn, k)
	}
	return first
}
// RoundTrip implements http.RoundTripper, and performs a HTTP request over SSH,
// using SFTP to coordinate the response. If a SSH connection is not already
// open to the host specified in r.URL.Host, RoundTrip will attempt to lazily
// dial the host using the default configuration from NewRoundTripper.
func (rt *RoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	// Ensure a connection to the request host exists, dialing if needed.
	pair, err := rt.lazyDial(r.URL.Host)
	if err != nil {
		return nil, err
	}
	// GET - retrieve a file's contents from the remote filesystem.
	if r.Method == "GET" {
		return get(pair, r)
	}
	// Any other HTTP method is unsupported.
	return httpResponse(http.StatusMethodNotAllowed, nil, nil), nil
}
// lazyDial attempts to dial a connection to a host if one is not already
// open. If a connection is open, it returns that connection's clientPair.
func (rt *RoundTripper) lazyDial(host string) (*clientPair, error) {
	// Reuse an existing, open connection when available.
	if pair, ok := rt.conn[host]; ok {
		return pair, nil
	}
	// Otherwise dial a new connection using the default config; Dial stores
	// the resulting pair in rt.conn on success.
	if err := rt.Dial(host, rt.config); err != nil {
		return nil, err
	}
	return rt.conn[host], nil
}
// get attempts to retrieve a file from a remote filesystem over SSH, using SFTP
// to return the file's contents in a HTTP response body.
func get(p *clientPair, r *http.Request) (*http.Response, error) {
	// Check for the requested file in the remote filesystem
	f, err := p.sftpc.Open(r.URL.Path)
	if err != nil {
		serr, ok := err.(*sftp.StatusError)
		if !ok {
			return nil, err
		}
		// If file does not exist, send a 404
		if serr.Code == sftpNoSuchFile {
			return httpResponse(http.StatusNotFound, nil, nil), nil
		}
		return nil, err
	}
	// Stat the file to retrieve size and modtime
	stat, err := f.Stat()
	if err != nil {
		return nil, err
	}
	// Attach headers for file information
	h := http.Header{}
	h.Set("Content-Length", strconv.FormatInt(stat.Size(), 10))
	h.Set("Last-Modified", stat.ModTime().UTC().Format(http.TimeFormat))
	// Attempt to discover Content-Type using file extension
	cType := mime.TypeByExtension(filepath.Ext(stat.Name()))
	if cType != "" {
		h.Set("Content-Type", cType)
	} else {
		// As a fallback, read the first 512 bytes of the file to determine
		// its content type. io.CopyN returns io.EOF for files shorter than
		// 512 bytes; that is not a failure here (the original returned an
		// error for every small file).
		buf := bytes.NewBuffer(nil)
		rn, err := io.CopyN(buf, f, 512)
		if err != nil && err != io.EOF {
			return nil, err
		}
		h.Set("Content-Type", http.DetectContentType(buf.Bytes()[:rn]))
		// Rewind file so the entire file can be transferred
		// (io.SeekStart replaces the deprecated os.SEEK_SET).
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return nil, err
		}
	}
	// Open an in-memory pipe to stream the file from disk to the HTTP response
	pr, pw := io.Pipe()
	go func() {
		// Transfer file bytes and clean up
		var sErr stickyError
		_, err := io.CopyN(pw, f, stat.Size())
		sErr.Set(err)
		sErr.Set(f.Close())
		// Send any errors during streaming or cleanup to the client.
		// This method always returns nil error.
		_ = pw.CloseWithError(sErr.Get())
	}()
	// Send HTTP response with code, pipe reader body, and headers
	return httpResponse(
		http.StatusOK,
		pr,
		h,
	), nil
}
// httpResponse builds a HTTP response with typical headers using an input
// HTTP status code, response body, and initial HTTP headers.
func httpResponse(code int, body io.ReadCloser, headers http.Header) *http.Response {
res := &http.Response{
StatusCode: code,
ProtoMajor: 1,
ProtoMinor: 1,
Body: body,
}
// Apply parameter headers and identify server
h := http.Header{}
h.Set("Server", "github.com/mdlayher/sshttp")
for k, v := range headers {
for _, vv := range v {
h.Add(k, vv)
}
}
// Apply defaults for headers, if they do not already exist
const date = "Date"
if h.Get(date) == "" {
h.Set(date, time.Now().UTC().Format(http.TimeFormat))
}
const contentType = "Content-Type"
if h.Get(contentType) == "" {
h.Set(contentType, "text/plain; charset=utf-8")
}
const connection = "Connection"
if code != http.StatusOK && h.Get(connection) == "" {
h.Set(connection, "close")
}
res.Header = h
return res
}
|
package sqlstore
import (
"bytes"
"fmt"
"github.com/ssok8s/ssok8s/pkg/bus"
m "github.com/ssok8s/ssok8s/pkg/models"
"github.com/ssok8s/ssok8s/pkg/util"
"strconv"
"strings"
"time"
)
// init registers every user-related command/query handler on the "sql" bus,
// so other packages can dispatch these operations without importing
// sqlstore directly.
func init() {
	bus.AddHandler("sql", CreateUser)
	bus.AddHandler("sql", DeleteUserByUsername)
	bus.AddHandler("sql", DeleteUser)
	bus.AddHandler("sql", UpdateUser)
	bus.AddHandler("sql", GetUserByUsername)
	bus.AddHandler("sql", GetSignedInUserById)
	bus.AddHandler("sql", CreateAdminUser)
	bus.AddHandler("sql", ResetPassword)
	bus.AddHandler("sql", UpdatePassword)
	bus.AddHandler("sql", UpdateUserRoleAndGroup)
	bus.AddHandler("sql", listUsers)
	bus.AddHandler("sql", GetHasPermissionUserIdsByUserId)
}
// CreateUser inserts a new SSO user plus one sso_user_group row per
// requested group membership, all within a single transaction. The created
// user (with its encoded password and salt) is stored back in cmd.Result.
func CreateUser(cmd *m.CreateUserCommand) error {
	return inTransaction(func(sess *DBSession) error {
		user := m.SsoUser{
			Address: cmd.Address,
			Alternative_email: cmd.Alternative_email,
			Avatar: cmd.Avatar,
			City: cmd.City,
			Company: cmd.Company,
			Contact_phone: cmd.Contact_phone,
			Country: cmd.Country,
			Creation_time: time.Now().Unix(),
			Last_modified_pwd_time: time.Now().Unix(),
			Creator: cmd.Creator,
			Mobile_phone: cmd.Mobile_phone,
			Office_phone: cmd.Office_phone,
			Origin: cmd.Origin,
			Postal_code: cmd.Postal_code,
			Role: cmd.Role,
			Signed_in_frequency_counter: cmd.Signed_in_frequency_counter,
			Username: cmd.Username,
			// Initial password seed is the current Unix timestamp as a string.
			Password: strconv.FormatInt(time.Now().Unix(), 10),
			Id: cmd.Id,
			Status: cmd.Status,
		}
		user.Salt = util.GetRandomString(10)
		// NOTE(review): the password is encoded twice — first the raw
		// timestamp seed, then the first 8 characters of that hash suffixed
		// with "@Ss0". Both conditions are always true here; confirm this
		// double-encode is intentional.
		if len(user.Password) > 0 {
			user.Password = util.EncodePassword(user.Password, user.Salt)
		}
		if len(user.Password) > 0 {
			user.Password = util.EncodePassword(fmt.Sprintf("%v@Ss0", user.Password[0:8]), user.Salt)
		}
		if _, err := sess.Insert(&user); err != nil {
			return err
		}
		// Persist one membership row per requested group.
		for _, group := range cmd.UserGroups {
			userGroup := m.SsoUserGroup{
				UserId: cmd.Id,
				GroupId: group.GroupId,
				GroupRole: group.Group_role,
			}
			if _, err := sess.Insert(&userGroup); err != nil {
				return err
			}
		}
		cmd.Result = user
		return nil
	})
}
// CreateAdminUser inserts a new admin SSO user in a transaction. Unlike
// CreateUser, the password comes from the command (encoded once with a
// fresh salt) and no group memberships are created. The created user is
// stored back in cmd.Result.
func CreateAdminUser(cmd *m.CreateAdminUserCommand) error {
	return inTransaction(func(sess *DBSession) error {
		user := m.SsoUser{
			Address: cmd.Address,
			Alternative_email: cmd.Alternative_email,
			Avatar: cmd.Avatar,
			City: cmd.City,
			Company: cmd.Company,
			Contact_phone: cmd.Contact_phone,
			Country: cmd.Country,
			Creation_time: time.Now().Unix(),
			Last_modified_pwd_time: time.Now().Unix(),
			Creator: cmd.Creator,
			Mobile_phone: cmd.Mobile_phone,
			Office_phone: cmd.Office_phone,
			Origin: cmd.Origin,
			Postal_code: cmd.Postal_code,
			Role: cmd.Role,
			Signed_in_frequency_counter: cmd.Signed_in_frequency_counter,
			Username: cmd.Username,
			Password: cmd.Password,
			Id: cmd.Id,
			Status: cmd.Status,
		}
		user.Salt = util.GetRandomString(10)
		// Only encode when a password was actually supplied.
		if len(cmd.Password) > 0 {
			user.Password = util.EncodePassword(user.Password, user.Salt)
		}
		if _, err := sess.Insert(&user); err != nil {
			return err
		}
		cmd.Result = user
		return nil
	})
}
// DeleteUser removes the user row and the user's group memberships in a
// single transaction, keyed by cmd.UserId.
func DeleteUser(cmd *m.DeleteUserCommand) error {
	return inTransaction(func(sess *DBSession) error {
		for _, stmt := range []string{
			"DELETE FROM sso_user WHERE id = ?",
			"DELETE FROM sso_user_group WHERE user_id =?",
		} {
			if _, err := sess.Exec(stmt, cmd.UserId); err != nil {
				return err
			}
		}
		return nil
	})
}
// GetUserByUsername looks up a single user by (lowercased) username and
// stores the first match in Query.Result. Returns m.ErrUserNotFound when
// the username is empty or no row matches.
func GetUserByUsername(Query *m.GetUserByUsernameQuery) error {
	if Query.Username == "" {
		return m.ErrUserNotFound
	}
	users := make([]*m.SsoUser, 0)
	sess := x.Table("sso_user")
	sess.Where("username =?", strings.ToLower(Query.Username))
	if err := sess.Find(&users); err != nil {
		return err
	}
	if len(users) == 0 {
		return m.ErrUserNotFound
	}
	Query.Result = users[0]
	return nil
}
// DeleteUserByUsername removes the user row matching cmd.Username inside a
// transaction. Group memberships are not touched here.
func DeleteUserByUsername(cmd *m.DeleteUserByUsernameCommand) error {
	return inTransaction(func(sess *DBSession) error {
		_, err := sess.Exec("DELETE FROM sso_user WHERE username = ?", cmd.Username)
		return err
	})
}
// UpdateUser persists the fields of cmd.User for the row matching its id,
// inside a transaction.
func UpdateUser(cmd *m.UpdateUserCommand) error {
	return inTransaction(func(dbs *DBSession) error {
		dbs.Where("id=?", cmd.User.Id)
		_, err := dbs.Update(cmd.User)
		return err
	})
}
// GetSignedInUserById loads the full signed-in-user view for query.Userid:
// the base user row, the user's per-workspace/cluster roles, the group
// memberships (joined with group name/company), and the SRP scopes
// (flattened to "appId.role" strings). The assembled result is stored in
// query.Result. Returns m.ErrUserNotFound unless exactly one user matches.
func GetSignedInUserById(query *m.GetSignedInUserByIdQuery) error {
	// Fetch the base user row.
	// NOTE(review): ToLower on an id looks odd — presumably ids are stored
	// lowercase; confirm against the writers.
	ssoUsers := make([]*m.SsoUser, 0)
	sess := x.Table("sso_user")
	sess.Where("id =?", strings.ToLower(query.Userid))
	err := sess.Find(&ssoUsers)
	if err != nil {
		return err
	}
	if len(ssoUsers) != 1 {
		return m.ErrUserNotFound
	}
	ssoUser := ssoUsers[0]
	// Copy the row into the signed-in-user DTO field by field.
	UserResult := m.SignedInUser{
		Id: ssoUser.Id,
		Username: ssoUser.Username,
		Address: ssoUser.Address,
		Alternative_email: ssoUser.Alternative_email,
		Avatar: ssoUser.Avatar,
		Creator: ssoUser.Creator,
		Country: ssoUser.Country,
		Company: ssoUser.Company,
		City: ssoUser.City,
		Creation_time: ssoUser.Creation_time,
		Contact_phone: ssoUser.Contact_phone,
		Expiration_time: ssoUser.Expiration_time,
		First_name: ssoUser.First_name,
		Groups: ssoUser.Groups,
		Industry: ssoUser.Industry,
		Last_name: ssoUser.Last_name,
		Last_signed_in_time: ssoUser.Last_signed_in_time,
		Last_modified_time: ssoUser.Last_modified_time,
		Last_modified_pwd_time: ssoUser.Last_modified_pwd_time,
		Last_long: ssoUser.Last_long,
		Last_lat: ssoUser.Last_lat,
		Last_ip: ssoUser.Last_ip,
		Last_city: ssoUser.Last_city,
		Mobile_phone: ssoUser.Mobile_phone,
		Office_phone: ssoUser.Office_phone,
		Origin: ssoUser.Origin,
		Postal_code: ssoUser.Postal_code,
		Role: ssoUser.Role,
		Signed_in_frequency_counter: ssoUser.Signed_in_frequency_counter,
		Salt: ssoUser.Salt,
		Status: ssoUser.Status,
		Total_signed_in_times: ssoUser.Total_signed_in_times,
	}
	// Attach the user's roles.
	UserRoleResult := make([]*m.UserRole, 0)
	sess = x.Table("sso_user_role")
	sess.Where("user_id = ?", UserResult.Id)
	sess.Cols("role_id", "role_name", "workspace_id", "namespace_id", "cluster_id")
	if err := sess.Find(&UserRoleResult); err != nil {
		return err
	}
	UserResult.Roles = UserRoleResult
	// Attach group memberships, joined with the group's company and name.
	GroupResult := make([]*m.UserGroupDto, 0)
	sess = x.Table("sso_user_group")
	sess.Join("inner", "sso_user", fmt.Sprintf("sso_user_group.user_id=%s.id", "sso_user"))
	sess.Join("LEFT", "sso_group", fmt.Sprintf("sso_user_group.group_id=%s.id", "sso_group"))
	sess.Where("sso_user_group.user_id = ?", UserResult.Id)
	sess.Cols("sso_user_group.group_role", "sso_user_group.group_id", "sso_group.company", "sso_group.name")
	if err := sess.Find(&GroupResult); err != nil {
		return err
	}
	UserResult.UserGroups = GroupResult
	// Attach SRP scopes, flattened to "appId.role" strings.
	ScopesResult := make([]*m.UserScopeDto, 0)
	sess = x.Table("sso_user_srp_scope")
	sess.Where("user_id = ?", ssoUser.Id)
	sess.Cols("srp_app_id", "srp_role")
	if err := sess.Find(&ScopesResult); err != nil {
		return err
	}
	scopes := make([]string, 0)
	for _, scope := range ScopesResult {
		scopes = append(scopes, scope.SrpAppId+"."+scope.SrpRole)
	}
	UserResult.Scopes = scopes
	query.Result = &UserResult
	return nil
}
// ResetPassword is currently a no-op: the transaction body does nothing.
// TODO(review): implement the reset logic or remove the handler.
func ResetPassword(cmd *m.ResetPasswordCommand) error {
	return inTransaction(func(sess *DBSession) error {
		return nil
	})
}
// UpdatePassword persists cmd.User (carrying the new password fields) for
// the row matching its id, inside a transaction.
func UpdatePassword(cmd *m.UpdatePasswordCommand) error {
	return inTransaction(func(dbs *DBSession) error {
		dbs.Where("id=?", cmd.User.Id)
		_, err := dbs.Update(cmd.User)
		return err
	})
}
// UpdateUserRoleAndGroup updates the user row and replaces the user's group
// memberships with cmd.Groups, all in one transaction.
func UpdateUserRoleAndGroup(cmd *m.UpdateUserRoleAndGroupCommand) error {
	return inTransaction(func(sess *DBSession) error {
		// Persist the user row first.
		sess.Where("id=?", cmd.User.Id)
		if _, err := sess.Update(cmd.User); err != nil {
			return err
		}
		// Drop all existing memberships, then insert the new set.
		if _, err := sess.Exec("DELETE FROM sso_user_group where user_id =?", cmd.User.Id); err != nil {
			return err
		}
		for _, g := range cmd.Groups {
			membership := m.SsoUserGroup{
				UserId:    cmd.User.Id,
				GroupId:   g.GroupId,
				GroupRole: g.Group_role,
			}
			if _, err := sess.Insert(&membership); err != nil {
				return err
			}
		}
		return nil
	})
}
// listUsers fetches one page of users matching the optional name filter and
// id list, ordered and paginated per the query, and stores the rows in
// query.UserList.
func listUsers(query *m.ListUsersQuery) error {
	params := make([]interface{}, 0)
	ssoUsers := make([]*m.SsoUserDto, 0)
	sql := `
	SELECT * FROM sso_user WHERE 1=1
	`
	if query.Name != "" {
		sql += " AND sso_user.username like ?"
		params = append(params, "%"+query.Name+"%")
	}
	if len(query.Userids) > 0 {
		for _, userid := range query.Userids {
			params = append(params, userid)
		}
		// Leading space is required: this clause may directly follow the
		// "like ?" above (the original concatenated "?AND ...", producing
		// invalid SQL whenever both filters were set).
		sql += " AND sso_user.id IN (?" + strings.Repeat(",?", len(query.Userids)-1) + ")"
	}
	// NOTE(review): query.Property is interpolated directly into the SQL; it
	// must be validated against a whitelist of column names upstream or this
	// is an injection vector. The asymmetric casing ("asc" vs "DESC") is
	// preserved from the original — confirm it is intended.
	if query.Direction == "asc" {
		sql += " ORDER BY sso_user." + query.Property + " ASC"
	} else if query.Direction == "DESC" {
		sql += " ORDER BY sso_user." + query.Property + " DESC"
	}
	sql += fmt.Sprintf(" LIMIT %d OFFSET %d", query.MaxResults, (query.Page-1)*query.MaxResults)
	if err := x.SQL(sql, params...).Find(&ssoUsers); err != nil {
		return err
	}
	query.UserList = ssoUsers
	return nil
}
// GetHasPermissionUserIdsByUserId returns the distinct ids of all users who
// share an admin scope with the given user: workspace admins are matched on
// (workspace_id, cluster_id); cluster admins/owners on cluster_id alone.
// The resulting id list is stored in query.Result.
func GetHasPermissionUserIdsByUserId(query *m.GetHasPermissionUserIdsByUserIdQuery) error {
	userRole := make([]*m.SsoUserRole, 0)
	sess := x.Table("sso_user_role")
	sess.Where("user_id = ?", query.UserId)
	if err := sess.Find(&userRole); err != nil {
		return err
	}
	if len(userRole) == 0 {
		return m.ErrUserRoleNotFound
	}
	// Build one OR-able clause per admin role the user holds. Collecting the
	// clauses first fixes two bugs in the original: a leading "OR" was
	// emitted whenever the first role was not an admin role, and an empty
	// "AND ()" was emitted when no role matched at all.
	clauses := make([]string, 0, len(userRole))
	params := make([]interface{}, 0)
	for _, role := range userRole {
		switch role.RoleName {
		case string(m.ROLE_WORKSPACE_ADMIN):
			clauses = append(clauses, "(workspace_id = ? AND cluster_id = ?)")
			params = append(params, role.WorkspaceId, role.ClusterId)
		case string(m.ROLE_CLUSTER_ADMIN), string(m.ROLE_CLUSTER_OWNER):
			clauses = append(clauses, "(cluster_id = ?)")
			params = append(params, role.ClusterId)
		}
	}
	// No admin role means no scope to match: return an empty result instead
	// of issuing invalid SQL.
	if len(clauses) == 0 {
		query.Result = make([]string, 0)
		return nil
	}
	var sql bytes.Buffer
	sql.WriteString(`
	SELECT
	DISTINCT(user_id)
	FROM sso_user_role
	WHERE 1 =1
	`)
	sql.WriteString("AND (")
	sql.WriteString(strings.Join(clauses, " OR "))
	sql.WriteString(")")
	userIds := make([]*m.UserId, 0)
	if err := x.SQL(sql.String(), params...).Find(&userIds); err != nil {
		return err
	}
	queryUserIds := make([]string, 0, len(userIds))
	for _, uid := range userIds {
		queryUserIds = append(queryUserIds, uid.UserId)
	}
	query.Result = queryUserIds
	return nil
}
|
/*
Two ways to declare a map
Checking whether a key exists in a map
Deleting an item from a map
Iterating over a map with for
Difference between new and make: new returns a pointer
*/
package main
import "fmt"
// main demonstrates basic map operations: creation with make and with a
// literal, deletion, key-existence checks, and iteration.
func main() {
	m1 := make(map[string]int)
	m1["k1"] = 1
	m1["k2"] = 2
	fmt.Println("m1:", m1)
	delete(m1, "k1")
	fmt.Println("m1:", m1)
	m2 := map[string]int{
		"k1": 1,
		"k2": 2,
	}
	fmt.Println("m2:", m2)
	// Does the key exist? The second return value reports presence.
	if _, ok := m1["notInMap"]; ok {
		fmt.Println("Exists!")
	} else {
		fmt.Println("Does NOT exist")
	}
	for key, value := range m2 {
		fmt.Println(key, value)
	}
}
|
// Package main ...
package main
import (
"log"
"time"
"github.com/go-rod/rod"
)
// This example demonstrates how to use a selector to click on an element.
// It opens the Go time-package docs, waits for the page to finish loading,
// clicks the examples section, and logs the time.After example's source.
func main() {
	page := rod.New().
		MustConnect().
		Trace(true). // log useful info about what rod is doing
		Timeout(15 * time.Second).
		MustPage("https://pkg.go.dev/time/")
	// wait for footer element is visible (ie, page is loaded)
	page.MustElement(`body > footer`).MustWaitVisible()
	// find and click "Expand All" link
	page.MustElement(`#pkg-examples`).MustClick()
	// retrieve the value of the textarea
	example := page.MustElement(`#example-After textarea`).MustText()
	log.Printf("Go's time.After example:\n%s", example)
}
|
package main
import (
"flag"
"log"
"net/http"
"os"
"github.com/RackHD/ipam/controllers"
"github.com/RackHD/ipam/ipam"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"gopkg.in/mgo.v2"
)
// mongo is the MongoDB host:port the service dials, settable via -mongo.
var mongo string
// init registers the -mongo flag before flag.Parse runs in main.
func init() {
	flag.StringVar(&mongo, "mongo", "ipam_mongo:27017", "port to connect to mongodb container")
}
// main wires up the IPAM HTTP API: it connects to MongoDB, registers the
// resource controllers on a mux router, and serves on port 8000 with
// request logging enabled.
func main() {
	flag.Parse()
	// Default to enable mgo debug. Set to false to disable.
	var mgoDebug = true
	if mgoDebug {
		mgo.SetDebug(true)
		mgo.SetLogger(log.New(os.Stderr, "", log.LstdFlags))
	}
	// Start off with a new mux router.
	router := mux.NewRouter().StrictSlash(true)
	session, err := mgo.Dial(mongo)
	if err != nil {
		log.Fatalf("%s", err)
	}
	defer session.Close()
	// Create the IPAM business logic object.
	ipam, err := ipam.NewIpam(session)
	if err != nil {
		log.Fatalf("%s", err)
	}
	// Oddly enough don't need to capture the controllers for them to
	// continue to exist; constructing them registers their routes.
	_, err = controllers.NewPoolsController(router, ipam)
	if err != nil {
		log.Fatalf("%s", err)
	}
	_, err = controllers.NewSubnetsController(router, ipam)
	if err != nil {
		log.Fatalf("%s", err)
	}
	_, err = controllers.NewReservationsController(router, ipam)
	if err != nil {
		log.Fatalf("%s", err)
	}
	_, err = controllers.NewLeasesController(router, ipam)
	if err != nil {
		log.Fatalf("%s", err)
	}
	// Show off request logging middleware.
	logged := handlers.LoggingHandler(os.Stdout, router)
	log.Printf("Listening on port 8000...")
	// ListenAndServe always returns a non-nil error; surface it instead of
	// exiting silently (the original discarded it).
	log.Fatal(http.ListenAndServe(":8000", logged))
}
|
package utils
import (
"encoding/json"
"fmt"
"time"
)
// TimeStringBetween represents a daily time window parsed from a JSON array
// of two "HH:MM" strings (resolved against today's date, Asia/Shanghai).
type TimeStringBetween struct {
	Src []string // the two source "HH:MM" strings, as received in JSON
	start time.Time // resolved window start; always <= end
	end time.Time // resolved window end
}
// NewTimeStringBetween decodes data (a JSON array of two "HH:MM" strings)
// into a ready-to-use TimeStringBetween.
func NewTimeStringBetween(data []byte) (*TimeStringBetween, error) {
	between := new(TimeStringBetween)
	if err := json.Unmarshal(data, between); err != nil {
		return nil, err
	}
	return between, nil
}
// MarshalJSON encodes the window back to its original JSON form: the
// two-element array of source strings.
func (t *TimeStringBetween) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.Src)
}
// UnmarshalJSON decodes a JSON array of exactly two "HH:MM" strings,
// resolves them against today's date in Asia/Shanghai, and orders the pair
// so start <= end. Returns an error on malformed JSON, a wrong-length
// array, or an unparsable time string.
func (t *TimeStringBetween) UnmarshalJSON(data []byte) error {
	err := json.Unmarshal(data, &t.Src)
	if err != nil {
		return err
	}
	if len(t.Src) != 2 {
		return fmt.Errorf("invalid length")
	}
	var (
		// NOTE(review): the timezone is hardcoded; confirm all callers
		// expect China Standard Time.
		timezone = "Asia/Shanghai"
		tmp = make([]time.Time, 2)
		date = time.Now().Format("2006-01-02")
	)
	// Turn each "HH:MM" into a full timestamp on today's date.
	for i, s := range t.Src {
		tmp[i], err = ParseWithLocation(timezone, fmt.Sprintf("%s %s:00", date, s))
		if err != nil {
			return err
		}
	}
	// Normalize so start is always the earlier instant.
	if tmp[0].Before(tmp[1]) {
		t.start = tmp[0]
		t.end = tmp[1]
	} else {
		t.start = tmp[1]
		t.end = tmp[0]
	}
	return nil
}
// Between reports whether target lies strictly inside the window: the
// bounds are exclusive, so a target equal to start or end returns false.
func (t *TimeStringBetween) Between(target time.Time) bool {
	return target.After(t.start) && target.Before(t.end)
}
// End returns the resolved end of the window.
func (t *TimeStringBetween) End() time.Time {
	return t.end
}
// Start returns the resolved start of the window.
func (t *TimeStringBetween) Start() time.Time {
	return t.start
}
|
package collect
import (
"bytes"
"encoding/json"
"fmt"
"strings"
"github.com/go-redis/redis/v7"
"github.com/pkg/errors"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
)
// Redis probes the Redis instance at databaseCollector.URI, records whether
// it is reachable and (when connected) its redis_version, and saves the
// resulting DatabaseConnection as redis/<collectorName>.json in the bundle.
// Connection failures are reported inside the saved document, not as errors.
func Redis(c *Collector, databaseCollector *troubleshootv1beta2.Database) (CollectorResult, error) {
	databaseConnection := DatabaseConnection{}
	opt, err := redis.ParseURL(databaseCollector.URI)
	if err != nil {
		databaseConnection.Error = err.Error()
	} else {
		client := redis.NewClient(opt)
		// Close the client so the underlying connection pool is not leaked
		// (the original never closed it).
		defer client.Close()
		stringResult := client.Info("server")
		if stringResult.Err() != nil {
			databaseConnection.Error = stringResult.Err().Error()
		}
		databaseConnection.IsConnected = stringResult.Err() == nil
		if databaseConnection.Error == "" {
			// INFO output is "key:value" lines; pick out redis_version.
			lines := strings.Split(stringResult.Val(), "\n")
			for _, line := range lines {
				lineParts := strings.Split(line, ":")
				if len(lineParts) == 2 {
					if lineParts[0] == "redis_version" {
						databaseConnection.Version = strings.TrimSpace(lineParts[1])
					}
				}
			}
		}
	}
	b, err := json.Marshal(databaseConnection)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal database connection")
	}
	collectorName := databaseCollector.CollectorName
	if collectorName == "" {
		collectorName = "redis"
	}
	output := NewResult()
	// Propagate save failures instead of silently discarding them.
	if err := output.SaveResult(c.BundlePath, fmt.Sprintf("redis/%s.json", collectorName), bytes.NewBuffer(b)); err != nil {
		return nil, errors.Wrap(err, "failed to save result")
	}
	return output, nil
}
|
package main
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/printer"
"go/token"
"log"
"os"
"strings"
"./rewriter"
"golang.org/x/tools/go/ast/astutil"
)
// TODO: comments are moved around by this script. Look at go/ast's CommentMap
// https://golang.org/pkg/go/ast/#CommentMap
// This script will emulate spacing that it sees in source code.
// For example, if it sees the spacing seen below:
// b1 := bson.M{
// "apple": 1,
// }
//
// It will create the following new code:
// b1 := bsonutil.NewD(
// bsonutil.NewDocElem("apple", 1),
// )
// The method by which the functions below insert newlines is a bit hacky,
// (by necessity). We insert the *ast.Ident "theremin" between arguments to functions, and
// replace those idents with newlines using sed within replace.sh.
// In the above example, the output of this go script would be:
// b1 := bsonutil.NewD(theremin, bsonutil.NewDocElem("a", 1), theremin)
// which would be ultimately converted to:
// b1 := bsonutil.NewD(
// bsonutil.NewDocElem("apple", 1),
// )
// So, why are we doing this? There are no methods for adding newlines
// using the packages imported above.
// go/token has an AddLine method. However, this one can only be used
// to add a line to the very end of a file. go/printer will always print a CallExpr
// on a single line (https://golang.org/src/go/printer/nodes.go, line 150).
// Why "theremin"? "theremin" was chosen as the replacement string because
// it struck me as a word not likely to be found in the
// codebase. If "theremin" ever does come to live in the codebase,
// choose a new word!
// replace is the sentinel identifier spliced between arguments; replace.sh
// later rewrites every "theremin" token into a newline (see comment above).
var replace = ast.NewIdent("theremin")
// bsonRewriter rewrites bson.M/bson.D/bson.DocElem composite literals into
// bsonutil constructor calls, recording whether any rewrite happened.
type bsonRewriter struct {
	fs *token.FileSet // fileset of the parsed file; needed to print nodes
	Changed bool // set to true whenever a literal is rewritten
}
// countOpenBracketNewline reports how many opening braces in s are
// immediately followed by a newline.
func countOpenBracketNewline(s string) int {
	const pattern = "{\n"
	return strings.Count(s, pattern)
}
// countNewlineCloseBracket reports how many closing braces in s are
// immediately preceded by a newline.
func countNewlineCloseBracket(s string) int {
	const pattern = "\n}"
	return strings.Count(s, pattern)
}
// numberOfCommaNewlines reports how many commas in s are immediately
// followed by a newline (i.e. one argument per line).
func numberOfCommaNewlines(s string) int {
	const pattern = ",\n"
	return strings.Count(s, pattern)
}
// shouldNewlines inspects the printed form of the original bson.X{} literal
// and reports whether a newline appeared after the opening brace (beg),
// before the closing brace (end), and between arguments (args).
func (rw *bsonRewriter) shouldNewlines(typedN ast.Expr) (beg, end, args bool) {
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, rw.fs, typedN); err != nil {
		log.Fatal(err)
	}
	printed := buf.String()
	beg = countOpenBracketNewline(printed) > 0
	end = countNewlineCloseBracket(printed) > 0
	args = numberOfCommaNewlines(printed) > 0
	return beg, end, args
}
// newCallExpr returns a new *ast.CallExpr of the form pkg.fxn(<args>)
func newCallExpr(pkg string, fxn string, args []ast.Expr) *ast.CallExpr {
pkgIdent := ast.NewIdent(pkg)
fxnIdent := ast.NewIdent(fxn)
selExpr := &ast.SelectorExpr{X: pkgIdent, Sel: fxnIdent}
callExpr := &ast.CallExpr{Fun: selExpr, Args: args}
return callExpr
}
// handleBsonDocElem converts bson.DocElem{Name: <n>, Value: <v>} into
// bsonutil.NewDocElem(n, v). No newlines are added.
func (rw *bsonRewriter) handleBsonDocElem(typedN *ast.CompositeLit) (ast.Node, rewriter.Rewriter) {
	rw.Changed = true
	args := []ast.Expr{}
	// Each element is a KeyValueExpr such as Name: "a" or Value: 5; only
	// the value side is carried into the constructor call.
	for _, elt := range typedN.Elts {
		kv := elt.(*ast.KeyValueExpr)
		args = append(args, rw.handleInterfaceArray(kv.Value))
	}
	return newCallExpr("bsonutil", "NewDocElem", args), rw
}
// handleInterfaceArray converts a []interface{}{...} literal into a
// bsonutil.NewArray(...) call, recursing into nested interface arrays.
// Newlines are always inserted between items in the NewArray call.
// Values that are not interface-array literals are returned unchanged.
func (rw *bsonRewriter) handleInterfaceArray(val ast.Expr) ast.Expr {
	// Unwrap step by step; bail out as soon as the shape doesn't match.
	composite, ok := val.(*ast.CompositeLit)
	if !ok {
		return val
	}
	arrType, ok := composite.Type.(*ast.ArrayType)
	if !ok {
		return val
	}
	if _, ok := arrType.Elt.(*ast.InterfaceType); !ok {
		return val
	}
	// Newline after the opening paren, then one after every item.
	args := []ast.Expr{replace}
	for _, item := range composite.Elts {
		// Recurse to handle interface arrays nested inside interface arrays.
		args = append(args, rw.handleInterfaceArray(item), replace)
	}
	return newCallExpr("bsonutil", "NewArray", args)
}
// handleBsonM converts "bson.M{<k:v pairs>}" into bsonutil.NewM(<DocElems>),
// emulating the newline layout of the original literal.
func (rw *bsonRewriter) handleBsonM(typedN *ast.CompositeLit) (ast.Node, rewriter.Rewriter) {
	rw.Changed = true
	beg, end, commas := rw.shouldNewlines(typedN)
	args := []ast.Expr{}
	if beg {
		args = append(args, replace)
	}
	for _, item := range typedN.Elts {
		if kv, ok := item.(*ast.KeyValueExpr); ok {
			// "k: v" becomes NewDocElem(k, v).
			elem := newCallExpr("bsonutil", "NewDocElem",
				[]ast.Expr{kv.Key, rw.handleInterfaceArray(kv.Value)})
			args = append(args, elem)
		} else {
			// Unexpected element shape; keep it verbatim.
			args = append(args, item)
		}
		if commas {
			args = append(args, replace)
		}
	}
	if end {
		args = append(args, replace)
	}
	return newCallExpr("bsonutil", "NewM", args), rw
}
// handleBsonD will convert "bson.D{<DocElems>}" into bsonutil.NewD(<DocElems>),
// emulating the newline layout of the original literal.
func (rw *bsonRewriter) handleBsonD(typedN *ast.CompositeLit) (ast.Node, rewriter.Rewriter) {
	beg, end, commas := rw.shouldNewlines(typedN)
	rw.Changed = true
	args := []ast.Expr{}
	if beg {
		// Newline right after the opening paren of NewD.
		args = append(args, replace)
	}
	for _, item := range typedN.Elts {
		switch typedElt := item.(type) {
		// We know that its going to be two Elts, one "Name" and one "Value".
		case *ast.CompositeLit:
			// Collect the Name/Value expressions of the DocElem literal.
			subArgs := []ast.Expr{}
			for _, subElt := range typedElt.Elts {
				kvExpr := subElt.(*ast.KeyValueExpr)
				value := rw.handleInterfaceArray(kvExpr.Value)
				subArgs = append(subArgs, value)
			}
			args = append(args, newCallExpr("bsonutil", "NewDocElem", subArgs))
			if commas {
				// Newline between DocElems.
				args = append(args, replace)
			}
		default:
			// Unexpected element shape (not a CompositeLit); keep it verbatim.
			args = append(args, item)
			if commas {
				args = append(args, replace)
			}
		}
	}
	if end {
		// Newline before the closing paren of NewD.
		args = append(args, replace)
	}
	newNode := newCallExpr("bsonutil", "NewD", args)
	return newNode, rw
}
// handleBsonMArray converts "[]bson.M{<bson.M's>}" into
// bsonutil.NewMArray(<NewM calls>), emulating the original newline layout.
// Naked k:v elements are dropped with a warning, matching the original.
func (rw *bsonRewriter) handleBsonMArray(typedN *ast.CompositeLit) (ast.Node, rewriter.Rewriter) {
	rw.Changed = true
	beg, end, commas := rw.shouldNewlines(typedN)
	args := []ast.Expr{}
	if beg {
		args = append(args, replace)
	}
	for _, item := range typedN.Elts {
		switch elt := item.(type) {
		case *ast.CompositeLit:
			// Each bson.M element becomes a nested NewM call.
			converted, _ := rw.handleBsonM(elt)
			args = append(args, converted.(ast.Expr))
			if commas {
				args = append(args, replace)
			}
		case *ast.KeyValueExpr:
			fmt.Println("WARNING: Bson.M Array Elts contained naked k:v pair")
		default:
			// Unexpected element shape; keep it verbatim.
			args = append(args, item)
			if commas {
				args = append(args, replace)
			}
		}
	}
	if end {
		args = append(args, replace)
	}
	return newCallExpr("bsonutil", "NewMArray", args), rw
}
// handleBsonDArray will convert "[]bson.D{<bson.D's>}" into
// bsonutil.NewDArray(<bson.D's>), emulating the original newline layout.
// Naked k:v elements are dropped with a warning.
func (rw *bsonRewriter) handleBsonDArray(typedN *ast.CompositeLit) (ast.Node, rewriter.Rewriter) {
	rw.Changed = true
	beg, end, commas := rw.shouldNewlines(typedN)
	args := []ast.Expr{}
	if beg {
		// Newline right after the opening paren of NewDArray.
		args = append(args, replace)
	}
	for _, item := range typedN.Elts {
		switch typedElt := item.(type) {
		case *ast.CompositeLit:
			// Each bson.D element becomes a nested NewD call.
			bsonD, _ := rw.handleBsonD(typedElt)
			bsonDCast := bsonD.(ast.Expr)
			args = append(args, bsonDCast)
			if commas {
				args = append(args, replace)
			}
		case *ast.KeyValueExpr:
			fmt.Println("WARNING: Bson.D Array Elts contained naked k:v pair")
		default:
			// Unexpected element shape; keep it verbatim (typedElt == item here).
			args = append(args, typedElt)
			if commas {
				args = append(args, replace)
			}
		}
	}
	if end {
		// Newline before the closing paren of NewDArray.
		args = append(args, replace)
	}
	newNode := newCallExpr("bsonutil", "NewDArray", args)
	return newNode, rw
}
// Rewrite implements rewriter.Rewriter. It dispatches composite literals to
// the matching bson -> bsonutil conversion handler and returns every other
// node unchanged.
//
// NOTE(review): the type checks compare fmt.Sprintf("%v", expr) against
// "&{bson M}" etc. — i.e. the default struct formatting of an
// *ast.SelectorExpr. Fragile, but kept byte-identical to preserve behavior.
func (rw *bsonRewriter) Rewrite(node ast.Node) (ast.Node, rewriter.Rewriter) {
	newNode := node
	switch typedN := node.(type) {
	case *ast.CompositeLit:
		// []bson.M{...} / []bson.D{...} array literals.
		switch typedType := typedN.Type.(type) {
		case *ast.ArrayType:
			typedElt := typedType.Elt
			selExpr, ok := typedElt.(*ast.SelectorExpr)
			if !ok {
				break
			}
			if fmt.Sprintf("%v", selExpr) == "&{bson M}" {
				newNode, _ = rw.handleBsonMArray(typedN)
			}
			if fmt.Sprintf("%v", selExpr) == "&{bson D}" {
				newNode, _ = rw.handleBsonDArray(typedN)
			}
		}
		// Plain bson.M{...}, bson.D{...} and bson.DocElem{...} literals.
		if fmt.Sprintf("%v", typedN.Type) == "&{bson M}" {
			newNode, _ = rw.handleBsonM(typedN)
		}
		if fmt.Sprintf("%v", typedN.Type) == "&{bson D}" {
			newNode, _ = rw.handleBsonD(typedN)
		}
		if fmt.Sprintf("%v", typedN.Type) == "&{bson DocElem}" {
			newNode, _ = rw.handleBsonDocElem(typedN)
		}
	default:
	}
	return newNode, rw
}
// rewriteFile parses the named Go source file, rewrites its bson composite
// literals into bsonutil calls, adds the bsonutil import, and writes the
// formatted result back over the original file. Any failure is fatal.
func rewriteFile(filename string) {
	// Parse the file, keeping comments so they survive the rewrite.
	fset := token.NewFileSet()
	node, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	// Do the rewrite.
	rw := &bsonRewriter{fs: fset}
	newNode := rewriter.Rewrite(rw, node)
	// Add the import; if the imports aren't perfect, replace.sh calls
	// goimports and will fix it up. AddImport returns a bool, not an error.
	_ = astutil.AddImport(fset, node, "github.com/10gen/sqlproxy/internal/util/bsonutil")
	// Write the rewritten AST back out. The Create error was previously
	// unchecked (and a Close call sat unreachable after log.Fatal).
	f, err := os.Create(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := format.Node(f, fset, newNode); err != nil {
		log.Fatal(err)
	}
}
// main rewrites, in place, the Go source file given as the first
// command-line argument (see rewriteFile).
func main() {
	rewriteFile(os.Args[1])
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package camera
import (
"context"
"fmt"
"regexp"
"chromiumos/tast/common/media/caps"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/camera/testutil"
"chromiumos/tast/local/crosconfig"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the PrivacySwitch test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         PrivacySwitch,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies the privacy switch",
		Contacts: []string{
			"ribalda@chromium.org",
			"chromeos-camera-eng@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{caps.BuiltinUSBCamera},
		// Primus camera module's privacy switch is not connected to the shutter b/236661871
		HardwareDeps: hwdep.D(hwdep.SkipOnModel("primus")),
	})
}
// ctrlBusy matches the v4l2-ctl output emitted when the privacy control
// exists but the device is currently held by another process.
var ctrlBusy = regexp.MustCompile(`(?m)^VIDIOC_G_EXT_CTRLS: failed: Device or resource busy$`)

// hasPrivacySwitchControl reports whether any built-in USB camera exposes
// the V4L2 "privacy" control. A device counts as having the control when
// reading it succeeds, or when v4l2-ctl exits with code 255 while reporting
// "Device or resource busy" (control present but busy).
func hasPrivacySwitchControl(ctx context.Context) (bool, error) {
	usbCams, err := testutil.USBCamerasFromV4L2Test(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to get USB cameras")
	}
	if len(usbCams) == 0 {
		return false, errors.New("failed to find any valid device")
	}
	testing.ContextLog(ctx, "USB cameras: ", usbCams)
	for _, devicePath := range usbCams {
		cmd := testexec.CommandContext(ctx, "v4l2-ctl", "-C", "privacy", "-d", devicePath)
		out, err := cmd.Output(testexec.DumpLogOnError)
		if err == nil || (cmd.ProcessState.ExitCode() == 255 && ctrlBusy.Match(out)) {
			testing.ContextLogf(ctx, "Device %s has a privacy control", devicePath)
			return true, nil
		}
		// An error != 255 indicates that the control does not exist, which is a valid result
	}
	testing.ContextLog(ctx, "No device has a privacy control")
	return false, nil
}
// privacySwitchPresence describes what cros_config reports about the
// privacy-switch wiring of the device's cameras.
type privacySwitchPresence uint

const (
	// privacySwitchNotPresent: no camera declares a privacy switch.
	privacySwitchNotPresent privacySwitchPresence = iota
	// privacySwitchPresent: a camera declares a connected privacy switch.
	privacySwitchPresent
	// privacySwitchIgnore: a switch exists but is not connected, so its
	// state cannot be validated.
	privacySwitchIgnore
)

// hasPrivacySwitchHardware walks the cros_config camera entries
// (/camera/devices/0, /camera/devices/1, ...) until one is missing and
// returns the verdict of the first camera carrying a "has-privacy-switch"
// property.
func hasPrivacySwitchHardware(ctx context.Context) (privacySwitchPresence, error) {
	for i := 0; ; i++ {
		device := fmt.Sprintf("/camera/devices/%v", i)
		_, err := crosconfig.Get(ctx, device, "interface")
		if crosconfig.IsNotFound(err) {
			// No more camera entries.
			break
		}
		if err != nil {
			return privacySwitchNotPresent, errors.Wrap(err, "failed to execute cros_config")
		}
		val, err := crosconfig.Get(ctx, device, "has-privacy-switch")
		if crosconfig.IsNotFound(err) {
			// This camera doesn't declare the property; try the next one.
			continue
		}
		if err != nil {
			return privacySwitchNotPresent, errors.Wrap(err, "failed to execute cros_config")
		}
		if val == "true" {
			testing.ContextLogf(ctx, "Camera %v supports privacy switch", i)
			return privacySwitchPresent, nil
		}
		if val == "false" {
			testing.ContextLogf(ctx, "Camera %v has unconnected privacy switch", i)
			return privacySwitchIgnore, nil
		}
	}
	testing.ContextLog(ctx, "No privacy switch found")
	return privacySwitchNotPresent, nil
}
// PrivacySwitch cross-checks the privacy switch declared by cros_config
// (hardware description) against the V4L2 "privacy" control exposed by the
// camera devices and fails on any mismatch. A switch reported as
// unconnected (privacySwitchIgnore) is not validated either way.
func PrivacySwitch(ctx context.Context, s *testing.State) {
	hasControl, err := hasPrivacySwitchControl(ctx)
	if err != nil {
		s.Fatal("Failed to get privacy switch control: ", err)
	}
	privacySwitch, err := hasPrivacySwitchHardware(ctx)
	if err != nil {
		s.Fatal("Failed to get privacy switch hardware: ", err)
	}
	if privacySwitch == privacySwitchPresent && !hasControl {
		s.Error("Privacy switch present but no video device can access it")
	}
	if privacySwitch == privacySwitchNotPresent && hasControl {
		s.Error("Privacy switch not present in hardware but accessible via v4l control")
	}
}
|
package main
import (
"bytes"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"time"
"golang.org/x/crypto/ssh/terminal"
"github.com/gertd/pdbq/helper"
)
// Token is the JSON shape of a Puppet RBAC token response
// (POST rbac-api/v1/auth/token).
type Token struct {
	Token string `json:"token"`
}
// newInsecureClient returns an HTTP client with dial/TLS timeouts that
// skips TLS certificate verification.
// NOTE(review): InsecureSkipVerify disables server authentication; confirm
// this is acceptable for the target environment (Puppet servers commonly
// use self-signed certificates).
func newInsecureClient() *http.Client {
	tr := &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     &tls.Config{InsecureSkipVerify: true},
		DisableCompression:  true,
	}
	return &http.Client{Transport: tr}
}

// doJSON executes req with client, reads the whole response body, and
// unmarshals it into out. Any failure is fatal, matching the original
// error handling.
func doJSON(client *http.Client, req *http.Request, out interface{}) {
	resp, err := client.Do(req)
	if err != nil {
		log.Fatalln("err ", err)
	}
	// On a nil error resp is always non-nil, so the close can be deferred
	// unconditionally (the previous nil check was redundant).
	defer resp.Body.Close()
	buf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalln("err ", err)
	}
	if err := json.Unmarshal(buf, out); err != nil {
		log.Fatalln("err ", err)
	}
}

// main authenticates against the Puppet RBAC API, then queries the
// PuppetDB inventory endpoint with the obtained token and pretty-prints
// the JSON result.
func main() {
	var token Token
	var hostname string
	var username string
	var password string
	flag.StringVar(&hostname, "hostname", "", "puppet hostname")
	flag.StringVar(&username, "username", "", "puppet username")
	flag.StringVar(&password, "password", "", "puppet password")
	flag.Parse()
	// Prompt for the password when it was not passed on the command line.
	if len(password) == 0 {
		fmt.Println("puppet password")
		buf, err := terminal.ReadPassword(0)
		if err != nil {
			log.Fatalln(err)
		}
		password = string(buf)
	}
	// Acquire an RBAC token.
	{
		const portnumber = 4433
		const endpoint = `rbac-api/v1/auth/token`
		url := fmt.Sprintf("https://%s:%d/%s", hostname, portnumber, endpoint)
		// Marshal the credentials instead of building JSON with Sprintf so
		// that quotes/backslashes in the password are escaped correctly.
		body, err := json.Marshal(map[string]string{"login": username, "password": password})
		if err != nil {
			log.Fatalln("err ", err)
		}
		req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
		if err != nil {
			log.Fatalln("err ", err)
		}
		req.Header.Set("Content-Type", "application/json")
		doJSON(newInsecureClient(), req, &token)
	}
	// Query the PuppetDB inventory endpoint with the token.
	{
		const portnumber = 8081
		const endpoint = `pdb/query/v4/inventory`
		url := fmt.Sprintf("https://%s:%d/%s", hostname, portnumber, endpoint)
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			log.Fatalln("err ", err)
		}
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("X-Authentication", token.Token)
		var result interface{}
		doJSON(newInsecureClient(), req, &result)
		fmt.Println(helper.PrettyPrintJSON(result))
	}
}
|
package accounts
import (
"testing"
"github.com/google/uuid"
"github.com/jrapoport/gothic/models/account"
"github.com/jrapoport/gothic/models/types"
"github.com/jrapoport/gothic/models/types/provider"
"github.com/jrapoport/gothic/store"
"github.com/jrapoport/gothic/test/tconn"
"github.com/jrapoport/gothic/test/tutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// name is the provider used by every account fixture in these tests.
const name = provider.Google

// testAccount creates and persists a linked account with a random account
// ID, email and user ID plus the supplied data map, and returns it.
// Creation failures abort the test.
func testAccount(t *testing.T, conn *store.Connection, data types.Map) *account.Account {
	var aid = uuid.New().String()
	var mail = tutils.RandomEmail()
	var uid = uuid.New()
	la := account.NewAccount(name, aid, mail, data)
	la.UserID = uid
	err := conn.Create(la).Error
	require.NoError(t, err)
	return la
}
// TestGetAccount checks that GetAccount finds a stored account by
// provider + account ID, and errors for an empty ID or an unknown provider.
func TestGetAccount(t *testing.T) {
	conn, _ := tconn.TempConn(t)
	la := testAccount(t, conn, nil)
	got, err := GetAccount(conn, name, la.AccountID)
	assert.NoError(t, err)
	assert.Equal(t, la.Email, got.Email)
	// Empty account ID must fail.
	got, err = GetAccount(conn, name, "")
	assert.Error(t, err)
	assert.Nil(t, got)
	// Unknown provider must fail.
	got, err = GetAccount(conn, provider.Unknown, la.AccountID)
	assert.Error(t, err)
	assert.Nil(t, got)
}

// TestHasAccount checks that HasAccount reports true only for a matching
// provider + account ID pair, and false (without error) otherwise.
func TestHasAccount(t *testing.T) {
	conn, _ := tconn.TempConn(t)
	la := testAccount(t, conn, nil)
	has, err := HasAccount(conn, name, la.AccountID)
	assert.NoError(t, err)
	assert.True(t, has)
	has, err = HasAccount(conn, name, "")
	assert.NoError(t, err)
	assert.False(t, has)
	has, err = HasAccount(conn, provider.Unknown, la.AccountID)
	assert.NoError(t, err)
	assert.False(t, has)
}
// TestUpdateAccount drives UpdateAccount through a table of email/data
// combinations, checking the reported "changed" flag, the resulting email,
// and the merged data map. The account is reset after each case so the
// cases stay independent.
func TestUpdateAccount(t *testing.T) {
	conn, _ := tconn.TempConn(t)
	data := types.Map{
		"hello":  "world",
		"foobar": 13.37,
	}
	la := testAccount(t, conn, data)
	email := la.Email
	aid := la.AccountID
	emailIn := tutils.RandomEmail()
	dataIn := types.Map{
		"hello": "bar",
		"quack": "ok",
	}
	// Expected merge result: dataIn overrides/extends the original data.
	dataOut := types.Map{
		"hello":  "bar",
		"foobar": 13.37,
		"quack":  "ok",
	}
	tests := []struct {
		email       *string
		data        types.Map
		assertEmail string
		assertData  types.Map
		Ok          assert.BoolAssertionFunc
	}{
		// No-ops (nil or unchanged inputs) must report no change.
		{nil, nil, email, data, assert.False},
		{&email, nil, email, data, assert.False},
		{nil, types.Map{}, email, data, assert.False},
		// Real changes must report true and be applied.
		{&emailIn, nil, emailIn, data, assert.True},
		{nil, dataIn, email, dataOut, assert.True},
		{&emailIn, dataIn, emailIn, dataOut, assert.True},
	}
	for _, test := range tests {
		ok, err := UpdateAccount(conn, la, test.email, test.data)
		assert.NoError(t, err)
		test.Ok(t, ok)
		assert.Equal(t, aid, la.AccountID)
		assert.Equal(t, test.assertEmail, la.Email)
		assert.Equal(t, test.assertData, la.Data)
		// Restore the original state for the next case.
		la.Email = email
		la.Data = data
		err = conn.Save(la).Error
		require.NoError(t, err)
	}
}
|
package backends
import (
"database/sql"
"errors"
"fmt"
"log"
"strings"
"sync"
"time"
)
const (
	// Supported SQL dialects; placeholder style and DDL differ between them.
	dbTypePostgres = "postgres"
	dbTypeMysql    = "mysql"
)

// Opt represents SQL DB backend's options.
type Opt struct {
	// DBType is the SQL dialect: dbTypePostgres or dbTypeMysql.
	DBType string
	// ResultsTable is an fmt pattern for per-job results table names
	// (e.g. "results_%s", filled with the job ID).
	ResultsTable string
	// UnloggedTables creates Postgres UNLOGGED tables (no WAL) when true.
	UnloggedTables bool
}

// sqlDB represents the sqlDB backend.
type sqlDB struct {
	db     *sql.DB
	opt    Opt
	logger *log.Logger
	// The result schemas (CREATE TABLE ...) are dynamically
	// generated every time queries are executed based on their result columns.
	// They're cached here so as to avoid repetitive generation.
	resTableSchemas map[string]insertSchema
	schemaMutex     sync.RWMutex // guards resTableSchemas
}

// sqlDBWriter represents a writer that saves results
// to a sqlDB backend.
type sqlDBWriter struct {
	jobID       string
	taskName    string
	colsWritten bool     // set once the header/table has been written
	cols        []string // column names, in result order
	rows        [][]byte
	tx          *sql.Tx // transaction the row INSERTs run in
	tbl         string  // concrete results table name for this job
	backend     *sqlDB
}

// insertSchema contains the generated SQL for creating tables
// and inserting rows.
type insertSchema struct {
	dropTable   string // DROP TABLE IF EXISTS pattern
	createTable string // CREATE TABLE pattern
	insertRow   string // INSERT INTO pattern
}
// NewSQLBackend returns a new sqlDB result backend instance.
// It accepts an *sql.DB connection and a logger. When opt.ResultsTable is
// empty, the default "results_%s" pattern (fmt-expanded with the job ID)
// is used.
func NewSQLBackend(db *sql.DB, opt Opt, l *log.Logger) (ResultBackend, error) {
	s := sqlDB{
		db:              db,
		opt:             opt,
		resTableSchemas: make(map[string]insertSchema),
		logger:          l,
	}
	// s.opt already carries the caller's value, so only the empty case
	// needs handling (the previous non-empty re-assignment was redundant;
	// likewise the explicit zero-value RWMutex literal).
	if s.opt.ResultsTable == "" {
		s.opt.ResultsTable = "results_%s"
	}
	return &s, nil
}
// NewResultSet returns a new instance of an sqlDB result writer.
// A new instance should be acquired for every individual job result
// to be written to the backend and then thrown away.
//
// NOTE(review): the transaction opened here is only released by a later
// Flush (commit) or Close (rollback) on the returned writer — confirm all
// callers do one of the two. The ttl parameter is currently unused.
func (s *sqlDB) NewResultSet(jobID, taskName string, ttl time.Duration) (ResultSet, error) {
	tx, err := s.db.Begin()
	if err != nil {
		return nil, err
	}
	return &sqlDBWriter{
		jobID:    jobID,
		taskName: taskName,
		backend:  s,
		// Concrete results table name, e.g. "results_<jobID>".
		tbl: fmt.Sprintf(s.opt.ResultsTable, jobID),
		tx:  tx,
	}, nil
}
// RegisterColTypes registers the column types of a particular taskName's
// result set. Internally it generates and caches the CREATE TABLE / INSERT
// statements for the taskName (see createTableSchema), to be reused for
// every subsequent result table creation and population. This should only
// be called once for each kind of taskName.
func (w *sqlDBWriter) RegisterColTypes(cols []string, colTypes []*sql.ColumnType) error {
	if w.IsColTypesRegistered() {
		return errors.New("column types are already registered")
	}
	// Keep a private copy so later caller mutations don't leak in.
	w.cols = make([]string, len(cols))
	copy(w.cols, cols)
	// The INSERT statement is generated inside createTableSchema together
	// with the CREATE TABLE schema; the previous in-line construction of an
	// `ins` string here was dead code (never stored or used) and has been
	// removed.
	w.backend.schemaMutex.Lock()
	w.backend.resTableSchemas[w.taskName] = w.backend.createTableSchema(cols, colTypes)
	w.backend.schemaMutex.Unlock()
	return nil
}
// IsColTypesRegistered reports whether the schema for this writer's
// taskName has already been generated and cached on the backend.
func (w *sqlDBWriter) IsColTypesRegistered() bool {
	w.backend.schemaMutex.RLock()
	defer w.backend.schemaMutex.RUnlock()
	_, registered := w.backend.resTableSchemas[w.taskName]
	return registered
}
// WriteCols writes the column (headers) of a result set to the backend by
// (re)creating the results table from the schema cached by
// RegisterColTypes. It must be called exactly once per writer instance.
func (w *sqlDBWriter) WriteCols(cols []string) error {
	if w.colsWritten {
		return fmt.Errorf("columns for '%s' are already written", w.taskName)
	}
	w.backend.schemaMutex.RLock()
	rSchema, ok := w.backend.resTableSchemas[w.taskName]
	w.backend.schemaMutex.RUnlock()
	if !ok {
		return fmt.Errorf("column types for '%s' have not been registered", w.taskName)
	}
	// Create the results table in its own transaction; the deferred
	// rollback is a no-op once the transaction has been committed.
	tx, err := w.backend.db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if _, err := tx.Exec(fmt.Sprintf(rSchema.dropTable, w.tbl)); err != nil {
		return err
	}
	if _, err := tx.Exec(fmt.Sprintf(rSchema.createTable, w.tbl)); err != nil {
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	// Mark the columns as written so a second call fails fast. Previously
	// this flag was never set, so the guard at the top could never trigger.
	w.colsWritten = true
	return nil
}
// WriteRow writes an individual row of a result set to the backend by
// executing the cached INSERT statement inside the writer's transaction.
func (w *sqlDBWriter) WriteRow(row []interface{}) error {
	w.backend.schemaMutex.RLock()
	rSchema, ok := w.backend.resTableSchemas[w.taskName]
	w.backend.schemaMutex.RUnlock()
	if !ok {
		return fmt.Errorf("column types for '%s' have not been registered", w.taskName)
	}
	query := fmt.Sprintf(rSchema.insertRow, w.tbl)
	_, err := w.tx.Exec(query, row...)
	return err
}
// Flush commits the transaction holding the buffered row INSERTs.
func (w *sqlDBWriter) Flush() error {
	return w.tx.Commit()
}
// Close rolls back the writer's transaction, if one is open, discarding
// any uncommitted rows.
func (w *sqlDBWriter) Close() error {
	if w.tx == nil {
		return nil
	}
	return w.tx.Rollback()
}
// createTableSchema takes an SQL query's result column names and types and
// generates the DROP/CREATE/INSERT statement patterns for the results
// table. Column types are mapped onto a reduced portable set; anything
// unrecognized falls back to TEXT.
func (s *sqlDB) createTableSchema(cols []string, colTypes []*sql.ColumnType) insertSchema {
	var (
		colNameHolder = make([]string, len(cols))
		colValHolder  = make([]string, len(cols))
	)
	for i := range cols {
		colNameHolder[i] = fmt.Sprintf(`"%s"`, cols[i])
		// This will be filled by the driver.
		if s.opt.DBType == dbTypePostgres {
			// Postgres placeholders are $1, $2 ...
			colValHolder[i] = fmt.Sprintf("$%d", i+1)
		} else {
			colValHolder[i] = "?"
		}
	}
	var (
		fields   = make([]string, len(cols))
		typ      = ""
		unlogged = ""
	)
	// Map each driver-reported type to the portable type set.
	for i := 0; i < len(cols); i++ {
		typ = colTypes[i].DatabaseTypeName()
		switch colTypes[i].DatabaseTypeName() {
		case "INT2", "INT4", "INT8", // Postgres
			"TINYINT", "SMALLINT", "INT", "MEDIUMINT", "BIGINT": // MySQL
			typ = "BIGINT"
		case "FLOAT4", "FLOAT8", // Postgres
			"DECIMAL", "FLOAT", "DOUBLE", "NUMERIC": // MySQL
			typ = "DECIMAL"
		case "TIMESTAMP", // Postgres, MySQL
			"DATETIME": // MySQL
			typ = "TIMESTAMP"
		case "DATE": // Postgres, MySQL
			typ = "DATE"
		case "BOOLEAN": // Postgres, MySQL
			typ = "BOOLEAN"
		case "JSON", "JSONB": // Postgres
			// Only Postgres understands JSON columns natively.
			if s.opt.DBType != dbTypePostgres {
				typ = "TEXT"
			}
		// _INT4, _INT8, _TEXT represent array types in Postgres
		case "_INT4": // Postgres
			typ = "_INT4"
		case "_INT8": // Postgres
			typ = "_INT8"
		case "_TEXT": // Postgres
			typ = "_TEXT"
		default:
			typ = "TEXT"
		}
		if nullable, ok := colTypes[i].Nullable(); ok && !nullable {
			typ += " NOT NULL"
		}
		fields[i] = fmt.Sprintf(`"%s" %s`, cols[i], typ)
	}
	// If the DB is Postgres, optionally create an "unlogged" table that disables
	// WAL, improving performance of throw-away cache tables.
	// https://www.postgresql.org/docs/current/sql-createtable.html
	if s.opt.DBType == dbTypePostgres && s.opt.UnloggedTables {
		unlogged = "UNLOGGED"
	}
	return insertSchema{
		dropTable:   `DROP TABLE IF EXISTS "%s";`,
		createTable: fmt.Sprintf(`CREATE %s TABLE IF NOT EXISTS "%%s" (%s);`, unlogged, strings.Join(fields, ",")),
		insertRow: fmt.Sprintf(`INSERT INTO "%%s" (%s) VALUES (%s)`, strings.Join(colNameHolder, ","),
			strings.Join(colValHolder, ",")),
	}
}
|
package solutions
// findDisappearedNumbers returns, in ascending order, every value in
// [1, len(nums)] that does not occur in nums. It runs in O(n) time and
// O(1) extra space by cycle-sorting nums in place (the input is mutated).
func findDisappearedNumbers(nums []int) []int {
	// Phase 1: cycle sort — park each value v at index v-1 when possible.
	for idx := range nums {
		for nums[idx] != idx+1 {
			v := nums[idx]
			if nums[v-1] == v {
				// Target slot already holds v: duplicate, stop swapping.
				break
			}
			nums[idx], nums[v-1] = nums[v-1], nums[idx]
		}
	}
	// Phase 2: any slot not holding idx+1 marks a missing number.
	var missing []int
	for idx, v := range nums {
		if v != idx+1 {
			missing = append(missing, idx+1)
		}
	}
	return missing
}
|
package main
import (
envstruct "code.cloudfoundry.org/go-envstruct"
)
// Config is the configuration for a MetricStore, loaded from environment
// variables (see LoadConfig).
type Config struct {
	LogProviderAddr string `env:"LOGS_PROVIDER_ADDR, required, report"`
	LogsProviderTLS LogsProviderTLS
	MetricStoreAddr string `env:"METRIC_STORE_ADDR, required, report"`
	MetricStoreTLS  MetricStoreClientTLS
	IngressAddr     string `env:"INGRESS_ADDR, required, report"`
	HealthPort      int    `env:"HEALTH_PORT, report"`
	// NOTE(review): idiomatic Go naming would be ShardID, but renaming is
	// a breaking change for callers, so the field is kept as-is.
	ShardId               string `env:"SHARD_ID, required, report"`
	TimerRollupBufferSize uint   `env:"TIMER_ROLLUP_BUFFER_SIZE, report"`
	NodeIndex             int    `env:"NODE_INDEX, report"`
}

// MetricStoreClientTLS holds the client TLS material for talking to the
// metric store.
type MetricStoreClientTLS struct {
	CAPath   string `env:"METRIC_STORE_CLIENT_CA_PATH, required, report"`
	CertPath string `env:"METRIC_STORE_CLIENT_CERT_PATH, required, report"`
	KeyPath  string `env:"METRIC_STORE_CLIENT_KEY_PATH, required, report"`
}

// LogsProviderTLS is the LogsProviderTLS configuration for a MetricStore.
type LogsProviderTLS struct {
	LogProviderCA   string `env:"LOGS_PROVIDER_CA_PATH, required, report"`
	LogProviderCert string `env:"LOGS_PROVIDER_CERT_PATH, required, report"`
	LogProviderKey  string `env:"LOGS_PROVIDER_KEY_PATH, required, report"`
}
// LoadConfig creates a Config object from environment variables, starting
// from these defaults for the optional fields; envstruct overrides any
// field whose environment variable is set.
func LoadConfig() (*Config, error) {
	c := Config{
		MetricStoreAddr:       ":8080",
		IngressAddr:           ":8090",
		HealthPort:            6061,
		ShardId:               "metric-store",
		TimerRollupBufferSize: 16384,
	}
	if err := envstruct.Load(&c); err != nil {
		return nil, err
	}
	return &c, nil
}
|
package image
import (
"sync"
)
// Grayscale converts the image to grayscale in place using the requested
// algorithm (GrayscaleLuma, GrayscaleDesaturation, or channel average by
// default) and returns the image. Rows are processed concurrently, one
// goroutine per row.
func (img *Image) Grayscale(algorithm int) *Image {
	var wg sync.WaitGroup
	for row := 0; row < img.Height; row++ {
		wg.Add(1)
		go func(row int, img *Image) {
			defer wg.Done()
			for col := 0; col < img.Width; col++ {
				p := img.Pixels[row][col]
				var gray int
				switch algorithm {
				case GrayscaleLuma:
					gray = int(float32(p.R)*0.2126 + float32(p.G)*0.7152 + float32(p.B)*0.0722)
				case GrayscaleDesaturation:
					gray = int((max(p.R, p.G, p.B) + min(p.R, p.G, p.B)) / 2)
				default:
					gray = int((p.R + p.G + p.B) / 3)
				}
				p.Set("R", gray)
				p.Set("G", gray)
				p.Set("B", gray)
				img.Pixels[row][col] = p
			}
		}(row, img)
	}
	wg.Wait()
	return img
}
|
package eventstore
import (
"context"
"time"
"github.com/caos/logging"
"github.com/caos/zitadel/internal/auth/repository/eventsourcing/view"
"github.com/caos/zitadel/internal/auth_request/model"
cache "github.com/caos/zitadel/internal/auth_request/repository"
"github.com/caos/zitadel/internal/errors"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/id"
org_model "github.com/caos/zitadel/internal/org/model"
org_view_model "github.com/caos/zitadel/internal/org/repository/view/model"
user_model "github.com/caos/zitadel/internal/user/model"
user_event "github.com/caos/zitadel/internal/user/repository/eventsourcing"
es_model "github.com/caos/zitadel/internal/user/repository/eventsourcing/model"
user_view_model "github.com/caos/zitadel/internal/user/repository/view/model"
)
// AuthRequestRepo caches auth requests and computes the next login steps
// from the user/org view state plus fresh eventstore events.
type AuthRequestRepo struct {
	UserEvents   *user_event.UserEventstore
	AuthRequests cache.AuthRequestCache
	View         *view.View

	UserSessionViewProvider userSessionViewProvider
	UserViewProvider        userViewProvider
	UserEventProvider       userEventProvider
	OrgViewProvider         orgViewProvider

	IdGenerator id.Generator

	// Lifetimes after which the corresponding verification expires and
	// must be redone (see checkVerificationTime).
	PasswordCheckLifeTime    time.Duration
	MfaInitSkippedLifeTime   time.Duration
	MfaSoftwareCheckLifeTime time.Duration
	MfaHardwareCheckLifeTime time.Duration
}

// userSessionViewProvider supplies user-session views by (agent, user)
// pair or by agent.
type userSessionViewProvider interface {
	UserSessionByIDs(string, string) (*user_view_model.UserSessionView, error)
	UserSessionsByAgentID(string) ([]*user_view_model.UserSessionView, error)
}

// userViewProvider supplies user views by user ID.
type userViewProvider interface {
	UserByID(string) (*user_view_model.UserView, error)
}

// userEventProvider supplies a user's events newer than the given sequence.
type userEventProvider interface {
	UserEventsByID(ctx context.Context, id string, sequence uint64) ([]*es_models.Event, error)
}

// orgViewProvider supplies organization views by org ID.
type orgViewProvider interface {
	OrgByID(string) (*org_view_model.OrgView, error)
}
// Health reports an error when either the user eventstore or the auth
// request cache is unhealthy.
func (repo *AuthRequestRepo) Health(ctx context.Context) error {
	if err := repo.UserEvents.Health(ctx); err != nil {
		return err
	}
	return repo.AuthRequests.Health(ctx)
}
// CreateAuthRequest assigns a fresh ID and the project's app IDs (audience)
// to the request, optionally resolves a login hint, stores the request in
// the cache, and returns it.
func (repo *AuthRequestRepo) CreateAuthRequest(ctx context.Context, request *model.AuthRequest) (*model.AuthRequest, error) {
	reqID, err := repo.IdGenerator.Next()
	if err != nil {
		return nil, err
	}
	request.ID = reqID
	ids, err := repo.View.AppIDsFromProjectByClientID(ctx, request.ApplicationID)
	if err != nil {
		return nil, err
	}
	request.Audience = ids
	if request.LoginHint != "" {
		// Best effort: an invalid login hint must not fail request creation.
		// NOTE(review): the error is only assigned, and the Debug line logs
		// unconditionally (no .OnError(err)) — confirm this is intended.
		err = repo.checkLoginName(request, request.LoginHint)
		logging.LogWithFields("EVENT-aG311", "login name", request.LoginHint, "id", request.ID, "applicationID", request.ApplicationID).Debug("login hint invalid")
	}
	err = repo.AuthRequests.SaveAuthRequest(ctx, request)
	if err != nil {
		return nil, err
	}
	return request, nil
}
// AuthRequestByID returns the cached auth request with the given ID and
// its possible next steps, without the logged-in check.
func (repo *AuthRequestRepo) AuthRequestByID(ctx context.Context, id string) (*model.AuthRequest, error) {
	return repo.getAuthRequest(ctx, id, false)
}

// AuthRequestByIDCheckLoggedIn is AuthRequestByID with the logged-in check
// enabled when computing the next steps.
func (repo *AuthRequestRepo) AuthRequestByIDCheckLoggedIn(ctx context.Context, id string) (*model.AuthRequest, error) {
	return repo.getAuthRequest(ctx, id, true)
}

// SaveAuthCode attaches an authorization code to the cached auth request.
func (repo *AuthRequestRepo) SaveAuthCode(ctx context.Context, id, code string) error {
	request, err := repo.AuthRequests.GetAuthRequestByID(ctx, id)
	if err != nil {
		return err
	}
	request.Code = code
	return repo.AuthRequests.UpdateAuthRequest(ctx, request)
}

// AuthRequestByCode looks up an auth request by its authorization code and
// attaches the possible next steps (logged-in check enabled).
func (repo *AuthRequestRepo) AuthRequestByCode(ctx context.Context, code string) (*model.AuthRequest, error) {
	request, err := repo.AuthRequests.GetAuthRequestByCode(ctx, code)
	if err != nil {
		return nil, err
	}
	steps, err := repo.nextSteps(ctx, request, true)
	if err != nil {
		return nil, err
	}
	request.PossibleSteps = steps
	return request, nil
}

// DeleteAuthRequest removes the auth request from the cache.
func (repo *AuthRequestRepo) DeleteAuthRequest(ctx context.Context, id string) error {
	return repo.AuthRequests.DeleteAuthRequest(ctx, id)
}
// CheckLoginName resolves loginName to a user, stores the user info on the
// auth request, and updates the cached request.
func (repo *AuthRequestRepo) CheckLoginName(ctx context.Context, id, loginName string) error {
	request, err := repo.AuthRequests.GetAuthRequestByID(ctx, id)
	if err != nil {
		return err
	}
	err = repo.checkLoginName(request, loginName)
	if err != nil {
		return err
	}
	return repo.AuthRequests.UpdateAuthRequest(ctx, request)
}

// SelectUser sets the chosen (active) user on the auth request and updates
// the cached request.
func (repo *AuthRequestRepo) SelectUser(ctx context.Context, id, userID string) error {
	request, err := repo.AuthRequests.GetAuthRequestByID(ctx, id)
	if err != nil {
		return err
	}
	user, err := activeUserByID(ctx, repo.UserViewProvider, repo.UserEventProvider, repo.OrgViewProvider, userID)
	if err != nil {
		return err
	}
	request.SetUserInfo(user.ID, user.PreferredLoginName, user.DisplayName, user.ResourceOwner)
	return repo.AuthRequests.UpdateAuthRequest(ctx, request)
}

// VerifyPassword checks the given password for the request's user; userID
// must match the one stored on the request.
func (repo *AuthRequestRepo) VerifyPassword(ctx context.Context, id, userID, password string, info *model.BrowserInfo) error {
	request, err := repo.AuthRequests.GetAuthRequestByID(ctx, id)
	if err != nil {
		return err
	}
	if request.UserID != userID {
		return errors.ThrowPreconditionFailed(nil, "EVENT-ds35D", "Errors.User.NotMatchingUserID")
	}
	return repo.UserEvents.CheckPassword(ctx, userID, password, request.WithCurrentInfo(info))
}

// VerifyMfaOTP checks the given OTP code for the request's user; userID
// must match the one stored on the request.
func (repo *AuthRequestRepo) VerifyMfaOTP(ctx context.Context, authRequestID, userID string, code string, info *model.BrowserInfo) error {
	request, err := repo.AuthRequests.GetAuthRequestByID(ctx, authRequestID)
	if err != nil {
		return err
	}
	if request.UserID != userID {
		return errors.ThrowPreconditionFailed(nil, "EVENT-ADJ26", "Errors.User.NotMatchingUserID")
	}
	return repo.UserEvents.CheckMfaOTP(ctx, userID, code, request.WithCurrentInfo(info))
}
// getAuthRequest loads an auth request from the cache and attaches the
// possible next steps, computed with or without the logged-in check.
func (repo *AuthRequestRepo) getAuthRequest(ctx context.Context, id string, checkLoggedIn bool) (*model.AuthRequest, error) {
	request, err := repo.AuthRequests.GetAuthRequestByID(ctx, id)
	if err != nil {
		return nil, err
	}
	steps, err := repo.nextSteps(ctx, request, checkLoggedIn)
	if err != nil {
		return nil, err
	}
	request.PossibleSteps = steps
	return request, nil
}

// checkLoginName resolves a login name via the view and stores the found
// user's ID and resource owner on the request (display name left empty).
func (repo *AuthRequestRepo) checkLoginName(request *model.AuthRequest, loginName string) error {
	user, err := repo.View.UserByLoginName(loginName)
	if err != nil {
		return err
	}
	request.SetUserInfo(user.ID, loginName, "", user.ResourceOwner)
	return nil
}
// nextSteps computes the ordered list of steps the login flow still has to
// run for the given auth request: user selection/login, user/password
// initialization, password check, MFA, password change, email
// verification, and finally the redirect to the callback.
func (repo *AuthRequestRepo) nextSteps(ctx context.Context, request *model.AuthRequest, checkLoggedIn bool) ([]model.NextStep, error) {
	if request == nil {
		return nil, errors.ThrowInvalidArgument(nil, "EVENT-ds27a", "Errors.Internal")
	}
	steps := make([]model.NextStep, 0)
	// prompt=none without the logged-in check goes straight to the callback.
	if !checkLoggedIn && request.Prompt == model.PromptNone {
		return append(steps, &model.RedirectToCallbackStep{}), nil
	}
	// No user picked yet: login step, optionally with account selection.
	if request.UserID == "" {
		steps = append(steps, &model.LoginStep{})
		if request.Prompt == model.PromptSelectAccount || request.Prompt == model.PromptUnspecified {
			users, err := repo.usersForUserSelection(request)
			if err != nil {
				return nil, err
			}
			// Show the selection screen when sessions exist, or always
			// when the client explicitly asked for account selection.
			if len(users) > 0 || request.Prompt == model.PromptSelectAccount {
				steps = append(steps, &model.SelectUserStep{Users: users})
			}
		}
		return steps, nil
	}
	user, err := activeUserByID(ctx, repo.UserViewProvider, repo.UserEventProvider, repo.OrgViewProvider, request.UserID)
	if err != nil {
		return nil, err
	}
	userSession, err := userSessionByIDs(ctx, repo.UserSessionViewProvider, repo.UserEventProvider, request.AgentID, user)
	if err != nil {
		return nil, err
	}
	if user.InitRequired {
		return append(steps, &model.InitUserStep{PasswordSet: user.PasswordSet}), nil
	}
	if !user.PasswordSet {
		return append(steps, &model.InitPasswordStep{}), nil
	}
	// Password verification missing or expired: ask again.
	if !checkVerificationTime(userSession.PasswordVerification, repo.PasswordCheckLifeTime) {
		return append(steps, &model.PasswordStep{}), nil
	}
	request.PasswordVerified = true
	request.AuthTime = userSession.PasswordVerification
	if step, ok := repo.mfaChecked(userSession, request, user); !ok {
		return append(steps, step), nil
	}
	if user.PasswordChangeRequired {
		steps = append(steps, &model.ChangePasswordStep{})
	}
	if !user.IsEmailVerified {
		steps = append(steps, &model.VerifyEMailStep{})
	}
	if user.PasswordChangeRequired || !user.IsEmailVerified {
		return steps, nil
	}
	//PLANNED: consent step
	return append(steps, &model.RedirectToCallbackStep{}), nil
}
// usersForUserSelection lists the users having a session on the request's
// user agent, for the account-selection screen.
func (repo *AuthRequestRepo) usersForUserSelection(request *model.AuthRequest) ([]model.UserSelection, error) {
	userSessions, err := userSessionsByUserAgentID(repo.UserSessionViewProvider, request.AgentID)
	if err != nil {
		return nil, err
	}
	users := make([]model.UserSelection, len(userSessions))
	for i, session := range userSessions {
		users[i] = model.UserSelection{
			UserID:           session.UserID,
			DisplayName:      session.DisplayName,
			LoginName:        session.LoginName,
			UserSessionState: session.State,
		}
	}
	return users, nil
}
// mfaChecked decides whether MFA is already satisfied for the request.
// It returns (nil, true) when no further MFA interaction is needed, or the
// next MFA step and false otherwise. Verified factors and the auth time
// are recorded on the request.
func (repo *AuthRequestRepo) mfaChecked(userSession *user_model.UserSessionView, request *model.AuthRequest, user *user_model.UserView) (model.NextStep, bool) {
	mfaLevel := request.MfaLevel()
	// Prompt for setup when the user has no factor strong enough for the
	// requested level, or has neither set up nor (recently) skipped MFA.
	promptRequired := user.MfaMaxSetUp < mfaLevel
	if promptRequired || !repo.mfaSkippedOrSetUp(user) {
		return &model.MfaPromptStep{
			Required:     promptRequired,
			MfaProviders: user.MfaTypesSetupPossible(mfaLevel),
		}, false
	}
	// Fallthrough chain: a valid verification at the required level or any
	// higher level satisfies the requirement.
	switch mfaLevel {
	default:
		fallthrough
	case model.MfaLevelNotSetUp:
		if user.MfaMaxSetUp == model.MfaLevelNotSetUp {
			// Nothing set up (and setup skip allowed): no MFA needed.
			return nil, true
		}
		fallthrough
	case model.MfaLevelSoftware:
		if checkVerificationTime(userSession.MfaSoftwareVerification, repo.MfaSoftwareCheckLifeTime) {
			request.MfasVerified = append(request.MfasVerified, userSession.MfaSoftwareVerificationType)
			request.AuthTime = userSession.MfaSoftwareVerification
			return nil, true
		}
		fallthrough
	case model.MfaLevelHardware:
		if checkVerificationTime(userSession.MfaHardwareVerification, repo.MfaHardwareCheckLifeTime) {
			request.MfasVerified = append(request.MfasVerified, userSession.MfaHardwareVerificationType)
			request.AuthTime = userSession.MfaHardwareVerification
			return nil, true
		}
	}
	// No valid verification found: the user must verify a factor now.
	return &model.MfaVerificationStep{
		MfaProviders: user.MfaTypesAllowed(mfaLevel),
	}, false
}

// mfaSkippedOrSetUp reports whether the user either has at least one MFA
// factor set up or skipped MFA setup recently enough.
func (repo *AuthRequestRepo) mfaSkippedOrSetUp(user *user_model.UserView) bool {
	if user.MfaMaxSetUp > model.MfaLevelNotSetUp {
		return true
	}
	return checkVerificationTime(user.MfaInitSkipped, repo.MfaInitSkippedLifeTime)
}
func checkVerificationTime(verificationTime time.Time, lifetime time.Duration) bool {
return verificationTime.Add(lifetime).After(time.Now().UTC())
}
// userSessionsByUserAgentID loads all user sessions of a user agent and
// converts them to the domain model.
func userSessionsByUserAgentID(provider userSessionViewProvider, agentID string) ([]*user_model.UserSessionView, error) {
	session, err := provider.UserSessionsByAgentID(agentID)
	if err != nil {
		return nil, err
	}
	return user_view_model.UserSessionsToModel(session), nil
}

// userSessionByIDs loads the view of the user's session on the given agent
// and patches it with newer session-relevant events from the eventstore
// (password/OTP checks, sign-out, lock, deactivate). Event retrieval or
// parsing problems fall back to the unpatched view; a UserRemoved event is
// a hard error.
func userSessionByIDs(ctx context.Context, provider userSessionViewProvider, eventProvider userEventProvider, agentID string, user *user_model.UserView) (*user_model.UserSessionView, error) {
	session, err := provider.UserSessionByIDs(agentID, user.ID)
	if err != nil {
		if !errors.IsNotFound(err) {
			return nil, err
		}
		// No session yet: start from an empty view.
		session = &user_view_model.UserSessionView{}
	}
	events, err := eventProvider.UserEventsByID(ctx, user.ID, session.Sequence)
	if err != nil {
		logging.Log("EVENT-Hse6s").WithError(err).Debug("error retrieving new events")
		return user_view_model.UserSessionToModel(session), nil
	}
	// Apply events to a copy so the cached view stays untouched.
	sessionCopy := *session
	for _, event := range events {
		switch event.Type {
		case es_model.UserPasswordCheckSucceeded,
			es_model.UserPasswordCheckFailed,
			es_model.MfaOtpCheckSucceeded,
			es_model.MfaOtpCheckFailed,
			es_model.SignedOut,
			es_model.UserLocked,
			es_model.UserDeactivated:
			eventData, err := user_view_model.UserSessionFromEvent(event)
			if err != nil {
				logging.Log("EVENT-sdgT3").WithError(err).Debug("error getting event data")
				return user_view_model.UserSessionToModel(session), nil
			}
			// Only events originating from this user agent affect this session.
			if eventData.UserAgentID != agentID {
				continue
			}
		case es_model.UserRemoved:
			return nil, errors.ThrowPreconditionFailed(nil, "EVENT-dG2fe", "Errors.User.NotActive")
		}
		sessionCopy.AppendEvent(event)
	}
	return user_view_model.UserSessionToModel(&sessionCopy), nil
}
// activeUserByID loads the user with the given ID and ensures that both the
// user and its organization are in a usable state; otherwise a
// precondition-failed error is returned.
func activeUserByID(ctx context.Context, userViewProvider userViewProvider, userEventProvider userEventProvider, orgViewProvider orgViewProvider, userID string) (*user_model.UserView, error) {
	user, err := userByID(ctx, userViewProvider, userEventProvider, userID)
	if err != nil {
		return nil, err
	}
	switch {
	case user.State == user_model.UserStateLocked || user.State == user_model.UserStateSuspend:
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-FJ262", "Errors.User.Locked")
	case user.State != user_model.UserStateActive && user.State != user_model.UserStateInitial:
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-FJ262", "Errors.User.NotActive")
	}
	org, err := orgViewProvider.OrgByID(user.ResourceOwner)
	if err != nil {
		return nil, err
	}
	if org.State != int32(org_model.OrgStateActive) {
		return nil, errors.ThrowPreconditionFailed(nil, "EVENT-Zws3s", "Errors.User.NotActive")
	}
	return user, nil
}
// userByID loads the user view and replays any events newer than the view's
// sequence on a copy, returning the freshest state available.
//
// Both event retrieval and event application are best effort: on failure
// the state as stored in the view is returned without error.
func userByID(ctx context.Context, viewProvider userViewProvider, eventProvider userEventProvider, userID string) (*user_model.UserView, error) {
	user, err := viewProvider.UserByID(userID)
	if err != nil {
		return nil, err
	}
	events, err := eventProvider.UserEventsByID(ctx, userID, user.Sequence)
	if err != nil {
		logging.Log("EVENT-dfg42").WithError(err).Debug("error retrieving new events")
		return user_view_model.UserToModel(user), nil
	}
	// Apply events on a copy so the cached view stays untouched.
	userCopy := *user
	for _, event := range events {
		// If an event cannot be applied, silently fall back to the
		// unmodified view (deliberate best-effort behavior).
		if err := userCopy.AppendEvent(event); err != nil {
			return user_view_model.UserToModel(user), nil
		}
	}
	return user_view_model.UserToModel(&userCopy), nil
}
|
package main
import "fmt"
// imprimir logs a marker line and returns a fixed sentinel string, making
// it easy to observe when the call itself is evaluated versus when its
// result is printed.
func imprimir() string {
	const resultado = "VALOR de IMPRIMIR"
	fmt.Println("Imprimindo...")
	return resultado
}
// main demonstrates defer evaluation order: the argument of the deferred
// call — imprimir() — is evaluated immediately when the defer statement
// executes, while the fmt.Println of its result runs only when main
// returns.
func main() {
	defer fmt.Println(imprimir()) // "Imprimindo..." prints now; the return value prints last
	fmt.Println("2")
	fmt.Println("3")
}
// Defer notes:
// Deferred calls form a stack (last-in, first-out). Note that the
// argument expression of a deferred call is evaluated at the defer
// statement itself, so imprimir() runs — and prints "Imprimindo..." —
// immediately.
//
// Actual execution order of the program above:
//   imprimir()            -> prints "Imprimindo..."
//   fmt.Println("2")      -> prints "2"
//   fmt.Println("3")      -> prints "3"
//   deferred fmt.Println  -> prints "VALOR de IMPRIMIR"
|
package config
import (
"flag"
"log"
"os"
"strconv"
"strings"
)
// globalConfig holds the configuration populated by Init and served by Get.
var globalConfig ApplicationConfig

// ApplicationConfig stores all of the input parameters.
type ApplicationConfig struct {
	LogFolders   []string // Folders that should be watched for changes.
	GrpcBackends []string // gRPC backends to send data to.
	GrpcPort     int      // Port for the gRPC server
}
// Init reads the configuration from the CLI flags, then applies overrides
// from the LOG_FOLDERS, GRPC_ADDRESSES and GRPC_PORT environment
// variables, and stores the result for retrieval via Get.
//
// A port that cannot be parsed is treated as -1, which disables the gRPC
// server. Init currently never returns a non-nil error; the signature
// leaves room for future validation.
//
// NOTE: Init registers flags on the default flag set, so it must only be
// called once per process.
func Init() error {
	var logFolder string
	var grpcAddresses string
	var grpcPort string
	flag.StringVar(&logFolder, "log-folders", "./logs,./log", "specify folders to read logs from. Default is ./logs,./log")
	flag.StringVar(&grpcAddresses, "grpc-addresses", "", "specify the backends to send logs to. Default is none")
	flag.StringVar(&grpcPort, "grpc-port", "-1", "specify the port of the gRPC server to start. If none is set no server will be started. Default is none")
	flag.Parse()
	folders := strings.Split(logFolder, ",")
	backends := strings.Split(grpcAddresses, ",")
	port, err := strconv.Atoi(grpcPort)
	if err != nil {
		log.Printf("Could not read port argument '%s' error: %s", grpcPort, err)
		port = -1
	}
	// Environment variables take precedence over CLI flags.
	if foldersEnv := os.Getenv("LOG_FOLDERS"); foldersEnv != "" {
		folders = strings.Split(foldersEnv, ",")
	}
	if backendsEnv := os.Getenv("GRPC_ADDRESSES"); backendsEnv != "" {
		backends = strings.Split(backendsEnv, ",")
	}
	if portEnv := os.Getenv("GRPC_PORT"); portEnv != "" {
		if portOverride, err := strconv.Atoi(portEnv); err != nil {
			log.Printf("Could not read port argument '%s' error: %s", portEnv, err)
		} else {
			port = portOverride
		}
	}
	globalConfig = ApplicationConfig{
		LogFolders:   folders,
		GrpcBackends: backends,
		GrpcPort:     port,
	}
	log.Printf("Reading logs from folders %s", folders)
	log.Printf("Sending logs to backends %s", backends)
	if port > 0 {
		log.Printf("Starting gRPC server on port %d", port)
	}
	return nil
}
// Get returns the configuration for this app. Run 'Init()' first,
// otherwise the zero-value configuration is returned.
func Get() ApplicationConfig {
	return globalConfig
}
|
package main
import "fmt"
// sentence is left at its zero value ("") to demonstrate string defaults.
var sentence string

// emptyString is explicitly initialized to the empty string.
var emptyString string = ""

// no, yes and maybe show a multi-variable declaration with type inference.
var no, yes, maybe = "no", "yes", "maybe"
// main delegates to output, which prints the demo string values.
func main() {
	output()
}
// output prints a raw string literal (backticks keep `\n` as two literal
// characters rather than a newline) followed by the package-level strings.
func output() {
	m := `hello
string \n ` // does not escape any characters in a string
	fmt.Println(m)
	fmt.Println(sentence)
	fmt.Println(emptyString)
	fmt.Println(no, yes, maybe)
}
|
package params
import (
"fmt"
)
const (
	VersionMajor = 0          // Major version component of the current release
	VersionMinor = 0          // Minor version component of the current release
	VersionPatch = 0          // Patch version component of the current release
	VersionMeta  = "unstable" // Version metadata to append to the version string
)

// Version holds the textual version string.
var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)

// VersionWithMeta holds the textual version string including the metadata.
var VersionWithMeta = func() string {
	if VersionMeta == "" {
		return Version
	}
	return Version + "-" + VersionMeta
}()
|
package codegen
import (
"fmt"
"testing"
)
// TestPrinter smoke-tests the code generators by printing their output;
// it makes no assertions, so it only verifies the calls do not panic.
func TestPrinter(t *testing.T) {
	fmt.Println(DeclPackage("some_package"))
	fmt.Println(DeclType("Test", "int"))
}
|
package security_signout_reply
import (
"encoding/xml"
"github.com/tmconsulting/amadeus-ws-go/formats"
)
// SecuritySignOutReply is the response of the Amadeus Security_SignOut
// operation: either an error section or, on success, a process status.
type SecuritySignOutReply struct {
	XMLName      xml.Name      `xml:"http://xml.amadeus.com/VLSSOR_04_1_1A Security_SignOutReply"`
	ErrorSection *ErrorSection `xml:"errorSection,omitempty"`
	// This segment is only used if process is OK. In that case P is specified.
	ProcessStatus *ResponseAnalysisDetailsType `xml:"processStatus,omitempty"`
}

// ErrorSection carries the error details of a failed sign-out.
type ErrorSection struct {
	// Application Error
	ApplicationError *ApplicationErrorInformationType `xml:"applicationError,omitempty"`
	// Supplementary Info on the Error.
	InteractiveFreeText *InteractiveFreeTextTypeI `xml:"interactiveFreeText,omitempty"`
}

// ApplicationErrorDetailType identifies a data validation error condition.
type ApplicationErrorDetailType struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/VLSSOR_04_1_1A ApplicationErrorDetailType"`
	// Code identifying the data validation error condition.
	ErrorCode formats.AlphaNumericString_Length1To5 `xml:"errorCode,omitempty"`
	// Identification of a code list.
	ErrorCategory formats.AlphaNumericString_Length1To3 `xml:"errorCategory,omitempty"`
	// Code identifying the agency responsible for a code list.
	ErrorCodeOwner formats.AlphaNumericString_Length1To3 `xml:"errorCodeOwner,omitempty"`
}

// ApplicationErrorInformationType wraps the application error details.
type ApplicationErrorInformationType struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/VLSSOR_04_1_1A ApplicationErrorInformationType"`
	// Application error details.
	ErrorDetails *ApplicationErrorDetailType `xml:"errorDetails,omitempty"`
}

// FreeTextQualificationTypeI qualifies an accompanying free-text element.
type FreeTextQualificationTypeI struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/VLSSOR_04_1_1A FreeTextQualificationTypeI"`
	// Subject
	TextSubjectQualifier formats.AlphaNumericString_Length1To3 `xml:"textSubjectQualifier,omitempty"`
	// Info Type
	InformationType formats.AlphaNumericString_Length1To4 `xml:"informationType,omitempty"`
	// Language
	Language formats.AlphaNumericString_Length1To3 `xml:"language,omitempty"`
}

// InteractiveFreeTextTypeI carries supplementary free text and its qualifier.
type InteractiveFreeTextTypeI struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/VLSSOR_04_1_1A InteractiveFreeTextTypeI"`
	// Free Text Qualifier
	FreeTextQualification *FreeTextQualificationTypeI `xml:"freeTextQualification,omitempty"`
	// Free Text
	FreeText formats.AlphaNumericString_Length1To70 `xml:"freeText,omitempty"`
}

// ResponseAnalysisDetailsType reports the outcome of the sign-out process.
type ResponseAnalysisDetailsType struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/VLSSOR_04_1_1A ResponseAnalysisDetailsType"`
	// P must be specified when status of the process is OK.
	StatusCode formats.AlphaString_Length1To6 `xml:"statusCode,omitempty"`
}
|
package gadwords
import (
"encoding/xml"
"fmt"
"log"
"testing"
"time"
)
// testCampaignService builds a CampaignService wired to the test
// authentication setup.
func testCampaignService(t *testing.T) (service *CampaignService) {
	return &CampaignService{Auth: testAuthSetup22(t)}
}
// testCampaign creates a paused search campaign backed by a fresh test
// budget and returns it together with a cleanup function that marks the
// campaign REMOVED and then cleans up the budget.
func testCampaign(t *testing.T) (Campaign, func()) {
	budget, cleanupBudget := testBudget(t)
	cs := testCampaignService(t)
	campaigns, err := cs.Mutate(
		CampaignOperations{
			"ADD": {
				Campaign{
					Name:      "test campaign " + rand_str(10),
					Status:    "PAUSED",
					StartDate: time.Now().Format("20060102"),
					BudgetId:  budget.Id,
					Settings: []CampaignSetting{
						NewGeoTargetTypeSetting("DONT_CARE", "LOCATION_OF_PRESENCE"),
					},
					AdvertisingChannelType: "SEARCH",
					BiddingStrategyConfiguration: &BiddingStrategyConfiguration{
						StrategyType: "MANUAL_CPC",
					},
				},
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	cleanupCampaign := func() {
		// AdWords campaigns cannot be deleted, only set to REMOVED.
		campaigns[0].Status = "REMOVED"
		_, err = cs.Mutate(CampaignOperations{"SET": campaigns})
		if err != nil {
			t.Error(err)
		}
		cleanupBudget()
	}
	return campaigns[0], cleanupCampaign
}
// TestCampaign creates a paused search campaign; the label-attachment and
// query verification below the early return are currently disabled.
func TestCampaign(t *testing.T) {
	budget, cleanupBudget := testBudget(t)
	defer cleanupBudget()
	cs := testCampaignService(t)
	campaigns, err := cs.Mutate(
		CampaignOperations{
			"ADD": {
				Campaign{
					Name:      "test campaign " + rand_str(10),
					Status:    "PAUSED",
					StartDate: time.Now().Format("20060102"),
					BudgetId:  budget.Id,
					Settings: []CampaignSetting{
						NewGeoTargetTypeSetting("DONT_CARE", "LOCATION_OF_PRESENCE"),
					},
					AdvertisingChannelType: "SEARCH",
					NetworkSetting: &NetworkSetting{
						TargetGoogleSearch:         true,
						TargetSearchNetwork:        true,
						TargetContentNetwork:       false,
						TargetPartnerSearchNetwork: false,
					},
					BiddingStrategyConfiguration: &BiddingStrategyConfiguration{
						StrategyType: "MANUAL_CPC",
					},
				},
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	log.Println(campaigns)
	log.Println(err)
	// NOTE(review): this early return makes everything below unreachable
	// (debug leftover?) — including the campaign cleanup, so campaigns
	// created by this test are never removed. Delete it to re-enable the
	// label and Get verification.
	return
	defer func(campaigns []Campaign) {
		campaigns[0].Status = "REMOVED"
		_, err = cs.Mutate(CampaignOperations{"SET": campaigns})
		if err != nil {
			t.Error(err)
		}
	}(campaigns)
	label, labelCleanup := testLabel(t)
	defer labelCleanup()
	campaignLabels, err := cs.MutateLabel(
		CampaignLabelOperations{
			"ADD": {
				CampaignLabel{CampaignId: campaigns[0].Id, LabelId: label.Id},
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		campaignLabels, err = cs.MutateLabel(CampaignLabelOperations{"REMOVE": campaignLabels})
		if err != nil {
			t.Fatal(err)
		}
	}()
	foundCampaigns, _, err := cs.Get(
		Selector{
			Fields: []string{
				"Id",
				"Name",
				"Status",
				"ServingStatus",
				"StartDate",
				"EndDate",
				"Settings",
				"Labels",
			},
			Predicates: []Predicate{
				{"Status", "EQUALS", []string{"PAUSED"}},
			},
			Ordering: []OrderBy{
				{"Id", "ASCENDING"},
			},
			Paging: &Paging{
				Offset: 0,
				Limit:  100,
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("found %d campaigns\n", len(foundCampaigns))
	// Every created campaign must appear in the query result.
	for _, c := range campaigns {
		func(campaign Campaign) {
			for _, foundCampaign := range foundCampaigns {
				if foundCampaign.Id == campaign.Id {
					fmt.Printf("%#v", foundCampaign)
					return
				}
			}
			t.Errorf("campaign %d not found in \n%#v\n", campaign.Id, foundCampaigns)
		}(c)
	}
}
// TestAppCampaign creates an app campaign against a freshly created test
// budget. The budget's cleanup function is intentionally discarded, so the
// budget is not removed afterwards. No assertions are made on the result.
func TestAppCampaign(t *testing.T) {
	log.SetFlags(log.Flags() | log.Lshortfile)
	budget, _ := testBudget(t)
	CreateAppCampaign(budget.Id, 160000, "YKTEST0902", "com.camera.momo.cam")
}
// TestCampaignGET fetches campaigns matching a hard-coded name together
// with an extensive field list and logs the result; it makes no assertions.
func TestCampaignGET(t *testing.T) {
	log.SetFlags(log.Flags() | log.Lshortfile)
	//cs := testCampaignService(t)
	cs := testCampaignService(t)
	ads, totalCount, err := cs.Get(
		Selector{
			Fields: []string{
				"Name",
				"AdvertisingChannelSubType",
				/* "BiddingStrategyId",
				"BiddingStrategyName",
				"BiddingStrategyType",
				"TargetCpa",
				"Status",
				"ServingStatus", */
				"AdvertisingChannelType",
				//"Amount",
				"Id",
				"Settings",
				"TargetGoogleSearch",
				"TargetSearchNetwork",
				"TargetContentNetwork",
				"TargetPartnerSearchNetwork",
				"SelectiveOptimization",
				"BiddingStrategyId",
				"BiddingStrategyName",
				"BiddingStrategyType",
				"EnhancedCpcEnabled",
				"AdServingOptimizationStatus",
				"Eligible",
				"TargetSpendBidCeiling",
				"TargetSpendSpendTarget",
				//"BiddingStrategyConfiguration",
				//"AverageCpc",
				//"CpcBid",
			},
			Predicates: []Predicate{{"Name", "EQUALS", []string{"yyktest 1120 testupdate"}}},
		},
	)
	log.Println(err, totalCount, ads)
}
// TestCampaignGET2 fetches all campaigns with a minimal field list and
// logs the budget ID of the first result.
// NOTE(review): ads[0] panics if the account has no campaigns — guard with
// a length check before relying on this test.
func TestCampaignGET2(t *testing.T) {
	log.SetFlags(log.Flags() | log.Lshortfile)
	//cs := testCampaignService(t)
	cs := testCampaignService(t)
	ads, totalCount, err := cs.Get(
		Selector{
			Fields: []string{
				"Name",
				"BudgetId",
				"Status",
				"Id",
			},
		},
	)
	log.Println(err, totalCount, ads[0].BudgetId)
}
// TestCampaignSet (app campaign bid adjustment) enables a hard-coded
// campaign and switches its bidding strategy to target-CPA with an amount
// of 190000 micros.
func TestCampaignSet(t *testing.T) {
	cs := testCampaignService(t)
	_, err := cs.Mutate(
		CampaignOperations{
			"SET": {
				Campaign{
					Id:                           10113627246,
					Status:                       "ENABLED",
					BiddingStrategyConfiguration: &BiddingStrategyConfiguration{Scheme: &BiddingScheme{Type: "TargetCpaBiddingScheme", TargetCpa: &TargetCpa{Amount: 190000}}},
				},
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}
}
// TestCampaignFeed queries the CampaignFeedService through the raw SOAP
// request helper and logs the response body; no assertions are made on
// the payload.
func TestCampaignFeed(t *testing.T) {
	selector := Selector{
		Fields: []string{
			"FeedId",
			"CampaignId",
			"MatchingFunction",
			"PlaceholderTypes",
			"Status",
			"BaseCampaignId",
		},
	}
	// The selector element must live in the AdWords API namespace.
	selector.XMLName = xml.Name{Space: baseUrl, Local: "selector"}
	s := testCampaignService(t)
	respBody, err := s.Auth.request(
		campaignFeedServiceUrl,
		"get",
		struct {
			XMLName xml.Name
			Sel     Selector
		}{
			XMLName: xml.Name{
				Space: baseUrl,
				Local: "get",
			},
			Sel: selector,
		},
	)
	log.Println(string(respBody))
	if err != nil {
		t.Fatal(err)
	}
}
// TestCampaignConversionType fetches conversion trackers with a basic
// field selector; no assertions are made on the response.
func TestCampaignConversionType(t *testing.T) {
	selector := Selector{
		Fields: []string{
			"Name",
			"Status",
			"Category",
			"CountingType",
		},
	}
	// The selector element must live in the AdWords API namespace.
	selector.XMLName = xml.Name{Space: baseUrl, Local: "selector"}
	s := &ConversionTrackerService{Auth: testAuthSetup(t)}
	s.Get(selector)
}
/* 435205089 */
/* 434712695 */
|
/*
MIT License
Copyright (c) 2020-2021 Kazuhito Suda
This file is part of NGSI Go
https://github.com/lets-fiware/ngsi-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package ngsilib
import (
"encoding/base64"
"time"
)
// idmBasic implements token handling for HTTP Basic authentication.
// It is stateless: the token is derived from the client credentials on
// each request.
type idmBasic struct {
}
// requestToken builds a Basic-auth token from the client's username and
// password and stores it in tokenInfo with a one-hour expiry. No network
// round trip is required for Basic authentication.
func (i *idmBasic) requestToken(ngsi *NGSI, client *Client, tokenInfo *TokenInfo) (*TokenInfo, error) {
	const funcName = "requestTokenBasic"

	username, password, err := getUserNamePassword(client)
	if err != nil {
		return nil, &LibError{funcName, 1, err.Error(), err}
	}
	// RFC 7617 specifies standard base64 (not the URL-safe alphabet) for
	// Basic authentication credentials; URLEncoding would produce invalid
	// tokens for credentials whose encoding contains '+' or '/'.
	token := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))

	utime := ngsi.TimeLib.NowUnix()

	tokenInfo.Type = CBasic
	tokenInfo.Token = token
	tokenInfo.RefreshToken = ""
	tokenInfo.Expires = time.Unix(utime+3600, 0)

	return tokenInfo, nil
}
// revokeToken is a no-op: Basic-auth tokens are derived from credentials,
// so there is nothing to revoke on the server side.
func (i *idmBasic) revokeToken(ngsi *NGSI, client *Client, tokenInfo *TokenInfo) error {
	return nil
}
// getAuthHeader returns the header name/value pair carrying the
// Basic-auth token.
func (i *idmBasic) getAuthHeader(token string) (string, string) {
	return "Authorization", "Basic " + token
}
// getTokenInfo always fails: Basic authentication exposes no token
// metadata.
func (i *idmBasic) getTokenInfo(tokenInfo *TokenInfo) ([]byte, error) {
	const funcName = "getTokenInfoBasic"

	return nil, &LibError{funcName, 1, "no information available", nil}
}
// checkIdmParams accepts exactly the combination "username and password
// set, everything else empty"; any other combination is rejected.
// NOTE(review): the error text only mentions the missing credentials even
// when the rejection is caused by an extra parameter (e.g. IdmHost) being
// set.
func (i *idmBasic) checkIdmParams(idmParams *IdmParams) error {
	const funcName = "checkIdmParamsBasic"

	if idmParams.IdmHost == "" &&
		idmParams.Username != "" &&
		idmParams.Password != "" &&
		idmParams.ClientID == "" &&
		idmParams.ClientSecret == "" &&
		idmParams.HeaderName == "" &&
		idmParams.HeaderValue == "" &&
		idmParams.HeaderEnvValue == "" {
		return nil
	}
	return &LibError{funcName, 1, "username and password are needed", nil}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ui
import (
"context"
"net/http"
"net/http/httptest"
"path/filepath"
"regexp"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/quicksettings"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// init registers the MediaSessionAPI test with both Ash and Lacros browser
// variants.
func init() {
	testing.AddTest(&testing.Test{
		Func:         MediaSessionAPI,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Verify the control buttons exist and there should be a space for artwork if the audio has artwork",
		Contacts: []string{
			"cj.tsai@cienet.com",
			"cienet-development@googlegroups.com",
			"chromeos-sw-engprod@google.com",
			"cros-status-area-eng@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Data:         []string{"media_session_api.html", "Lenna.png", "five_minute_audio_20211116.mp3"},
		Params: []testing.Param{
			{
				Fixture: "chromeLoggedIn",
				Val:     browser.TypeAsh,
			}, {
				Name:              "lacros",
				Fixture:           "lacros",
				ExtraSoftwareDeps: []string{"lacros"},
				Val:               browser.TypeLacros,
			},
		},
		Timeout: 3 * time.Minute,
	})
}
// MediaSessionAPI verifies the control buttons exist and there should be a
// space for artwork if the audio has artwork. It serves a local test page,
// starts playback, opens the pinned media controls, and then runs one
// subtest per artwork expectation.
func MediaSessionAPI(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	browserType := s.Param().(browser.Type)

	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to open test API connection: ", err)
	}
	ui := uiauto.New(tconn)

	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to open keyboard: ", err)
	}
	defer kb.Close()

	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()

	// Reserve time for the cleanup actions that must run even on failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()

	// Setup browser based on the chrome type.
	br, closeBrowser, err := browserfixt.SetUp(ctx, cr, browserType)
	if err != nil {
		s.Fatal("Failed to open the browser: ", err)
	}
	defer closeBrowser(cleanupCtx)

	conn, err := br.NewConn(ctx, filepath.Join(server.URL, "media_session_api.html"))
	if err != nil {
		s.Fatal("Failed to open page: ", err)
	}
	defer func(ctx context.Context) {
		faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_dump")
		conn.CloseTarget(ctx)
		conn.Close()
	}(cleanupCtx)

	if err := webutil.WaitForQuiescence(ctx, conn, time.Minute); err != nil {
		s.Fatal("Failed to wait until the page is stable: ", err)
	}

	// Lacros browser windows carry a different window class than Ash.
	browserFinder := nodewith.Ancestor(nodewith.Role(role.Window).HasClass("BrowserFrame").NameContaining("MediaSessionAPI"))
	if browserType == browser.TypeLacros {
		classNameRegexp := regexp.MustCompile(`^ExoShellSurface(-\d+)?$`)
		browserFinder = nodewith.Ancestor(nodewith.Role(role.Window).ClassNameRegex(classNameRegexp).NameContaining("MediaSessionAPI"))
	}

	playButton := browserFinder.Name("play").Role(role.Button)
	if err := uiauto.Combine("play the audio",
		ui.LeftClick(playButton),
		// It might take a longer time to wait until the button show up.
		ui.WithTimeout(time.Minute).WaitUntilExists(quicksettings.PinnedMediaControls),
		ui.LeftClick(quicksettings.PinnedMediaControls),
		ui.WaitUntilExists(quicksettings.MediaControlsDialog),
	)(ctx); err != nil {
		s.Fatal("Failed to complete all actions: ", err)
	}

	for _, test := range []mediaSessionAPITest{
		{
			ui:         ui,
			hasArtwork: true,
		}, {
			ui:         ui,
			hasArtwork: false,
		},
	} {
		subtest := func(ctx context.Context, s *testing.State) {
			cleanupSubCtx := ctx
			ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
			defer cancel()
			// The page-side helper returns the title of the audio with (or
			// without) artwork to switch to.
			if err := conn.Call(ctx, &test.audioName, "getTitleWithArtwork", test.hasArtwork); err != nil {
				s.Fatal("Failed to get the title of audio: ", err)
			}
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupSubCtx, s.OutDir(), s.HasError, cr, test.audioName)

			s.Logf("Switching to target audio: %q", test.audioName)
			if err := test.switchToTargetAudio(ctx); err != nil {
				s.Fatal("Failed to switch to target audio: ", err)
			}
			s.Log("Verifying media controls buttons exist")
			if err := test.verifyMediaControlNodes(ctx); err != nil {
				s.Fatal("Failed to verify nodes in media control: ", err)
			}
			s.Log("Verifying media artwork")
			if err := test.verifyArtwork(ctx); err != nil {
				s.Fatal("Failed to verify artwork existed: ", err)
			}
		}
		if !s.Run(ctx, test.audioName, subtest) {
			s.Errorf("Failed to run subtest: %q", test.audioName)
		}
	}
}
// mediaSessionAPITest holds the parameters of one artwork subtest.
type mediaSessionAPITest struct {
	ui         *uiauto.Context // UI automation handle
	audioName  string          // title of the target audio, filled in by the subtest
	hasArtwork bool            // whether this audio is expected to carry artwork
}
// switchToTargetAudio re-opens the media controls dialog if it was
// dismissed and clicks "Next Track" until the entry for m.audioName is
// shown in the dialog.
func (m *mediaSessionAPITest) switchToTargetAudio(ctx context.Context) error {
	// Re-open the media controls dialog if it has been dismissed.
	if err := uiauto.IfSuccessThen(
		m.ui.WaitUntilGone(quicksettings.MediaControlsDialog),
		m.ui.LeftClick(quicksettings.PinnedMediaControls),
	)(ctx); err != nil {
		return err
	}
	audioLabel := nodewith.NameContaining(m.audioName).Role(role.StaticText).HasClass("Label")
	// Only click through tracks when the target audio is not already shown.
	return uiauto.IfSuccessThen(
		m.ui.WaitUntilGone(audioLabel.Ancestor(quicksettings.MediaControlsDialog)),
		m.ui.RetryUntil(
			m.ui.LeftClick(nodewith.Name("Next Track").Role(role.Button).Ancestor(quicksettings.MediaControlsDialog)),
			m.ui.WithTimeout(3*time.Second).WaitUntilExists(audioLabel.Ancestor(quicksettings.MediaControlsDialog)),
		),
	)(ctx)
}
// verifyMediaControlNodes ensures the media controls dialog is open and
// that all five transport control buttons are present.
func (m *mediaSessionAPITest) verifyMediaControlNodes(ctx context.Context) error {
	audioLabel := nodewith.NameContaining(m.audioName).Role(role.StaticText).HasClass("Label")
	// Re-open the media controls dialog if it has been dismissed.
	if err := uiauto.IfSuccessThen(
		m.ui.WaitUntilGone(audioLabel.Ancestor(quicksettings.MediaControlsDialog)),
		m.ui.LeftClick(quicksettings.PinnedMediaControls),
	)(ctx); err != nil {
		return err
	}
	for _, node := range []*nodewith.Finder{
		nodewith.Name("Pause").Role(role.ToggleButton),
		nodewith.Name("Seek Backward").Role(role.Button),
		nodewith.Name("Seek Forward").Role(role.Button),
		nodewith.Name("Previous Track").Role(role.Button),
		nodewith.Name("Next Track").Role(role.Button),
	} {
		if err := m.ui.WaitUntilExists(node)(ctx); err != nil {
			return errors.Wrap(err, "failed to find node")
		}
	}
	return nil
}
// verifyArtwork checks whether the media item reserves space for artwork
// and compares that against the expectation in m.hasArtwork.
//
// The artwork image view itself is not exposed on the UI tree, so its
// presence is inferred from geometry: if the detail view's right edge
// stops short of the dismiss button's right edge, an image view occupies
// the remaining space.
func (m *mediaSessionAPITest) verifyArtwork(ctx context.Context) error {
	mediaListContainer := nodewith.Role(role.ListItem).HasClass("MediaNotificationViewImpl").Ancestor(quicksettings.MediaControlsDialog)
	mediaListContainerLocation, err := m.ui.Location(ctx, mediaListContainer)
	if err != nil {
		return errors.Wrap(err, "failed to get the location of media list container")
	}
	testing.ContextLog(ctx, "Media list container location: ", mediaListContainerLocation)

	// These are the final details used to examine if the image view exists.
	var (
		dismissBtnLocation      *coords.Rect
		mediaDetailViewLocation *coords.Rect
	)

	dismissBtn := nodewith.Role(role.Button).Name("Dismiss").Ancestor(mediaListContainer)
	if err := uiauto.Combine("make dismiss button visible",
		m.ui.WaitForLocation(mediaListContainer),
		m.ui.MouseMoveTo(mediaListContainer, 200*time.Millisecond),
		m.ui.WaitUntilExists(dismissBtn),
	)(ctx); err != nil {
		return err
	}
	if dismissBtnLocation, err = m.ui.Location(ctx, dismissBtn); err != nil {
		return errors.Wrap(err, "failed to get the location of dismiss button")
	}
	testing.ContextLog(ctx, "Dismiss button location: ", dismissBtnLocation)

	subViews := nodewith.HasClass("View").Ancestor(mediaListContainer)
	if err := m.ui.WaitUntilExists(subViews.First())(ctx); err != nil {
		return errors.Wrap(err, "failed to find any subviews under the media control dialog")
	}
	subViewInfos, err := m.ui.NodesInfo(ctx, subViews)
	if err != nil {
		return err
	}

	var subviewNth int
	matchedCnt := 0
	testing.ContextLog(ctx, "Searching for subview has the same width as media list container")
	for nth, subViewInfo := range subViewInfos {
		// All subviews under media list container are identical on the UI tree.
		// We examine the width of a subview to further locate the target.
		if subViewInfo.Location.Width != mediaListContainerLocation.Width {
			continue
		}
		// Expecting 2 subviews have the same width as media list container:
		// 1. list header view
		// 2. list contents container view
		// Second one is the target, let the value be overwritten directly.
		subviewNth = nth
		matchedCnt++
	}
	if matchedCnt != 2 {
		return errors.Errorf("expecting 2 subviews have the same width as media list container, got: %d", matchedCnt)
	}

	listContentsContainerView := subViews.Nth(subviewNth)
	// The first subview under the list contents container view is the final target.
	mediaDetailView := nodewith.HasClass("View").First().Ancestor(listContentsContainerView)
	if mediaDetailViewLocation, err = m.ui.Location(ctx, mediaDetailView); err != nil {
		return errors.Wrap(err, "failed to get the location of media detail view")
	}
	testing.ContextLog(ctx, "Media detail view location: ", mediaDetailViewLocation)

	// The dismiss button is located at the right side of the media controls
	// dialog and right-aligned to the media item. Within a media item there
	// is a detail view on the left and, when artwork exists, an image view
	// on the right. If the detail view's right bound reaches the right bound
	// of the media item, there is no space for an image view.
	hasArtwork := mediaDetailViewLocation.Right() != dismissBtnLocation.Right()
	if hasArtwork != m.hasArtwork {
		return errors.Errorf("failed to verify media has artwork: want %t, got %t", m.hasArtwork, hasArtwork)
	}
	return nil
}
|
package gnet
import (
guid "github.com/satori/go.uuid"
)
// SimpleGuacamoleTunnel ==> AbstractGuacamoleTunnel
// SimpleGuacamoleTunnel is a GuacamoleTunnel implementation which uses a
// provided socket. The UUID of the tunnel is randomly generated.
type SimpleGuacamoleTunnel struct {
	AbstractGuacamoleTunnel
	// uuid is the UUID associated with this tunnel. Every tunnel must have
	// a corresponding UUID such that tunnel read/write requests can be
	// directed to the proper tunnel.
	uuid guid.UUID
	// socket is the GuacamoleSocket the tunnel should use for
	// communication on behalf of the connecting user.
	socket GuacamoleSocket
}
// NewSimpleGuacamoleTunnel constructs a tunnel around the given socket
// with a freshly generated random (v4) UUID.
// NOTE(review): the error from guid.NewV4() is discarded; on failure the
// zero UUID would be used — confirm this is acceptable.
func NewSimpleGuacamoleTunnel(socket GuacamoleSocket) (ret GuacamoleTunnel) {
	u, _ := guid.NewV4()
	one := SimpleGuacamoleTunnel{
		uuid:   u,
		socket: socket,
	}
	one.AbstractGuacamoleTunnel = NewAbstractGuacamoleTunnel(&one)
	ret = &one
	return
}
// GetUUID overrides GuacamoleTunnel.GetUUID and returns the UUID generated
// for this tunnel at construction time.
func (opt *SimpleGuacamoleTunnel) GetUUID() guid.UUID {
	return opt.uuid
}
// GetSocket overrides GuacamoleTunnel.GetSocket and returns the socket the
// tunnel was constructed with.
func (opt *SimpleGuacamoleTunnel) GetSocket() GuacamoleSocket {
	return opt.socket
}
|
package main
import (
"log"
"os"
"time"
"persistence"
)
// dbPersistence is the package-wide persistence client initialized in init.
var dbPersistence persistence.Persistence

// init builds the DynamoDB-backed persistence client from the
// test1_aws_* environment variables and panics if construction fails,
// aborting the program before main runs.
func init() {
	var err error
	accessKey := os.Getenv("test1_aws_access_key")
	secretKey := os.Getenv("test1_aws_secret_key")
	region := os.Getenv("test1_aws_region")
	dbPersistence, err = persistence.NewPersistenceDynamoDb(&accessKey, &secretKey, &region)
	if err != nil {
		log.Println("Error creating a DynamoDb persistence client")
		panic(err)
	}
}
func main() {
note := persistence.Note{
Id: "15fc4e1c-49f8-4723-9d06-90525b67de4d",
Timestamp: time.Now().UTC().Format("2006-01-02T15:04:05.999999"),
Title: "foobar new",
Body: "foofoofoof new",
}
err := dbPersistence.Update(¬e)
if err == nil {
log.Println("Successfully updated your note in the database!")
} else {
log.Panicf("Error updating your note in the database: %v", err)
}
}
|
package contract
import "github.com/fanaticscripter/EggContractor/api"
// ProgressInfo summarizes contract progress: eggs laid so far, the
// projected total, the per-reward breakdown and the ultimate goal.
type ProgressInfo struct {
	EggsLaid          float64
	ProjectedEggsLaid float64
	Rewards           []*Reward
	UltimateGoal      float64
}

// Reward decorates an api.Reward with percentages relative to the
// ultimate goal and to current progress.
type Reward struct {
	*api.Reward
	PercentageOfUltimateGoal float64
	PercentageCompleted      float64
}
// NewProgressInfo wraps the given rewards with completion percentages
// relative to eggsLaid and to the ultimate (last) goal.
//
// It returns nil when rewards is empty, and panics if the ultimate goal
// or any reward goal is zero, since the percentages would divide by zero.
func NewProgressInfo(
	rewards []*api.Reward,
	eggsLaid float64,
	projectedEggsLaid float64,
) *ProgressInfo {
	if len(rewards) == 0 {
		return nil
	}
	ultimateGoal := rewards[len(rewards)-1].Goal
	if ultimateGoal == 0 {
		panic("NewProgressInfo: ultimate goal is zero")
	}
	// Pre-size: exactly one wrapped reward per input reward.
	wrappedRewards := make([]*Reward, 0, len(rewards))
	for _, r := range rewards {
		if r.Goal == 0 {
			panic("NewProgressInfo: reward goal is zero")
		}
		wrappedRewards = append(wrappedRewards, &Reward{
			Reward:                   r,
			PercentageOfUltimateGoal: r.Goal / ultimateGoal * 100,
			PercentageCompleted:      eggsLaid / r.Goal * 100,
		})
	}
	return &ProgressInfo{
		EggsLaid:          eggsLaid,
		ProjectedEggsLaid: projectedEggsLaid,
		Rewards:           wrappedRewards,
		UltimateGoal:      ultimateGoal,
	}
}
// PercentageCompleted returns current progress toward the ultimate goal,
// in percent.
func (p *ProgressInfo) PercentageCompleted() float64 {
	return p.EggsLaid / p.UltimateGoal * 100
}
// ProjectedPercentageCompleted returns the projected final progress toward
// the ultimate goal, in percent.
func (p *ProgressInfo) ProjectedPercentageCompleted() float64 {
	return p.ProjectedEggsLaid / p.UltimateGoal * 100
}
|
package usecase
import (
"errors"
"fmt"
"github.com/go-pg/pg/v10"
"marketplace/accounts/domain"
)
// AdminDeleteUserCmd deletes the account identified by userId.
type AdminDeleteUserCmd func(db *pg.DB, userId int64) error

// AdminDeleteUser returns a command that loads the account by primary key
// and deletes it. A missing row — or a row without an email, which is
// treated as non-existent — yields a "not found" error; any other
// database failure is returned as-is instead of being masked.
func AdminDeleteUser() AdminDeleteUserCmd {
	return func(db *pg.DB, userId int64) error {
		user := domain.Account{
			Id: userId,
		}
		if err := db.Model(&user).WherePK().Select(); err != nil {
			// Only map the "no rows" case to not-found; surface real
			// database errors to the caller.
			if errors.Is(err, pg.ErrNoRows) {
				return errors.New("not found")
			}
			return err
		}
		fmt.Println("user : ", user)
		if user.Email == "" {
			return errors.New("not found")
		}
		_, err := db.Model(&user).WherePK().Delete()
		return err
	}
}
|
package main
import "fmt"
/*
In struct methods, a pointer receiver must be a valid pointer (nil
included), or the caller must be addressable so its address can be taken.
*/
// X is an empty struct used to demonstrate pointer-receiver calls.
type X struct {
}

// callmethod works even on a nil *X because it never dereferences x.
func (x *X) callmethod() {
	fmt.Println("test")
}
// main demonstrates pointer-receiver method calls on nil and on
// addressable values, plus the non-addressability of function results.
func main() {
	// Calling a pointer-receiver method on a nil pointer is legal.
	var x *X
	x.callmethod()
	// X{}.callmethod() would not compile: a composite literal is not
	// addressable, so its address cannot be taken for the pointer receiver.
	//X{}.callmethod()
	// Correct approach: bind the value to a variable first.
	a := X{}
	a.callmethod()
	findItemInMap()
	// A value returned from a function is not addressable either:
	// GetT().n = 1 would not compile.
	t := GetT()
	t.n = 1 // or: p := &(t.n); *p = 1
	fmt.Println(t)
}
// findItemInMap demonstrates plain map lookup and the comma-ok idiom for
// testing key existence.
func findItemInMap() {
	x := map[string]string{
		"one":   "a",
		"two":   "",
		"three": "c",
	}
	fmt.Println(x["one"])
	// Use the second return value to check whether a key exists; a bare
	// lookup cannot distinguish a missing key from a stored zero value.
	if _, ok := x["once"]; !ok {
		fmt.Println("no once")
	}
}
// T demonstrates that struct values returned from functions are not
// addressable.
type T struct {
	n int
}

// GetT returns a zero-valued T by value.
func GetT() T {
	var zero T
	return zero
}
// Copyright 2020 cloudeng llc. All rights reserved.
// Use of this source code is governed by the Apache-2.0
// license that can be found in the LICENSE file.
package lcs_test
import (
"bytes"
"fmt"
"hash/fnv"
"reflect"
"strings"
"testing"
"unicode/utf8"
"cloudeng.io/algo/codec"
"cloudeng.io/algo/lcs"
"cloudeng.io/errors"
)
// ExampleMyers demonstrates the Myers LCS algorithm on two rune slices.
func ExampleMyers() {
	runeDecoder, _ := codec.NewDecoder(utf8.DecodeRune)
	a, b := runeDecoder.Decode([]byte("ABCABBA")), runeDecoder.Decode([]byte("CBABAC"))
	fmt.Printf("%s\n", string(lcs.NewMyers(a, b).LCS().([]int32)))
	// Output:
	// BABA
}
// ExampleDP demonstrates the dynamic-programming implementation, which can
// enumerate every longest common subsequence rather than just one.
func ExampleDP() {
	runeDecoder, _ := codec.NewDecoder(utf8.DecodeRune)
	a, b := runeDecoder.Decode([]byte("AGCAT")), runeDecoder.Decode([]byte("GAC"))
	all := lcs.NewDP(a, b).AllLCS().([][]int32)
	for _, lcs := range all {
		fmt.Printf("%s\n", string(lcs))
	}
	// Output:
	// GA
	// GA
	// GC
	// AC
}
// isOneOf reports whether got matches any entry in want. An empty got with
// an empty want list counts as a match.
func isOneOf(got string, want []string) bool {
	if len(want) == 0 {
		return len(got) == 0
	}
	for _, candidate := range want {
		if candidate == got {
			return true
		}
	}
	return false
}
// lcsFromEdits reconstructs the LCS from an edit script by collecting the
// values of all Identical operations. typ only selects the element type
// (int64, int32 or uint8); its value is ignored. It panics on any other
// type.
func lcsFromEdits(typ interface{}, script lcs.EditScript) interface{} {
	switch typ.(type) {
	case int64:
		r := []int64{}
		for _, op := range script {
			if op.Op == lcs.Identical {
				r = append(r, op.Val.(int64))
			}
		}
		return r
	case int32:
		r := []int32{}
		for _, op := range script {
			if op.Op == lcs.Identical {
				r = append(r, op.Val.(int32))
			}
		}
		return r
	case uint8:
		r := []uint8{}
		for _, op := range script {
			if op.Op == lcs.Identical {
				r = append(r, op.Val.(uint8))
			}
		}
		return r
	}
	panic(fmt.Sprintf("unsupported type %T", typ))
}
// validateInsertions checks that every Insert operation in edits carries
// the value found at its B-index in b, for each supported element type.
// i identifies the test case in failure messages.
func validateInsertions(t *testing.T, i int, edits lcs.EditScript, b interface{}) {
	for _, e := range edits {
		if e.Op != lcs.Insert {
			continue
		}
		switch v := e.Val.(type) {
		case int64:
			if got, want := v, b.([]int64)[e.B]; got != want {
				t.Errorf("%v: %v: got %v, want %v", errors.Caller(2, 1), i, got, want)
			}
		case int32:
			if got, want := v, b.([]int32)[e.B]; got != want {
				t.Errorf("%v: %v: got %c, want %c", errors.Caller(2, 1), i, got, want)
			}
		case uint8:
			if got, want := v, b.([]uint8)[e.B]; got != want {
				t.Errorf("%v: %v: got %c, want %c", errors.Caller(2, 1), i, got, want)
			}
		}
	}
}
// decoders constructs the two codec.Decoders the tests run against: a
// rune decoder (utf8 -> int32 symbols) and a single-byte decoder
// (uint8 symbols). Construction failure aborts the test immediately.
func decoders(t *testing.T) (i32, u8 codec.Decoder) {
	i32, err := codec.NewDecoder(utf8.DecodeRune)
	if err != nil {
		t.Fatalf("NewDecoder: %v", err)
	}
	// The byte decoder consumes exactly one byte per symbol.
	u8, err = codec.NewDecoder(func(input []byte) (byte, int) {
		return input[0], 1
	})
	if err != nil {
		t.Fatalf("NewDecoder: %v", err)
	}
	return
}
// implementation is the interface common to the LCS algorithms under
// test (Myers and dynamic programming): both can produce a longest
// common subsequence and a shortest edit script.
type implementation interface {
	LCS() interface{}
	SES() lcs.EditScript
}
// testutf8 exercises an LCS implementation over rune (int32) inputs a
// and b for test case i: the LCS must be one of the strings in all, the
// edit script must imply the same LCS, and applying the script (and its
// reverse) must transform a into b (and b into a).
func testutf8(t *testing.T, impl implementation, i int, a, b []int32, all []string) {
	lcs32 := impl.LCS().([]int32)
	if got, want := string(lcs32), all; !isOneOf(got, want) {
		t.Errorf("%v: got %v is not one of %v", i, got, want)
	}
	edit := impl.SES()
	if got, want := lcsFromEdits(int32(0), edit).([]int32), lcs32; !reflect.DeepEqual(got, want) {
		t.Errorf("%v: got %v, want %v", i, string(got), string(want))
	}
	// test edit string by recreating 'b' from 'a'.
	validateInsertions(t, i, edit, b)
	if got, want := string(edit.Apply(a).([]int32)), string(b); got != want {
		t.Errorf("%v: got %v want %v for %s -> %s via %s", i, got, want, string(a), string(b), edit.String())
	}
	// and 'a' from 'b'
	reverse := lcs.Reverse(edit)
	validateInsertions(t, i, reverse, a)
	if got, want := string(reverse.Apply(b).([]int32)), string(a); got != want {
		t.Errorf("%v: got %v want %v for %s -> %s via %s", i, got, want, string(b), string(a), edit.String())
	}
}
// testbyte is the byte-level (uint8) counterpart of testutf8: the LCS
// must be one of the strings in all, the edit script must imply the
// same LCS, and the script (and its reverse) must transform a into b
// (and b into a).
func testbyte(t *testing.T, impl implementation, i int, a, b []uint8, all []string) {
	lcs32 := impl.LCS().([]uint8)
	if got, want := string(lcs32), all; !isOneOf(got, want) {
		t.Errorf("%v: got %v is not one of %v", i, got, want)
	}
	// test edit string by recreating 'b' from 'a'.
	edit := impl.SES()
	if got, want := lcsFromEdits(uint8(0), edit).([]uint8), lcs32; !reflect.DeepEqual(got, want) {
		t.Errorf("%v: got %v, want %v", i, string(got), string(want))
	}
	validateInsertions(t, i, edit, b)
	if got, want := string(edit.Apply(a).([]uint8)), string(b); got != want {
		t.Errorf("%v: got %v want %v for %s -> %s via %s", i, got, want, string(a), string(b), edit.String())
	}
	// and 'a' from 'b'
	reverse := lcs.Reverse(edit)
	validateInsertions(t, i, reverse, a)
	if got, want := string(reverse.Apply(b).([]uint8)), string(a); got != want {
		t.Errorf("%v: got %v want %v for %s -> %s via %s", i, got, want, string(b), string(a), edit.String())
	}
}
// TestLCS runs both the Myers and dynamic-programming implementations
// over a table of string pairs, at rune (int32) and byte (uint8)
// granularity, asserting that the computed LCS is one of the accepted
// answers and that the edit scripts round-trip.
func TestLCS(t *testing.T) {
	// l builds the list of acceptable LCS answers; with no arguments it
	// returns a non-nil empty slice so that isOneOf treats "no LCS" as
	// the only valid answer.
	l := func(s ...string) []string {
		if len(s) == 0 {
			return []string{}
		}
		return s
	}
	i32, u8 := decoders(t)
	for i, tc := range []struct {
		a, b string
		all  []string
	}{
		// Example from myer's 1986 paper.
		{"ABCABBA", "CBABAC", l("BABA", "CABA", "CBBA")},
		// Wikipedia dynamic programming example.
		{"AGCAT", "GAC", l("AC", "GA", "GC")},
		{"XMJYAUZ", "MZJAWXU", l("MJAU")},
		// Longer examples.
		{"ABCADEFGH", "ABCIJKFGH", l("ABCFGH")},
		{"ABCDEF1234", "PQRST2UV4", l("24")},
		{"SABCDE", "SC", l("SC")},
		{"SABCDE", "SSC", l("SC")},
		// More exhaustive cases.
		{"", "", l()},
		{"", "B", l()},
		{"B", "", l()},
		{"A", "A", l("A")},
		{"AB", "AB", l("AB")},
		{"AB", "ABC", l("AB")},
		{"ABC", "AB", l("AB")},
		{"AC", "AXC", l("AC")},
		{"ABC", "ABX", l("AB")},
		{"ABC", "ABXY", l("AB")},
		{"ABXY", "AB", l("AB")},
		// Example where rune and byte results are identical.
		{"日本語", "日本de語", l("日本語")},
	} {
		a, b := i32.Decode([]byte(tc.a)), i32.Decode([]byte(tc.b))
		myers := lcs.NewMyers(a, b)
		testutf8(t, myers, i, a.([]int32), b.([]int32), tc.all)
		dp := lcs.NewDP(a, b)
		testutf8(t, dp, i, a.([]int32), b.([]int32), tc.all)
		a, b = u8.Decode([]byte(tc.a)), u8.Decode([]byte(tc.b))
		myers = lcs.NewMyers(a, b)
		testbyte(t, myers, i, a.([]uint8), b.([]uint8), tc.all)
		dp = lcs.NewDP(a, b)
		testbyte(t, dp, i, a.([]uint8), b.([]uint8), tc.all)
	}
}
// TestUTF8 verifies that the rune decoder keeps multi-byte UTF-8
// sequences intact while the byte decoder operates on raw bytes, and
// spot-checks FormatHorizontal's rendering of edit scripts.
func TestUTF8(t *testing.T) {
	i32, u8 := decoders(t)
	// Test case for correct utf8 handling.
	// a: 日本語
	// b: 日本語 with the middle byte of the middle rune changed.
	// A correct rune aware lcs will be 日語, whereas a byte based one will
	// include the 0xe6 first byte from the middle rune but skip the two
	// trailing bytes.
	ra := []byte{0xe6, 0x97, 0xa5, 0xe6, 0x9c, 0xac, 0xe8, 0xaa, 0x9e}
	rb := []byte{0xe6, 0x97, 0xa5, 0xe6, 0x00, 0x00, 0xe8, 0xaa, 0x9e}
	a, b := i32.Decode(ra), i32.Decode(rb)
	myers := lcs.NewMyers(a, b)
	if got, want := string(myers.LCS().([]int32)), "日語"; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	a, b = u8.Decode(ra), u8.Decode(rb)
	myers = lcs.NewMyers(a, b)
	if got, want := string(myers.LCS().([]byte)), "日\xe6語"; got != want {
		t.Errorf("got %#v, want %x %v", got, want, want)
	}
	// Check the horizontal formatting of edit scripts for a few pairs.
	for i, tc := range []struct {
		a, b   string
		output string
	}{
		{"ABCABBA", "CBABAC", " CB AB AC\n-+|-||-|+\nA C B \n"},
		{"AGCAT", "GAC", " G A C\n-|-|-+\nA C T \n"},
		{"XMJYAUZ", "MZJAWXU", " MZJ AWXU \n-|+|-|++|-\nX Y Z\n"},
	} {
		a, b := i32.Decode([]byte(tc.a)), i32.Decode([]byte(tc.b))
		myers := lcs.NewMyers(a, b)
		edit := myers.SES()
		out := &strings.Builder{}
		lcs.FormatHorizontal(out, a, edit)
		if got, want := out.String(), tc.output; got != want {
			t.Errorf("%v: got\n%v, want\n%v", i, got, want)
		}
	}
}
// TestLines demonstrates line-level diffing: each line is hashed to an
// int64 symbol via FNV-1a, the LCS machinery diffs the symbol streams,
// and the original lines are recovered from a side map keyed by hash.
func TestLines(t *testing.T) {
	la := `
line1 a b c
line2 d e f
line3 hello
world
`
	lb := `
line2 d e f
hello
world
`
	lines := map[uint64]string{}
	// lineDecoder consumes one line per symbol, returning the FNV-1a
	// hash of the line (and remembering the text for reconstruction).
	lineDecoder := func(data []byte) (int64, int) {
		idx := bytes.Index(data, []byte{'\n'})
		if idx <= 0 {
			return 0, 1
		}
		h := fnv.New64a()
		h.Write(data[:idx])
		sum := h.Sum64()
		lines[sum] = string(data[:idx])
		return int64(sum), idx + 1
	}
	ld, err := codec.NewDecoder(lineDecoder)
	if err != nil {
		t.Fatalf("NewDecoder: %v", err)
	}
	a, b := ld.Decode([]byte(la)), ld.Decode([]byte(lb))
	myers := lcs.NewMyers(a, b)
	edits := myers.SES()
	validateInsertions(t, 0, edits, b)
	// Rebuild lb from the edit script: identical symbols come from a,
	// inserted symbols carry their own hash value.
	var reconstructed string
	for _, op := range edits {
		switch op.Op {
		case lcs.Identical:
			reconstructed += lines[uint64(a.([]int64)[op.A])] + "\n"
		case lcs.Insert:
			reconstructed += lines[uint64(op.Val.(int64))] + "\n"
		}
	}
	if got, want := reconstructed, lb; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	out := &strings.Builder{}
	lcs.FormatVertical(out, a, edits)
	if got, want := out.String(), ` 0
- 6864772235558415538
-8997218578518345818
+ -6615550055289275125
- -7192184552745107772
5717881983045765875
`; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}
|
package main
//689. 三个无重叠子数组的最大和
//给你一个整数数组 nums 和一个整数 k ,找出三个长度为 k 、互不重叠、且3 * k 项的和最大的子数组,并返回这三个子数组。
//
//以下标的数组形式返回结果,数组中的每一项分别指示每个子数组的起始位置(下标从 0 开始)。如果有多个结果,返回字典序最小的一个。
//
//
//
//示例 1:
//
//输入:nums = [1,2,1,2,6,7,5,1], k = 2
//输出:[0,3,5]
//解释:子数组 [1, 2], [2, 6], [7, 5] 对应的起始下标为 [0, 3, 5]。
//也可以取 [2, 1], 但是结果 [1, 3, 5] 在字典序上更大。
//示例 2:
//
//输入:nums = [1,2,1,2,1,2,1,2,1], k = 2
//输出:[0,2,4]
//
//
//提示:
//
//1 <= nums.length <= 2 * 10^4
//1 <= nums[i] <2^16
//1 <= k <= floor(nums.length / 3)
//思路 滑动窗口
// maxSumOfThreeSubarrays returns the starting indices of three
// non-overlapping length-k subarrays of nums whose combined sum is
// maximal, preferring the lexicographically smallest answer when there
// are ties (strict ">" comparisons keep the earliest maxima).
//
// Three length-k windows slide in lockstep; at each position the best
// first window, the best first+second pair, and the best triple seen so
// far are tracked, giving a single O(n) pass.
func maxSumOfThreeSubarrays(nums []int, k int) []int {
	best := make([]int, 0)
	win1, win2, win3 := 0, 0, 0
	best1, best12, best123 := 0, 0, 0
	best1Idx := 0
	best12Idx1, best12Idx2 := 0, 0
	for i := 2 * k; i < len(nums); i++ {
		// Extend all three windows by one element each.
		win1 += nums[i-2*k]
		win2 += nums[i-k]
		win3 += nums[i]
		if i < 3*k-1 {
			// Windows are not yet full.
			continue
		}
		if win1 > best1 {
			best1 = win1
			best1Idx = i - 3*k + 1
		}
		if best1+win2 > best12 {
			best12 = best1 + win2
			best12Idx1, best12Idx2 = best1Idx, i-2*k+1
		}
		if best12+win3 > best123 {
			best123 = best12 + win3
			best = []int{best12Idx1, best12Idx2, i - k + 1}
		}
		// Shrink each window from the left before the next step.
		win1 -= nums[i-3*k+1]
		win2 -= nums[i-2*k+1]
		win3 -= nums[i-k+1]
	}
	return best
}
// main exercises maxSumOfThreeSubarrays on the first example from the
// problem statement and prints the three starting indices.
//
// Note: the previous version passed the []int result directly to the
// builtin println, which prints the slice header (len/cap/pointer)
// rather than its elements; printing each index individually shows the
// actual answer.
func main() {
	for _, start := range maxSumOfThreeSubarrays([]int{1, 2, 1, 2, 6, 7, 5, 1}, 2) {
		println(start)
	}
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package video
import (
"context"
"net/http"
"time"
"chromiumos/tast/common/media/caps"
"chromiumos/tast/local/bundles/cros/video/play"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/testing"
)
// seekTest is used to describe the config used to run each Seek test.
type seekTest struct {
	filename    string       // File name to play back.
	numSeeks    int          // Amount of times to seek into the <video>.
	browserType browser.Type // Browser (Ash or Lacros) hosting the playback page.
}
// init registers the Seek test with its full parameterization matrix:
// basic per-codec seeks, resolution-switching ("switch_*") variants,
// long-running stress ("stress_*") variants, and alternate/legacy
// decoder ("*_alt") variants, for Ash and (where listed) Lacros.
func init() {
	testing.AddTest(&testing.Test{
		Func:         Seek,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Verifies that seeking works in Chrome, either with or without resolution changes",
		Contacts: []string{
			"mcasas@chromium.org",
			"chromeos-gfx-video@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Data:         []string{"video.html", "playback.js"},
		Params: []testing.Param{{
			// Basic per-codec seek cases (25 seeks, per-build).
			Name: "av1",
			Val: seekTest{
				filename:    "720_av1.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_av1.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeAV1},
			Fixture:           "chromeVideoWithHWAV1Decoding",
		}, {
			Name: "h264",
			Val: seekTest{
				filename:    "720_h264.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"},
			Fixture:           "chromeVideo",
		}, {
			Name: "h264_lacros",
			Val: seekTest{
				filename:    "720_h264.mp4",
				numSeeks:    25,
				browserType: browser.TypeLacros,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs", "lacros"},
			Fixture:           "chromeVideoLacros",
		}, {
			Name: "hevc",
			Val: seekTest{
				filename:    "720_hevc.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_hevc.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeHEVC, "proprietary_codecs"},
			Fixture:           "chromeVideo",
		}, {
			Name: "vp8",
			Val: seekTest{
				filename:    "720_vp8.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_vp8.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP8},
			Fixture:           "chromeVideo",
		}, {
			Name: "vp9",
			Val: seekTest{
				filename:    "720_vp9.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9},
			Fixture:           "chromeVideo",
		}, {
			Name: "vp9_lacros",
			Val: seekTest{
				filename:    "720_vp9.webm",
				numSeeks:    25,
				browserType: browser.TypeLacros,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9, "lacros"},
			Fixture:           "chromeVideoLacros",
		}, {
			// Resolution-switching variants exercise seeks across
			// resolution changes.
			Name: "switch_av1",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.av1.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.av1.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeAV1},
			Fixture:           "chromeVideoWithHWAV1Decoding",
		}, {
			Name: "switch_h264",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.h264.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"},
			Fixture:           "chromeVideo",
		}, {
			Name: "switch_hevc",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.hevc.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.hevc.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeHEVC, "proprietary_codecs"},
			Fixture:           "chromeVideo",
		}, {
			Name: "switch_vp8",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.vp8.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.vp8.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP8},
			Fixture:           "chromeVideo",
		}, {
			Name: "switch_vp9",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.vp9.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9},
			Fixture:           "chromeVideo",
		}, {
			// Stress variants run many more seeks on a weekly schedule.
			Name: "stress_av1",
			Val: seekTest{
				filename:    "720_av1.mp4",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_av1.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeAV1},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeVideoWithHWAV1Decoding",
		}, {
			Name: "stress_vp8",
			Val: seekTest{
				filename:    "720_vp8.webm",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_vp8.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP8},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeVideo",
		}, {
			Name: "stress_vp9",
			Val: seekTest{
				filename:    "720_vp9.webm",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeVideo",
		}, {
			Name: "stress_h264",
			Val: seekTest{
				filename:    "720_h264.mp4",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "proprietary_codecs"},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeVideo",
		}, {
			Name: "stress_hevc",
			Val: seekTest{
				filename:    "720_hevc.mp4",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_hevc.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeHEVC, "proprietary_codecs"},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeVideo",
		}, {
			// "_alt" variants run on the alternate (legacy) video
			// decoder implementation.
			Name: "h264_alt",
			Val: seekTest{
				filename:    "720_h264.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "video_decoder_legacy_supported", "proprietary_codecs"},
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "vp8_alt",
			Val: seekTest{
				filename:    "720_vp8.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_vp8.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP8, "video_decoder_legacy_supported"},
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "vp9_alt",
			Val: seekTest{
				filename:    "720_vp9.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"720_vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9, "video_decoder_legacy_supported"},
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "switch_h264_alt",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.h264.mp4",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "video_decoder_legacy_supported", "proprietary_codecs"},
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "switch_vp8_alt",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.vp8.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.vp8.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP8, "video_decoder_legacy_supported"},
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "switch_vp9_alt",
			Val: seekTest{
				filename:    "smpte_bars_resolution_ladder.vp9.webm",
				numSeeks:    25,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_perbuild"},
			ExtraData:         []string{"smpte_bars_resolution_ladder.vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9, "video_decoder_legacy_supported"},
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "stress_vp8_alt",
			Val: seekTest{
				filename:    "720_vp8.webm",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_vp8.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP8, "video_decoder_legacy_supported"},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "stress_vp9_alt",
			Val: seekTest{
				filename:    "720_vp9.webm",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_vp9.webm"},
			ExtraSoftwareDeps: []string{caps.HWDecodeVP9, "video_decoder_legacy_supported"},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeAlternateVideoDecoder",
		}, {
			Name: "stress_h264_alt",
			Val: seekTest{
				filename:    "720_h264.mp4",
				numSeeks:    1000,
				browserType: browser.TypeAsh,
			},
			ExtraAttr:         []string{"group:graphics", "graphics_video", "graphics_weekly"},
			ExtraData:         []string{"720_h264.mp4"},
			ExtraSoftwareDeps: []string{caps.HWDecodeH264, "video_decoder_legacy_supported", "proprietary_codecs"},
			Timeout:           20 * time.Minute,
			Fixture:           "chromeAlternateVideoDecoder",
		}},
	})
}
// Seek plays a file with Chrome and checks that it can safely be seeked into.
func Seek(ctx context.Context, s *testing.State) {
	testOpt := s.Param().(seekTest)
	// lacros.Setup yields the browser connection for either Ash or
	// Lacros, as selected by the parameterized browser type.
	_, l, cs, err := lacros.Setup(ctx, s.FixtValue(), testOpt.browserType)
	if err != nil {
		s.Fatal("Failed to initialize test: ", err)
	}
	defer lacros.CloseLacros(ctx, l)
	// Serve the test's data files over HTTP and perform the seeks.
	if err := play.TestSeek(ctx, http.FileServer(s.DataFileSystem()), cs, testOpt.filename, s.OutDir(), testOpt.numSeeks); err != nil {
		s.Fatal("TestSeek failed: ", err)
	}
}
|
package main
/*
#include <freerdp/graphics.h>
*/
import "C"
import (
"log"
)
//export webRdpBitmapNew
// webRdpBitmapNew is the rdpBitmap New callback exported to FreeRDP.
// Currently a stub that only logs the invocation and reports success.
func webRdpBitmapNew(context *C.rdpContext, bitmap *C.rdpBitmap) C.BOOL {
	log.Println("webRdpBitmapNew")
	return C.TRUE
}
//export webRdpBitmapFree
// webRdpBitmapFree is the rdpBitmap Free callback exported to FreeRDP.
// Currently a stub that only logs the invocation.
func webRdpBitmapFree(context *C.rdpContext, bitmap *C.rdpBitmap) {
	log.Println("webRdpBitmapFree")
}
//export webRdpBitmapPaint
// webRdpBitmapPaint is the rdpBitmap Paint callback exported to
// FreeRDP. Currently a stub that only logs the invocation and reports
// success.
func webRdpBitmapPaint(context *C.rdpContext, bitmap *C.rdpBitmap) C.BOOL {
	log.Println("webRdpBitmapPaint")
	return C.TRUE
}
//export webRdpBitmapDecompress
// webRdpBitmapDecompress is the rdpBitmap Decompress callback exported
// to FreeRDP. Currently a stub that only logs the invocation and
// reports success; the bitmap data is not actually decompressed.
func webRdpBitmapDecompress(context *C.rdpContext, bitmap *C.rdpBitmap, data *C.BYTE,
	width C.int, height C.int, bpp C.int, length C.int,
	compressed C.BOOL, codecId C.int) C.BOOL {
	log.Println("webRdpBitmapDecompress")
	return C.TRUE
}
//export webRdpBitmapSetSurface
// webRdpBitmapSetSurface is the rdpBitmap SetSurface callback exported
// to FreeRDP. Currently a stub that only logs the invocation and
// reports success.
func webRdpBitmapSetSurface(context *C.rdpContext, bitmap *C.rdpBitmap, primary C.BOOL) C.BOOL {
	// Log this callback's own name (was a copy-paste of
	// "webRdpBitmapDecompress", which made traces misleading).
	log.Println("webRdpBitmapSetSurface")
	return C.TRUE
}
|
package main
import (
"fmt"
"os"
"strconv"
)
/*
Passos do quicksort:
1)Escolher um elemento da lista como pivô e removê-lo da lista;
2)Particionar a lista em duas listas distintas: uma contendo elementos menores que o pivô e outra os maiores;
3)Ordenar as duas listas recursivamente;
4)Retornar a combinação da lista ordenada de elementos menores, o próprio pivô e a lista ordenada dos elementos maiores.
*/
// main reads the numbers to sort from the command-line arguments,
// validates that each one parses as an integer (exiting with status 1
// and a message otherwise), and prints the quicksorted result.
func main() {
	args := os.Args[1:]
	// Pre-size the destination slice to match the argument count.
	valores := make([]int, len(args))
	for i, arg := range args {
		v, err := strconv.Atoi(arg) // convert each argument to an int
		if err != nil {
			fmt.Printf("%s não é um número válido!\n", arg)
			os.Exit(1)
		}
		valores[i] = v
	}
	fmt.Println(quicksort(valores))
}
// quicksort returns a sorted copy of numeros using the classic
// pivot-partition recursion:
//  1. pick a pivot (the middle element) and remove it,
//  2. partition the remaining values into <= pivot and > pivot,
//  3. sort both partitions recursively,
//  4. concatenate sorted-smaller + pivot + sorted-larger.
// The caller's slice is never modified; all work happens on a copy.
func quicksort(numeros []int) []int {
	// Base case: zero or one element is already sorted.
	if len(numeros) <= 1 {
		return numeros
	}
	// Copy the input so the original backing array stays untouched.
	copia := make([]int, len(numeros))
	copy(copia, numeros)
	// Middle element is the pivot; splice the two halves around it to
	// remove it from the working slice.
	meio := len(copia) / 2
	pivo := copia[meio]
	copia = append(copia[:meio], copia[meio+1:]...)
	// Partition the remaining values around the pivot.
	var menores, maiores []int
	for _, valor := range copia {
		if valor <= pivo {
			menores = append(menores, valor)
		} else {
			maiores = append(maiores, valor)
		}
	}
	return append(append(quicksort(menores), pivo), quicksort(maiores)...)
}
// particionar splits numeros into the values at most pivo and the
// values strictly greater than pivo, preserving relative order.
func particionar(numeros []int, pivo int) (menores []int, maiores []int) {
	for _, valor := range numeros {
		switch {
		case valor <= pivo:
			menores = append(menores, valor)
		default:
			maiores = append(maiores, valor)
		}
	}
	return menores, maiores
}
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package ts
import (
"fmt"
"math"
"reflect"
"testing"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/ts/testmodel"
"github.com/cockroachdb/cockroach/pkg/ts/tspb"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/gogo/protobuf/proto"
"github.com/kr/pretty"
)
// makeInternalRowData makes an InternalTimeSeriesData object from a collection
// of data samples. The result will be in the soon-deprecated row format. Input
// is the start timestamp, the sample duration, and the set of samples. As
// opposed to the ToInternal() method, there are two key differences:
//
// 1. This method always produces a single InternalTimeSeriesData object with
// the provided startTimestamp, rather than breaking up the datapoints into
// several slabs based on a slab duration.
//
// 2. The provided data samples are downsampled according to the sampleDuration,
// mimicking the process that would be used to create a data rollup. Therefore,
// the resulting InternalTimeSeriesData will have one sample for each sample
// period.
//
// Sample data must be provided ordered by timestamp or the output will be
// unpredictable.
func makeInternalRowData(
	startTimestamp, sampleDuration int64, samples []tspb.TimeSeriesDatapoint,
) roachpb.InternalTimeSeriesData {
	// Adjust startTimestamp to an exact multiple of sampleDuration.
	startTimestamp -= startTimestamp % sampleDuration
	result := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: startTimestamp,
		SampleDurationNanos: sampleDuration,
		Samples:             make([]roachpb.InternalTimeSeriesSample, 0),
	}
	// Run through all samples, merging any consecutive samples which correspond
	// to the same sample interval.
	for _, sample := range samples {
		offset := int32((sample.TimestampNanos - startTimestamp) / sampleDuration)
		value := sample.Value
		// Merge into the previous sample if we have the same offset.
		if count := len(result.Samples); count > 0 && result.Samples[count-1].Offset == offset {
			// Initialize max and min if necessary. A sample with Count == 1
			// stores its value only in Sum; Min/Max are materialized lazily
			// on the first merge.
			var min, max float64
			if result.Samples[count-1].Count > 1 {
				min, max = *result.Samples[count-1].Min, *result.Samples[count-1].Max
			} else {
				min, max = result.Samples[count-1].Sum, result.Samples[count-1].Sum
			}
			result.Samples[count-1].Count++
			result.Samples[count-1].Sum += value
			result.Samples[count-1].Min = proto.Float64(math.Min(min, value))
			result.Samples[count-1].Max = proto.Float64(math.Max(max, value))
		} else if count > 0 && result.Samples[count-1].Offset > offset {
			panic("sample data provided to generateData must be ordered by timestamp.")
		} else {
			result.Samples = append(result.Samples, roachpb.InternalTimeSeriesSample{
				Offset: offset,
				Sum:    value,
				Count:  1,
			})
		}
	}
	return result
}
// makeInternalColumnData makes an InternalTimeSeriesData object from a
// collection of data samples. The result will be in columnar format. Input is
// the start timestamp, the sample duration, and the set of samples. As opposed
// to the ToInternal() method, there are two key differences:
//
// 1. This method always produces a single InternalTimeSeriesData object with
// the provided startTimestamp, rather than breaking up the datapoints into
// several slabs based on a slab duration.
//
// 2. The provided data samples are downsampled according to the sampleDuration,
// mimicking the process that would be used to create a data rollup. Therefore,
// the resulting InternalTimeSeriesData will have one entry for each offset
// period. Additionally, if there are multiple datapoints in any sample period,
// then the desired result is assumed to be a rollup and every resulting sample
// period will have values for all rollup columns.
//
// Sample data must be provided ordered by timestamp or the output will be
// unpredictable.
func makeInternalColumnData(
	startTimestamp, sampleDuration int64, samples []tspb.TimeSeriesDatapoint,
) roachpb.InternalTimeSeriesData {
	// Adjust startTimestamp to an exact multiple of sampleDuration.
	startTimestamp -= startTimestamp % sampleDuration
	result := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: startTimestamp,
		SampleDurationNanos: sampleDuration,
	}
	// Run through all samples, merging any consecutive samples which correspond
	// to the same sample interval. Assume that the data will contain relevant
	// roll-ups, but discard the roll-up data if there is only one sample per
	// sample period.
	isRollup := false
	// Variance computation must consider each value against the average.
	// Retain the component values of each column and compute a variance.
	valuesForSample := make([]float64, 0, 1)
	// computeVariance returns the population variance of the values collected
	// so far and resets the collection for the next sample period.
	computeVariance := func() float64 {
		variance := 0.0
		if len(valuesForSample) > 1 {
			// Compute average of values.
			sum := 0.0
			for _, value := range valuesForSample {
				sum += value
			}
			avg := sum / float64(len(valuesForSample))
			// Compute variance of values using the average.
			totalSquaredDeviation := 0.0
			for _, value := range valuesForSample {
				totalSquaredDeviation += math.Pow(value-avg, 2)
			}
			variance = totalSquaredDeviation / float64(len(valuesForSample))
		}
		// Reset value collection.
		valuesForSample = valuesForSample[:0]
		return variance
	}
	for _, sample := range samples {
		offset := result.OffsetForTimestamp(sample.TimestampNanos)
		value := sample.Value
		// Merge into the previous sample if we have the same offset.
		if count := len(result.Offset); count > 0 && result.Offset[count-1] == offset {
			isRollup = true
			result.Last[count-1] = value
			result.Count[count-1]++
			result.Sum[count-1] += value
			result.Max[count-1] = math.Max(result.Max[count-1], value)
			result.Min[count-1] = math.Min(result.Min[count-1], value)
			valuesForSample = append(valuesForSample, value)
		} else if count > 0 && result.Offset[count-1] > offset {
			panic("sample data provided to generateData must be ordered by timestamp.")
		} else {
			// Compute variance for previous sample if there was more than one
			// value.
			if len(valuesForSample) > 1 {
				result.Variance[count-1] = computeVariance()
			} else {
				valuesForSample = valuesForSample[:0]
			}
			result.Offset = append(result.Offset, offset)
			result.Last = append(result.Last, value)
			result.First = append(result.First, value)
			result.Count = append(result.Count, 1)
			result.Sum = append(result.Sum, value)
			result.Min = append(result.Min, value)
			result.Max = append(result.Max, value)
			result.Variance = append(result.Variance, 0)
			valuesForSample = append(valuesForSample, value)
		}
	}
	// Compute variance for last sample.
	result.Variance[len(result.Variance)-1] = computeVariance()
	// Non-rollup data keeps only the Offset and Last columns.
	if !isRollup {
		result.First = nil
		result.Count = nil
		result.Sum = nil
		result.Min = nil
		result.Max = nil
		result.Variance = nil
	}
	return result
}
// TestMakeInternalData validates the makeInternalRowData and
// makeInternalColumnData helpers against both a non-rollup layout (one
// datapoint per sample period) and a rollup layout (multiple datapoints
// per period), cross-checking rollup aggregates with the testmodel
// package.
func TestMakeInternalData(t *testing.T) {
	defer leaktest.AfterTest(t)()
	data := []tspb.TimeSeriesDatapoint{
		tsdp(110, 20),
		tsdp(120, 300),
		tsdp(130, 400),
		tsdp(140, 800),
		tsdp(180, 200),
		tsdp(190, 240),
		tsdp(210, 500),
		tsdp(230, 490),
		tsdp(320, 590),
		tsdp(350, 990),
	}
	// Confirm non-rollup case.
	nonRollupRow := makeInternalRowData(50, 10, data)
	nonRollupColumn := makeInternalColumnData(50, 10, data)
	expectedNonRollupRow := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 50,
		SampleDurationNanos: 10,
	}
	expectedNonRollupColumn := expectedNonRollupRow
	for _, val := range data {
		offset := int32((val.TimestampNanos - 50) / 10)
		expectedNonRollupRow.Samples = append(expectedNonRollupRow.Samples, roachpb.InternalTimeSeriesSample{
			Offset: offset,
			Count:  1,
			Sum:    val.Value,
		})
		expectedNonRollupColumn.Offset = append(expectedNonRollupColumn.Offset, offset)
		expectedNonRollupColumn.Last = append(expectedNonRollupColumn.Last, val.Value)
	}
	if a, e := nonRollupRow, expectedNonRollupRow; !reflect.DeepEqual(a, e) {
		t.Errorf("nonRollupRow got %v, wanted %v", a, e)
	}
	if a, e := nonRollupColumn, expectedNonRollupColumn; !reflect.DeepEqual(a, e) {
		t.Errorf("nonRollupColumn got %v, wanted %v", a, e)
	}
	// Confirm rollup-generating case. Values are checked against the
	// independently-verified methods of the testmodel package.
	rollupRow := makeInternalRowData(50, 50, data)
	rollupColumn := makeInternalColumnData(50, 50, data)
	expectedRollupRow := roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 50,
		SampleDurationNanos: 50,
	}
	expectedRollupColumn := expectedRollupRow
	dataSeries := testmodel.DataSeries(data)
	// Last and Offset column.
	for _, dp := range dataSeries.GroupByResolution(50, testmodel.AggregateLast) {
		offset := int32((dp.TimestampNanos - 50) / 50)
		expectedRollupRow.Samples = append(expectedRollupRow.Samples, roachpb.InternalTimeSeriesSample{
			Offset: offset,
		})
		expectedRollupColumn.Offset = append(expectedRollupColumn.Offset, offset)
		expectedRollupColumn.Last = append(expectedRollupColumn.Last, dp.Value)
	}
	// Sum column.
	for i, dp := range dataSeries.GroupByResolution(50, testmodel.AggregateSum) {
		expectedRollupRow.Samples[i].Sum = dp.Value
		expectedRollupColumn.Sum = append(expectedRollupColumn.Sum, dp.Value)
	}
	// Max column.
	for i, dp := range dataSeries.GroupByResolution(50, testmodel.AggregateMax) {
		expectedRollupRow.Samples[i].Max = proto.Float64(dp.Value)
		expectedRollupColumn.Max = append(expectedRollupColumn.Max, dp.Value)
	}
	// Min column.
	for i, dp := range dataSeries.GroupByResolution(50, testmodel.AggregateMin) {
		expectedRollupRow.Samples[i].Min = proto.Float64(dp.Value)
		expectedRollupColumn.Min = append(expectedRollupColumn.Min, dp.Value)
	}
	// Count column.
	for i, dp := range dataSeries.GroupByResolution(50, func(ds testmodel.DataSeries) float64 {
		return float64(len(ds))
	}) {
		count := uint32(int32(dp.Value))
		expectedRollupRow.Samples[i].Count = count
		// Min and max are omitted from samples with a count of 1.
		if count < 2 {
			expectedRollupRow.Samples[i].Min = nil
			expectedRollupRow.Samples[i].Max = nil
		}
		expectedRollupColumn.Count = append(expectedRollupColumn.Count, count)
	}
	// First column.
	for _, dp := range dataSeries.GroupByResolution(50, testmodel.AggregateFirst) {
		expectedRollupColumn.First = append(expectedRollupColumn.First, dp.Value)
	}
	// Variance column.
	for _, dp := range dataSeries.GroupByResolution(50, testmodel.AggregateVariance) {
		expectedRollupColumn.Variance = append(expectedRollupColumn.Variance, dp.Value)
	}
	if a, e := rollupRow, expectedRollupRow; !reflect.DeepEqual(a, e) {
		t.Errorf("rollupRow got %v, wanted %v", a, e)
		for _, diff := range pretty.Diff(a, e) {
			t.Error(diff)
		}
	}
	if a, e := rollupColumn, expectedRollupColumn; !reflect.DeepEqual(a, e) {
		t.Errorf("rollupColumn got %v, wanted %v", a, e)
		for _, diff := range pretty.Diff(a, e) {
			t.Error(diff)
		}
	}
}
// verifySpanIteratorPosition asserts that two span iterators are at the same
// position, reporting a separate test error for each field that differs.
func verifySpanIteratorPosition(t *testing.T, actual, expected timeSeriesSpanIterator) {
	t.Helper()
	checks := []struct {
		format string
		a, e   interface{}
	}{
		{"iterator had total index of %d, wanted %d", actual.total, expected.total},
		{"iterator had inner index of %d, wanted %d", actual.inner, expected.inner},
		{"iterator had outer index of %d, wanted %d", actual.outer, expected.outer},
		{"iterator had timestamp of %d, wanted %d", actual.timestamp, expected.timestamp},
		{"iterator had length of %d, wanted %d", actual.length, expected.length},
	}
	for _, c := range checks {
		if c.a != c.e {
			t.Errorf(c.format, c.a, c.e)
		}
	}
}
// TestTimeSeriesSpanIteratorMovement verifies forward/backward movement,
// index seeking and timestamp seeking of timeSeriesSpanIterator over spans
// built from row-formatted, column-formatted and mixed-format data.
func TestTimeSeriesSpanIteratorMovement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// initialize explicit iterator results for the entire span - this makes the
	// movement tests easier to read, as we are often asserting that the same
	// position.
	explicitPositions := []timeSeriesSpanIterator{
		{
			timestamp: 10,
			length:    6,
		},
		{
			total:     1,
			outer:     0,
			inner:     1,
			timestamp: 20,
			length:    6,
		},
		{
			total:     2,
			outer:     1,
			inner:     0,
			timestamp: 30,
			length:    6,
		},
		{
			total:     3,
			outer:     2,
			inner:     0,
			timestamp: 50,
			length:    6,
		},
		{
			total:     4,
			outer:     2,
			inner:     1,
			timestamp: 70,
			length:    6,
		},
		{
			total:     5,
			outer:     2,
			inner:     2,
			timestamp: 90,
			length:    6,
		},
		// Position one past the last datapoint: the exhausted-iterator state.
		{
			total:     6,
			outer:     3,
			inner:     0,
			timestamp: 0,
			length:    6,
		},
	}
	// Initial position.
	verifyIterTest := func(t *testing.T, iter timeSeriesSpanIterator) {
		verifySpanIteratorPosition(t, iter, explicitPositions[0])
		// Forwarding.
		iter.forward()
		verifySpanIteratorPosition(t, iter, explicitPositions[1])
		iter.forward()
		verifySpanIteratorPosition(t, iter, explicitPositions[2])
		iter.forward()
		iter.forward()
		iter.forward()
		iter.forward()
		verifySpanIteratorPosition(t, iter, explicitPositions[6])
		// Forwarding past the end leaves the iterator at the exhausted position.
		iter.forward()
		verifySpanIteratorPosition(t, iter, explicitPositions[6])
		// Backwards.
		iter.backward()
		verifySpanIteratorPosition(t, iter, explicitPositions[5])
		iter.backward()
		iter.backward()
		iter.backward()
		iter.backward()
		verifySpanIteratorPosition(t, iter, explicitPositions[1])
		// Moving backward past the start pins the iterator at position 0.
		iter.backward()
		iter.backward()
		iter.backward()
		verifySpanIteratorPosition(t, iter, explicitPositions[0])
		// Seek index.
		iter.seekIndex(2)
		verifySpanIteratorPosition(t, iter, explicitPositions[2])
		iter.seekIndex(4)
		verifySpanIteratorPosition(t, iter, explicitPositions[4])
		iter.seekIndex(0)
		verifySpanIteratorPosition(t, iter, explicitPositions[0])
		// Out-of-range seeks clamp to the end / start respectively.
		iter.seekIndex(1000)
		verifySpanIteratorPosition(t, iter, explicitPositions[6])
		iter.seekIndex(-1)
		verifySpanIteratorPosition(t, iter, explicitPositions[0])
		// Seek timestamp: lands on the first position at or after the target.
		iter.seekTimestamp(0)
		verifySpanIteratorPosition(t, iter, explicitPositions[0])
		iter.seekTimestamp(15)
		verifySpanIteratorPosition(t, iter, explicitPositions[1])
		iter.seekTimestamp(50)
		verifySpanIteratorPosition(t, iter, explicitPositions[3])
		iter.seekTimestamp(80)
		verifySpanIteratorPosition(t, iter, explicitPositions[5])
		iter.seekTimestamp(10000)
		verifySpanIteratorPosition(t, iter, explicitPositions[6])
	}
	// Row data only.
	t.Run("row only", func(t *testing.T) {
		verifyIterTest(t, makeTimeSeriesSpanIterator(timeSeriesSpan{
			makeInternalRowData(0, 10, []tspb.TimeSeriesDatapoint{
				tsdp(10, 1),
				tsdp(20, 2),
			}),
			makeInternalRowData(30, 10, []tspb.TimeSeriesDatapoint{
				tsdp(30, 3),
			}),
			makeInternalRowData(50, 10, []tspb.TimeSeriesDatapoint{
				tsdp(50, 5),
				tsdp(70, 7),
				tsdp(90, 9),
			}),
		}))
	})
	// Column data only.
	t.Run("columns only", func(t *testing.T) {
		verifyIterTest(t, makeTimeSeriesSpanIterator(timeSeriesSpan{
			makeInternalColumnData(0, 10, []tspb.TimeSeriesDatapoint{
				tsdp(10, 1),
				tsdp(20, 2),
			}),
			makeInternalColumnData(30, 10, []tspb.TimeSeriesDatapoint{
				tsdp(30, 3),
			}),
			makeInternalColumnData(50, 10, []tspb.TimeSeriesDatapoint{
				tsdp(50, 5),
				tsdp(70, 7),
				tsdp(90, 9),
			}),
		}))
	})
	// Alternating row- and column-formatted structures in one span.
	t.Run("mixed rows and columns", func(t *testing.T) {
		verifyIterTest(t, makeTimeSeriesSpanIterator(timeSeriesSpan{
			makeInternalRowData(0, 10, []tspb.TimeSeriesDatapoint{
				tsdp(10, 1),
				tsdp(20, 2),
			}),
			makeInternalColumnData(30, 10, []tspb.TimeSeriesDatapoint{
				tsdp(30, 3),
			}),
			makeInternalRowData(50, 10, []tspb.TimeSeriesDatapoint{
				tsdp(50, 5),
				tsdp(70, 7),
				tsdp(90, 9),
			}),
		}))
	})
}
// TestTimeSeriesSpanIteratorValues verifies aggregate value extraction,
// derivative computation and timestamp interpolation on a span whose samples
// contain multiple values per timestamp.
func TestTimeSeriesSpanIteratorValues(t *testing.T) {
	defer leaktest.AfterTest(t)()
	iter := makeTimeSeriesSpanIterator(timeSeriesSpan{
		makeInternalRowData(0, 10, []tspb.TimeSeriesDatapoint{
			tsdp(10, 1),
			tsdp(20, 2),
			tsdp(20, 4),
		}),
		makeInternalRowData(30, 10, []tspb.TimeSeriesDatapoint{
			tsdp(30, 3),
			tsdp(30, 6),
			tsdp(30, 9),
		}),
		makeInternalRowData(50, 10, []tspb.TimeSeriesDatapoint{
			tsdp(50, 12),
			tsdp(70, 700),
			tsdp(90, 9),
		}),
	})
	iter.seekTimestamp(30)
	// The sample at timestamp 30 holds {3, 6, 9} and the sample at 20 holds
	// {2, 4}; expected values/derivatives below follow from those two samples.
	for _, tc := range []struct {
		agg           tspb.TimeSeriesQueryAggregator
		expected      float64
		expectedDeriv float64
	}{
		{
			agg:           tspb.TimeSeriesQueryAggregator_AVG,
			expected:      6,
			expectedDeriv: 3,
		},
		{
			agg:           tspb.TimeSeriesQueryAggregator_SUM,
			expected:      18,
			expectedDeriv: 12,
		},
		{
			agg:           tspb.TimeSeriesQueryAggregator_MIN,
			expected:      3,
			expectedDeriv: 1,
		},
		{
			agg:           tspb.TimeSeriesQueryAggregator_MAX,
			expected:      9,
			expectedDeriv: 5,
		},
	} {
		t.Run("value", func(t *testing.T) {
			if a, e := iter.value(tc.agg), tc.expected; a != e {
				t.Errorf("value for %s of iter got %f, wanted %f", tc.agg.String(), a, e)
			}
			deriv, valid := iter.derivative(tc.agg)
			if !valid {
				t.Errorf("expected derivative to be valid, was invalid")
			}
			if a, e := deriv, tc.expectedDeriv; a != e {
				t.Errorf("derivative for %s of iter got %f, wanted %f", tc.agg.String(), a, e)
			}
		})
	}
	// Test value interpolation.
	iter.seekTimestamp(50)
	for _, tc := range []struct {
		timestamp          int64
		interpolationLimit int64
		expectedValid      bool
		expectedValue      float64
	}{
		{50, 100, true, 12},
		{50, 1, true, 12},
		// Must interpolate in between points.
		{30, 100, false, 0},
		{60, 100, false, 0},
		// Interpolation limit is respected
		{40, 100, true, 9},
		{40, 20, true, 9},
		{40, 19, false, 0},
		// Interpolation limit 0 is still a special case.
		{40, 0, true, 9},
	} {
		interpValue, valid := iter.valueAtTimestamp(tc.timestamp, tc.interpolationLimit, tspb.TimeSeriesQueryAggregator_AVG)
		if valid != tc.expectedValid {
			t.Errorf("valueAtTimestamp valid was %t, wanted %t", valid, tc.expectedValid)
			continue
		}
		if a, e := interpValue, tc.expectedValue; a != e {
			t.Errorf("valueAtTimestamp %d got %f, wanted %f", tc.timestamp, a, e)
		}
	}
	// Special case: no derivative available at index 0.
	iter.seekIndex(0)
	if _, valid := iter.valueAtTimestamp(20, 1000, tspb.TimeSeriesQueryAggregator_AVG); valid {
		t.Errorf("expected valueAtTimestamp to be invalid at index 0, was valid")
	}
	if _, valid := iter.derivative(tspb.TimeSeriesQueryAggregator_AVG); valid {
		t.Errorf("expected deriv to be invalid at index 0, was valid")
	}
}
// dataDesc is used to describe an internal data structure independently of it
// being formatted using rows or columns.
type dataDesc struct {
	startTimestamp int64                      // start timestamp of the described structure
	sampleDuration int64                      // sample period of the described structure
	samples        []tspb.TimeSeriesDatapoint // datapoints it contains
}
// TestDownsampleSpans verifies downsampleSpans across AVG/MAX/MIN
// downsamplers, at both the original sample period and coarser periods, for
// row-formatted, column-formatted and mixed-format spans.
func TestDownsampleSpans(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Each test case is structured as such:
	// + A description of an "input" span, which describes a list of
	// InternalTimeSeriesData structures that will be assembled into a data span.
	// Each structure has a start timestamp, a sample period (should be the same
	// for all structures), and a set of data samples.
	// + A sample period, which should be greater than or equal to the sample
	// period of the input span structures.
	// + A downsampler operation.
	// + A description of an "expected" span, which describes the list of
	// InternalTimeSeriesData structures that should result from running the input
	// span through the downsampling operation.
	//
	// Importantly, both the "input" and "expected" spans are defined using the
	// dataDesc structure, rather than explicitly creating InternalTimeSeriesData
	// structures. This is because we want to test both the row format and the
	// columnar format of InternalTimeSeriesData when downsampling - therefore,
	// using the descriptors, each test case is run using first row-structured
	// data, then column-structured data, and finally a mixed-format test which
	// combines the two. This gives us a very broad test area while still
	// maintaining a compact set of test cases.
	for tcnum, tc := range []struct {
		inputDesc    []dataDesc
		samplePeriod int64
		downsampler  tspb.TimeSeriesQueryAggregator
		expectedDesc []dataDesc
	}{
		// Original sample period, average downsampler.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
			},
			samplePeriod: 10,
			downsampler:  tspb.TimeSeriesQueryAggregator_AVG,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 3),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
			},
		},
		// Original sample period, max downsampler. Should fill in max value.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
			},
			samplePeriod: 10,
			downsampler:  tspb.TimeSeriesQueryAggregator_MAX,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
			},
		},
		// Original sample period, min downsampler.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
			},
			samplePeriod: 10,
			downsampler:  tspb.TimeSeriesQueryAggregator_MIN,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
			},
		},
		// AVG downsampler. Should re-use original span data.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
				{70, 10, []tspb.TimeSeriesDatapoint{
					tsdp(70, 7),
					tsdp(90, 9),
					tsdp(110, 8),
				}},
			},
			samplePeriod: 50,
			downsampler:  tspb.TimeSeriesQueryAggregator_AVG,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(0, 3),
					tsdp(50, 6.75),
					tsdp(100, 8),
				}},
			},
		},
		// MAX downsampler. Should re-use original span data; note that the sum and
		// count values are NOT overwritten.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
				{70, 10, []tspb.TimeSeriesDatapoint{
					tsdp(70, 7),
					tsdp(90, 9),
					tsdp(110, 8),
				}},
			},
			samplePeriod: 50,
			downsampler:  tspb.TimeSeriesQueryAggregator_MAX,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(0, 5),
					tsdp(50, 9),
					tsdp(100, 8),
				}},
			},
		},
		// MIN downsampler. Should re-use original span data; note that the sum and
		// count values are NOT overwritten.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
					tsdp(20, 2),
					tsdp(20, 4),
					tsdp(30, 5),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
				{70, 10, []tspb.TimeSeriesDatapoint{
					tsdp(70, 7),
					tsdp(90, 9),
					tsdp(110, 8),
				}},
			},
			samplePeriod: 50,
			downsampler:  tspb.TimeSeriesQueryAggregator_MIN,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(0, 1),
					tsdp(50, 5),
					tsdp(100, 8),
				}},
			},
		},
		// AVG downsampler, downsampling while re-using multiple
		// InternalTimeSeriesData structures.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
				{70, 10, []tspb.TimeSeriesDatapoint{
					tsdp(70, 7),
					tsdp(90, 9),
					tsdp(110, 8),
				}},
			},
			samplePeriod: 50,
			downsampler:  tspb.TimeSeriesQueryAggregator_AVG,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(0, 1),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 6.75),
					tsdp(100, 8),
				}},
			},
		},
		// MAX downsampler, downsampling while re-using multiple
		// InternalTimeSeriesData structures.
		{
			inputDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(10, 1),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 5),
					tsdp(60, 6),
				}},
				{70, 10, []tspb.TimeSeriesDatapoint{
					tsdp(70, 7),
					tsdp(90, 9),
					tsdp(110, 8),
				}},
			},
			samplePeriod: 50,
			downsampler:  tspb.TimeSeriesQueryAggregator_MAX,
			expectedDesc: []dataDesc{
				{0, 10, []tspb.TimeSeriesDatapoint{
					tsdp(0, 1),
				}},
				{50, 10, []tspb.TimeSeriesDatapoint{
					tsdp(50, 9),
					tsdp(100, 8),
				}},
			},
		},
	} {
		// Run case in Row format.
		t.Run(fmt.Sprintf("%d:Row", tcnum), func(t *testing.T) {
			span := make(timeSeriesSpan, len(tc.inputDesc))
			for i, desc := range tc.inputDesc {
				span[i] = makeInternalRowData(desc.startTimestamp, desc.sampleDuration, desc.samples)
			}
			expectedSpan := make(timeSeriesSpan, len(tc.expectedDesc))
			for i, desc := range tc.expectedDesc {
				expectedSpan[i] = makeInternalRowData(desc.startTimestamp, desc.sampleDuration, desc.samples)
			}
			spans := map[string]timeSeriesSpan{
				"test": span,
			}
			downsampleSpans(spans, tc.samplePeriod, tc.downsampler)
			if a, e := spans["test"], expectedSpan; !reflect.DeepEqual(a, e) {
				for _, diff := range pretty.Diff(a, e) {
					t.Error(diff)
				}
			}
		})
		// Run case in Column format.
		t.Run(fmt.Sprintf("%d:Column", tcnum), func(t *testing.T) {
			span := make(timeSeriesSpan, len(tc.inputDesc))
			for i, desc := range tc.inputDesc {
				span[i] = makeInternalColumnData(desc.startTimestamp, desc.sampleDuration, desc.samples)
			}
			expectedSpan := make(timeSeriesSpan, len(tc.expectedDesc))
			for i, desc := range tc.expectedDesc {
				expectedSpan[i] = makeInternalColumnData(desc.startTimestamp, desc.sampleDuration, desc.samples)
			}
			spans := map[string]timeSeriesSpan{
				"test": span,
			}
			downsampleSpans(spans, tc.samplePeriod, tc.downsampler)
			if a, e := spans["test"], expectedSpan; !reflect.DeepEqual(a, e) {
				for _, diff := range pretty.Diff(a, e) {
					t.Error(diff)
				}
			}
		})
		// Run case in Mixed format: alternate row/column structures by index.
		t.Run(fmt.Sprintf("%d:Mixed", tcnum), func(t *testing.T) {
			span := make(timeSeriesSpan, len(tc.inputDesc))
			for i, desc := range tc.inputDesc {
				if i%2 == 0 {
					span[i] = makeInternalRowData(desc.startTimestamp, desc.sampleDuration, desc.samples)
				} else {
					span[i] = makeInternalColumnData(desc.startTimestamp, desc.sampleDuration, desc.samples)
				}
			}
			expectedSpan := make(timeSeriesSpan, len(tc.expectedDesc))
			for i, desc := range tc.expectedDesc {
				if i%2 == 0 {
					expectedSpan[i] = makeInternalRowData(desc.startTimestamp, desc.sampleDuration, desc.samples)
				} else {
					expectedSpan[i] = makeInternalColumnData(desc.startTimestamp, desc.sampleDuration, desc.samples)
				}
			}
			spans := map[string]timeSeriesSpan{
				"test": span,
			}
			downsampleSpans(spans, tc.samplePeriod, tc.downsampler)
			if a, e := spans["test"], expectedSpan; !reflect.DeepEqual(a, e) {
				for _, diff := range pretty.Diff(a, e) {
					t.Error(diff)
				}
			}
		})
	}
}
|
package reconcilers
import (
"context"
"fmt"
marin3rv1alpha1 "github.com/3scale-ops/marin3r/apis/marin3r/v1alpha1"
xdss "github.com/3scale-ops/marin3r/pkg/discoveryservice/xdss"
envoy "github.com/3scale-ops/marin3r/pkg/envoy"
envoy_resources "github.com/3scale-ops/marin3r/pkg/envoy/resources"
envoy_serializer "github.com/3scale-ops/marin3r/pkg/envoy/serializer"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Keys under which a "kubernetes.io/tls" Secret stores its certificate and
// private key.
const (
	secretCertificate = "tls.crt"
	secretPrivateKey  = "tls.key"
)
// CacheReconciler translates EnvoyResources into envoy protos and keeps the
// xDS discovery-service snapshot cache in sync with them.
type CacheReconciler struct {
	// NOTE(review): storing a context in a struct is discouraged in Go;
	// consider passing it per call — confirm lifetime expectations.
	ctx       context.Context
	logger    logr.Logger
	client    client.Client                        // used to fetch referenced kubernetes Secrets
	xdsCache  xdss.Cache                           // snapshot cache written by Reconcile
	decoder   envoy_serializer.ResourceUnmarshaller // decodes serialized envoy resource values
	generator envoy_resources.Generator             // produces empty envoy resource protos
}
// NewCacheReconciler assembles a CacheReconciler from its collaborators.
func NewCacheReconciler(ctx context.Context, logger logr.Logger, client client.Client, xdsCache xdss.Cache,
	decoder envoy_serializer.ResourceUnmarshaller, generator envoy_resources.Generator) CacheReconciler {
	return CacheReconciler{
		ctx:       ctx,
		logger:    logger,
		client:    client,
		xdsCache:  xdsCache,
		decoder:   decoder,
		generator: generator,
	}
}
// Reconcile generates a snapshot for the given EnvoyResources and writes it
// to the xDS cache under nodeID when it differs from the currently stored
// snapshot (or when no previous snapshot could be retrieved). It returns a
// VersionTracker with the per-resource-type versions of the new snapshot.
func (r *CacheReconciler) Reconcile(req types.NamespacedName, resources *marin3rv1alpha1.EnvoyResources, nodeID, version string) (*marin3rv1alpha1.VersionTracker, error) {
	snap, err := r.GenerateSnapshot(req, resources)
	if err != nil {
		return nil, err
	}
	oldSnap, err := r.xdsCache.GetSnapshot(nodeID)
	// A GetSnapshot error is treated as "no previous snapshot exists": the
	// new snapshot is written unconditionally in that case.
	if err != nil || areDifferent(snap, oldSnap) {
		r.logger.Info("Writing new snapshot to xDS cache", "Revision", version, "NodeID", nodeID)
		if err := r.xdsCache.SetSnapshot(nodeID, snap); err != nil {
			return nil, err
		}
	}
	return &marin3rv1alpha1.VersionTracker{
		Endpoints: snap.GetVersion(envoy.Endpoint),
		Clusters:  snap.GetVersion(envoy.Cluster),
		Routes:    snap.GetVersion(envoy.Route),
		Listeners: snap.GetVersion(envoy.Listener),
		Secrets:   snap.GetVersion(envoy.Secret),
		Runtimes:  snap.GetVersion(envoy.Runtime),
	}, nil
}
// GenerateSnapshot builds a new xDS snapshot from the EnvoyResources spec:
// endpoints, clusters, routes, listeners and runtimes are unmarshalled from
// their serialized values, while secrets are loaded from kubernetes Secret
// objects of type "kubernetes.io/tls". Any resource that fails to load
// produces a field-validation error identifying its position in the spec.
//
// NOTE(review): the field-path child names mix singular and plural
// ("endpoint", "clusters", "routes", "listener", "runtime", "secrets") —
// confirm whether they should all match the spec's field names.
func (r *CacheReconciler) GenerateSnapshot(req types.NamespacedName, resources *marin3rv1alpha1.EnvoyResources) (xdss.Snapshot, error) {
	snap := r.xdsCache.NewSnapshot("")
	for idx, endpoint := range resources.Endpoints {
		res := r.generator.New(envoy.Endpoint)
		if err := r.decoder.Unmarshal(endpoint.Value, res); err != nil {
			return nil,
				resourceLoaderError(
					req, endpoint.Value, field.NewPath("spec", "resources").Child("endpoint").Index(idx).Child("value"),
					fmt.Sprintf("Invalid envoy resource value: '%s'", err),
				)
		}
		snap.SetResource(endpoint.Name, res)
	}
	for idx, cluster := range resources.Clusters {
		res := r.generator.New(envoy.Cluster)
		if err := r.decoder.Unmarshal(cluster.Value, res); err != nil {
			return nil,
				resourceLoaderError(
					req, cluster.Value, field.NewPath("spec", "resources").Child("clusters").Index(idx).Child("value"),
					fmt.Sprintf("Invalid envoy resource value: '%s'", err),
				)
		}
		snap.SetResource(cluster.Name, res)
	}
	for idx, route := range resources.Routes {
		res := r.generator.New(envoy.Route)
		if err := r.decoder.Unmarshal(route.Value, res); err != nil {
			return nil,
				resourceLoaderError(
					req, route.Value, field.NewPath("spec", "resources").Child("routes").Index(idx).Child("value"),
					fmt.Sprintf("Invalid envoy resource value: '%s'", err),
				)
		}
		snap.SetResource(route.Name, res)
	}
	for idx, listener := range resources.Listeners {
		res := r.generator.New(envoy.Listener)
		if err := r.decoder.Unmarshal(listener.Value, res); err != nil {
			return nil,
				resourceLoaderError(
					req, listener.Value, field.NewPath("spec", "resources").Child("listener").Index(idx).Child("value"),
					fmt.Sprintf("Invalid envoy resource value: '%s'", err),
				)
		}
		snap.SetResource(listener.Name, res)
	}
	for idx, runtime := range resources.Runtimes {
		res := r.generator.New(envoy.Runtime)
		if err := r.decoder.Unmarshal(runtime.Value, res); err != nil {
			return nil,
				resourceLoaderError(
					req, runtime.Value, field.NewPath("spec", "resources").Child("runtime").Index(idx).Child("value"),
					fmt.Sprintf("Invalid envoy resource value: '%s'", err),
				)
		}
		snap.SetResource(runtime.Name, res)
	}
	for idx, secret := range resources.Secrets {
		s := &corev1.Secret{}
		key := secret.GetSecretKey(req.Namespace)
		// NOTE(review): fmt.Errorf("%s", err.Error()) drops the error chain;
		// consider wrapping with %w if callers inspect errors — confirm.
		if err := r.client.Get(r.ctx, key, s); err != nil {
			return nil, fmt.Errorf("%s", err.Error())
		}
		// Validate secret holds a certificate
		if s.Type == "kubernetes.io/tls" {
			res := r.generator.NewSecret(secret.Name, string(s.Data[secretPrivateKey]), string(s.Data[secretCertificate]))
			snap.SetResource(secret.Name, res)
		} else {
			err := resourceLoaderError(
				req, secret.Ref, field.NewPath("spec", "resources").Child("secrets").Index(idx).Child("ref"),
				"Only 'kubernetes.io/tls' type secrets allowed",
			)
			return nil, fmt.Errorf("%s", err.Error())
		}
	}
	return snap, nil
}
// resourceLoaderError builds a field-validation error for an EnvoyConfig
// resource that failed to load, locating the offending value at resPath and
// attributing it to the namespaced EnvoyConfig object identified by req.
func resourceLoaderError(req types.NamespacedName, value interface{}, resPath *field.Path, msg string) error {
	return errors.NewInvalid(
		schema.GroupKind{Group: "envoy", Kind: "EnvoyConfig"},
		fmt.Sprintf("%s/%s", req.Namespace, req.Name),
		// msg is already a string; the previous fmt.Sprint(msg) was a
		// redundant identity round-trip.
		field.ErrorList{field.Invalid(resPath, value, msg)},
	)
}
// areDifferent reports whether any resource-type version differs between the
// two snapshots.
func areDifferent(a, b xdss.Snapshot) bool {
	allTypes := [...]envoy.Type{envoy.Endpoint, envoy.Cluster, envoy.Route, envoy.Listener, envoy.Secret, envoy.Runtime}
	for _, t := range allTypes {
		if a.GetVersion(t) != b.GetVersion(t) {
			return true
		}
	}
	return false
}
|
package repository
import (
"database/sql"
"ehsan_esmaeili/model"
"fmt"
)
// Buy_ChargRepository persists charge purchases.
// NOTE(review): Go naming convention avoids underscores (BuyChargeRepository);
// also no type visible in this file implements Insert — the only related
// method is InsertBuy_Charg on UserRepositorySqlServer — confirm wiring.
type Buy_ChargRepository interface {
	Insert(user *model.Buy_Charg) (use *model.GetaUser, err error)
}
// Buy_ChargRepositorySqlServer is a SQL Server-backed repository for charge
// purchases. The trailing byte counts appear to be hand-estimated field sizes.
type Buy_ChargRepositorySqlServer struct {
	db    *sql.DB //64b
	table string  //4b
	//68
}
// NewBuy_ChargRepositorySqlServer constructs a repository bound to the given
// table and database handle.
// NOTE(review): despite its name this returns *UserRepositorySqlServer, not
// *Buy_ChargRepositorySqlServer (which is never constructed) — looks like a
// copy-paste slip; confirm the intended type before changing, since callers
// depend on the current return type.
func NewBuy_ChargRepositorySqlServer(table string, db *sql.DB) *UserRepositorySqlServer {
	return &UserRepositorySqlServer{
		db:    db,
		table: table,
	}
}
// InsertBuy_Charg executes the buy_charg_insert stored procedure with the
// purchase's user id, phone number and price, scanning the returned error
// code and row id into a GetaUser. It returns nil and the query/scan error
// on failure.
func (r *UserRepositorySqlServer) InsertBuy_Charg(user *model.Buy_Charg) (use *model.GetaUser, err error) {
	var a model.GetaUser
	err = r.db.QueryRow(" Exec buy_charg_insert ?,?,?",
		user.User_Id,
		user.Phone_Number,
		user.Pric,
	).Scan(&a.Error_cod, &a.Id)
	if err != nil {
		return nil, err
	}
	// Debug trace of the inserted id. Previously this ran before the error
	// check and printed a zero value whenever the query failed.
	fmt.Println(a.Id)
	return &a, nil
}
|
package trie
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNodeNew checks that a zero-value node starts with an empty key map.
func TestNodeNew(t *testing.T) {
	nd := node{}
	// NOTE(review): a struct value can never equal nil, so this assertion is
	// vacuous — it documents intent rather than testing anything.
	assert.NotEqual(t, nil, nd)
	assert.Equal(t, 0, len(nd.keys))
}
// TestNodeNode exercises child lookup with and without create-on-miss.
func TestNodeNode(t *testing.T) {
	{
		// Without create: the missing path is not materialized and the lookup
		// reports ErrorNotFound.
		nd := node{}
		_, err := nd.node([]string{"foo"}, false)
		assert.Equal(t, 0, len(nd.keys))
		assert.Equal(t, ErrorNotFound, err)
	}
	{
		// With create: the missing child is created on the fly.
		nd := node{}
		_, err := nd.node([]string{"foo"}, true)
		assert.Equal(t, 1, len(nd.keys))
		assert.Equal(t, nil, err)
	}
}
// TestNodeUpsert checks that upsert materializes the key path.
func TestNodeUpsert(t *testing.T) {
	{
		nd := node{}
		nd.upsert([]string{"foo"}, 23)
		assert.NotEqual(t, (nil), nd.keys["foo"])
	}
}
// TestNodeData exercises value retrieval: a missing path, an intermediate
// node that carries no data of its own, and a leaf holding a value.
func TestNodeData(t *testing.T) {
	{
		// Unknown path.
		nd := node{}
		_, err := nd.data([]string{"foo"})
		assert.Equal(t, ErrorNotFound, err)
	}
	{
		// "foo" exists only as an intermediate node on the path to "bar".
		nd := node{}
		nd.upsert([]string{"foo", "bar"}, 42)
		_, err := nd.data([]string{"foo"})
		assert.Equal(t, ErrorNoData, err)
	}
	{
		// Leaf with data.
		nd := node{}
		nd.upsert([]string{"foo"}, 23)
		value, err := nd.data([]string{"foo"})
		assert.Equal(t, 23, value)
		assert.Equal(t, nil, err)
	}
}
// TestNodeDelete exercises deletion: a missing path, an existing leaf, and a
// path extending past an existing leaf.
func TestNodeDelete(t *testing.T) {
	{
		// Deleting an unknown path fails.
		nd := node{}
		_, err := nd.delete([]string{"foo"})
		assert.Equal(t, ErrorNotFound, err)
	}
	{
		// Deleting an existing leaf succeeds.
		nd := node{}
		nd.upsert([]string{"foo"}, 23)
		_, err := nd.delete([]string{"foo"})
		assert.Equal(t, nil, err)
	}
	{
		// A path that goes deeper than the stored leaf is not found.
		nd := node{}
		nd.upsert([]string{"foo"}, 23)
		_, err := nd.delete([]string{"foo", "bar"})
		assert.Equal(t, ErrorNotFound, err)
	}
}
|
package prompt
import "github.com/AlecAivazis/survey/v2"
// loginQ defines the interactive login questionnaire: a required username
// followed by a masked password.
var loginQ = []*survey.Question{
	{
		Name:     "username",
		Prompt:   &survey.Input{Message: promptLoginUsername},
		Validate: survey.Required,
		// NOTE(review): survey.Title title-cases the answer; if usernames are
		// case-sensitive this transform may corrupt them — confirm intended.
		Transform: survey.Title,
	},
	{
		Name: "password",
		Prompt: &survey.Password{
			Message: promptLoginPassword,
		},
	},
}
// LoginA is the structure used for login questions; survey populates it by
// matching question names to field names.
type LoginA struct {
	Username string // answer to the "username" question
	Password string // answer to the "password" question
}
// LoginPrompt is a custom login prompt for Dessert. It runs the login
// questionnaire with the package's custom icons and stores the answers in
// loginA.
func LoginPrompt(loginA *LoginA) error {
	return survey.Ask(loginQ, loginA, survey.WithIcons(customPrompt))
}
|
package binance
import (
"encoding/json"
"fmt"
"github.com/go-kit/kit/log/level"
"github.com/gorilla/websocket"
"strings"
)
// DepthWebsocketLevel subscribes to the binance partial-book-depth websocket
// stream for dwr.Symbol at dwr.Level entries per side (defaulting to 5).
// Parsed events are delivered on the returned channel; the done channel is
// closed when the reader goroutine exits (context cancellation, read error,
// or parse error). The dial error, if any, is returned immediately.
func (as *apiService) DepthWebsocketLevel(dwr DepthWebsocketRequestLevel) (chan *DepthLevelEvent, chan struct{}, error) {
	if dwr.Level == 0 {
		dwr.Level = 5
	}
	url := fmt.Sprintf("wss://stream.binance.com:9443/ws/%s@depth%d", strings.ToLower(dwr.Symbol), dwr.Level)
	c, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		return nil, nil, err
	}
	// Example payload:
	//{"lastUpdateId":176319407,
	// "bids":[["0.00258680","21.03000000",[]],["0.00258650","2.29000000",[]],["0.00258640","367.74000000",[]],["0.00258630","310.58000000",[]],["0.00258520","103.37000000",[]]],
	// "asks":[["0.00258800","9.68000000",[]],["0.00258860","4.08000000",[]],["0.00258900","50.57000000",[]],["0.00258920","0.47000000",[]],["0.00258940","573.51000000",[]]]}
	done := make(chan struct{})
	dech := make(chan *DepthLevelEvent)
	go func() {
		defer c.Close()
		defer close(done)
		for {
			select {
			case <-as.Ctx.Done():
				level.Info(as.Logger).Log("closing reader")
				return
			default:
				_, message, err := c.ReadMessage()
				if err != nil {
					level.Error(as.Logger).Log("wsRead", err)
					return
				}
				// Decode only the fields we use; bids/asks arrive as arrays of
				// [price, quantity, ...] where price and quantity are strings.
				rawDepth := struct {
					//Type string `json:"-"`
					//Time float64 `json:"-"`
					//Symbol string `json:"-"`
					UpdateID      int             `json:"lastUpdateId"`
					BidDepthDelta [][]interface{} `json:"bids"`
					AskDepthDelta [][]interface{} `json:"asks"`
				}{}
				if err := json.Unmarshal(message, &rawDepth); err != nil {
					level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
					return
				}
				//t, err := timeFromUnixTimestampFloat(rawDepth.Time)
				//if err != nil {
				//	level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
				//
				//	return
				//}
				de := &DepthLevelEvent{
					//WSEvent: WSEvent{
					//	Type:   rawDepth.Type,
					//	Time:   t,
					//	Symbol: rawDepth.Symbol,
					//},
					UpdateID: rawDepth.UpdateID,
					OrderBook: OrderBook{
						LastUpdateID: rawDepth.UpdateID,
					},
				}
				// Convert string-encoded price/quantity pairs into Orders.
				for _, b := range rawDepth.BidDepthDelta {
					p, err := floatFromString(b[0])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					q, err := floatFromString(b[1])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					de.Bids = append(de.Bids, &Order{
						Price:    p,
						Quantity: q,
					})
				}
				for _, a := range rawDepth.AskDepthDelta {
					p, err := floatFromString(a[0])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					q, err := floatFromString(a[1])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					de.Asks = append(de.Asks, &Order{
						Price:    p,
						Quantity: q,
					})
				}
				dech <- de
			}
		}
	}()
	go as.exitHandler(c, done)
	return dech, done, nil
}
// DepthWebsocketStream subscribes to a combined binance websocket stream for
// the given depth streams (e.g. "ethbtc@depth5"). Parsed events carrying the
// originating stream name are delivered on the returned channel; the done
// channel is closed when the reader goroutine exits (context cancellation,
// read error, or parse error). The dial error, if any, is returned
// immediately.
func (as *apiService) DepthWebsocketStream(dwr DepthWebsocketRequestStream) (chan *DepthStreamEvent, chan struct{}, error) {
	if len(dwr.Streams) == 0 {
		return nil, nil, fmt.Errorf("streams is empty")
	}
	// Combined streams are joined with '/' and must be lowercase.
	streams := ""
	for _, stream := range dwr.Streams {
		if streams != "" {
			streams += "/"
		}
		streams += strings.ToLower(stream)
	}
	//url := fmt.Sprintf("wss://stream.binance.com:9443/stream?streams=ethbtc@depth5/bnbbtc@depth5")
	url := fmt.Sprintf("wss://stream.binance.com:9443/stream?streams=%s", streams)
	c, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		return nil, nil, err
	}
	// Example payload:
	//{"stream":"bnbbtc@depth5",
	// "data":{"lastUpdateId":177304217,
	// "bids":[["0.00247730","0.44000000",[]],["0.00247610","104.68000000",[]],["0.00247600","334.55000000",[]],["0.00247560","119.41000000",[]],["0.00247500","816.14000000",[]]],"asks":[["0.00247800","380.30000000",[]],["0.00247840","10.17000000",[]],["0.00247930","293.62000000",[]],["0.00247940","200.00000000",[]],["0.00247950","4.09000000",[]]]}}
	done := make(chan struct{})
	dech := make(chan *DepthStreamEvent)
	go func() {
		defer c.Close()
		defer close(done)
		for {
			select {
			case <-as.Ctx.Done():
				level.Info(as.Logger).Log("closing reader")
				return
			default:
				_, message, err := c.ReadMessage()
				if err != nil {
					level.Error(as.Logger).Log("wsRead", err)
					return
				}
				// Decode only the fields we use; bids/asks arrive as arrays of
				// [price, quantity, ...] where price and quantity are strings.
				rawDepth := struct {
					//Type string `json:"-"`
					//Time float64 `json:"-"`
					//Symbol string `json:"-"`
					Stream string `json:"stream"`
					Data   struct {
						UpdateID      int             `json:"lastUpdateId"`
						BidDepthDelta [][]interface{} `json:"bids"`
						AskDepthDelta [][]interface{} `json:"asks"`
					} `json:"data"` // fixed: tag was `json:data` (missing quotes), which go vet rejects and encoding/json ignores
				}{}
				if err := json.Unmarshal(message, &rawDepth); err != nil {
					level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
					return
				}
				//t, err := timeFromUnixTimestampFloat(rawDepth.Time)
				//if err != nil {
				//	level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
				//
				//	return
				//}
				de := &DepthStreamEvent{
					//WSEvent: WSEvent{
					//	Type:   rawDepth.Type,
					//	Time:   t,
					//	Symbol: rawDepth.Symbol,
					//},
					Stream:   rawDepth.Stream,
					UpdateID: rawDepth.Data.UpdateID,
					OrderBook: OrderBook{
						LastUpdateID: rawDepth.Data.UpdateID,
					},
				}
				// Convert string-encoded price/quantity pairs into Orders.
				for _, b := range rawDepth.Data.BidDepthDelta {
					p, err := floatFromString(b[0])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					q, err := floatFromString(b[1])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					de.Bids = append(de.Bids, &Order{
						Price:    p,
						Quantity: q,
					})
				}
				for _, a := range rawDepth.Data.AskDepthDelta {
					p, err := floatFromString(a[0])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					q, err := floatFromString(a[1])
					if err != nil {
						level.Error(as.Logger).Log("wsUnmarshal", err, "body", string(message))
						return
					}
					de.Asks = append(de.Asks, &Order{
						Price:    p,
						Quantity: q,
					})
				}
				dech <- de
			}
		}
	}()
	go as.exitHandler(c, done)
	return dech, done, nil
}
|
//go:generate mockgen -destination mock/move.go . MoveHandler
package handlers
import (
"context"
"path/filepath"
"github.com/k0kubun/pp"
"github.com/raba-jp/primus/pkg/cli/ui"
"github.com/spf13/afero"
"go.uber.org/zap"
"golang.org/x/xerrors"
)
// MoveParams describes a file move: the source and destination paths, plus
// the working directory used to resolve them when they are relative.
type MoveParams struct {
	Src  string
	Dest string
	Cwd  string
}
// String pretty-prints the params for logs and debugging output.
func (p *MoveParams) String() string {
	return pp.Sprintf("%v\n", p)
}
// MoveHandler moves a file according to MoveParams; when dryrun is true the
// operation is only reported, not executed.
type MoveHandler interface {
	Move(ctx context.Context, dryrun bool, p *MoveParams) error
}
// MoveHandlerFunc adapts a plain function to the MoveHandler interface.
type MoveHandlerFunc func(ctx context.Context, dryrun bool, p *MoveParams) error

// Move invokes the wrapped function.
func (f MoveHandlerFunc) Move(ctx context.Context, dryrun bool, p *MoveParams) error {
	return f(ctx, dryrun, p)
}
// NewMove returns a MoveHandler that renames p.Src to p.Dest on the given
// filesystem, resolving relative paths against p.Cwd. In dryrun mode it only
// prints the equivalent "mv" command.
func NewMove(fs afero.Fs) MoveHandler {
	return MoveHandlerFunc(func(ctx context.Context, dryrun bool, p *MoveParams) error {
		if dryrun {
			// NOTE(review): dryrun prints the paths before they are resolved
			// against Cwd, so the printed command may differ from the real
			// operation — confirm this is intended.
			ui.Printf("mv %s %s\n", p.Src, p.Dest)
			return nil
		}
		if !filepath.IsAbs(p.Src) {
			p.Src = filepath.Join(p.Cwd, p.Src)
		}
		if !filepath.IsAbs(p.Dest) {
			p.Dest = filepath.Join(p.Cwd, p.Dest)
		}
		if err := fs.Rename(p.Src, p.Dest); err != nil {
			return xerrors.Errorf("Failed to move file: %s => %s: %w", p.Src, p.Dest, err)
		}
		zap.L().Info(
			"moved file",
			zap.String("source", p.Src),
			zap.String("destination", p.Dest),
		)
		return nil
	})
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package store
import (
"fmt"
"strings"
"testing"
"time"
"github.com/mattermost/mattermost-cloud/internal/testlib"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pborman/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
mmv1alpha1 "github.com/mattermost/mattermost-operator/apis/mattermost/v1alpha1"
corev1 "k8s.io/api/core/v1"
)
// TestInstallations exercises installation creation, retrieval, deletion,
// group-config merging, and filtered listing against a test SQL store.
func TestInstallations(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	ownerID1 := model.NewID()
	ownerID2 := model.NewID()
	groupID2 := model.NewID()
	group1 := &model.Group{
		Version: "group1-version",
		Image:   "custom/image",
		MattermostEnv: model.EnvVarMap{
			"Key1": model.EnvVar{Value: "Value1"},
		},
	}
	err := sqlStore.CreateGroup(group1, nil)
	require.NoError(t, err)
	groupID1 := group1.ID
	// Sleeps between creations guarantee distinct CreateAt timestamps so
	// listing order is deterministic.
	time.Sleep(1 * time.Millisecond)
	annotations := []*model.Annotation{{Name: "annotation1"}, {Name: "annotation2"}}
	installation1 := &model.Installation{
		Name:      "test1",
		OwnerID:   ownerID1,
		Version:   "version",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID1,
		CRVersion: model.V1betaCRVersion,
		State:     model.InstallationStateCreationRequested,
		PriorityEnv: model.EnvVarMap{
			"V1": model.EnvVar{
				Value: "test",
			},
		},
	}
	err = sqlStore.CreateInstallation(installation1, annotations, fixDNSRecords(0))
	require.NoError(t, err)
	t.Run("get installation", func(t *testing.T) {
		fetched, errTest := sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, errTest)
		assert.Equal(t, installation1, fetched)
	})
	// Reusing DNS record 0 / name "test1" must violate unique constraints.
	t.Run("fail on not unique DNS", func(t *testing.T) {
		errTest := sqlStore.CreateInstallation(&model.Installation{}, nil, fixDNSRecords(0))
		require.Error(t, errTest)
		assert.Contains(t, strings.ToLower(errTest.Error()), "unique constraint")
	})
	t.Run("fail on not unique Name", func(t *testing.T) {
		errTest := sqlStore.CreateInstallation(&model.Installation{Name: "test1"}, nil, fixDNSRecords(11))
		require.Error(t, errTest)
		assert.Contains(t, strings.ToLower(errTest.Error()), "unique constraint")
	})
	time.Sleep(1 * time.Millisecond)
	installation2 := &model.Installation{
		Name:      "test2",
		OwnerID:   ownerID1,
		Version:   "version2",
		Image:     "custom-image",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID2,
		CRVersion: model.DefaultCRVersion,
		State:     model.InstallationStateStable,
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(1))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	installation3 := &model.Installation{
		Name:      "test3",
		OwnerID:   ownerID2,
		Version:   "version",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID1,
		State:     model.InstallationStateCreationRequested,
	}
	dnsRecords3 := fixDNSRecords(3)
	err = sqlStore.CreateInstallation(installation3, nil, dnsRecords3)
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	// installation4 carries a single-tenant database config.
	dbConfig := model.SingleTenantDatabaseConfig{
		PrimaryInstanceType: "db.r5.large",
		ReplicaInstanceType: "db.r5.medium",
		ReplicasCount:       4,
	}
	installation4 := &model.Installation{
		Name:                       "test4",
		OwnerID:                    ownerID2,
		Version:                    "version",
		Database:                   model.InstallationDatabaseMysqlOperator,
		Filestore:                  model.InstallationFilestoreMinioOperator,
		Size:                       mmv1alpha1.Size100String,
		Affinity:                   model.InstallationAffinityIsolated,
		GroupID:                    &groupID2,
		State:                      model.InstallationStateCreationRequested,
		SingleTenantDatabaseConfig: &dbConfig,
	}
	err = sqlStore.CreateInstallation(installation4, nil, fixDNSRecords(4))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	// installation5 carries an external database config.
	installation5 := &model.Installation{
		Name:                   "test5",
		OwnerID:                ownerID2,
		Version:                "version",
		Database:               model.InstallationDatabaseMysqlOperator,
		Filestore:              model.InstallationFilestoreMinioOperator,
		Size:                   mmv1alpha1.Size100String,
		Affinity:               model.InstallationAffinityIsolated,
		State:                  model.InstallationStateCreationRequested,
		ExternalDatabaseConfig: &model.ExternalDatabaseConfig{SecretName: "test-secret"},
	}
	err = sqlStore.CreateInstallation(installation5, nil, fixDNSRecords(5))
	require.NoError(t, err)
	t.Run("get unknown installation", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation("unknown", false, false)
		require.NoError(t, err)
		require.Nil(t, installation)
	})
	t.Run("get installation 1", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, installation1, installation)
	})
	t.Run("get installation 2", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation2.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, installation2, installation)
	})
	t.Run("get installation 3", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation3.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, installation3, installation)
	})
	t.Run("get and delete installation 4", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation4.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, installation4, installation)
		err = sqlStore.DeleteInstallation(installation4.ID)
		require.NoError(t, err)
		// Re-fetch into the outer variable so the deleted state (DeleteAt)
		// is reflected in the filter test-case expectations below.
		installation4, err = sqlStore.GetInstallation(installation4.ID, false, false)
		require.NoError(t, err)
	})
	t.Run("get and delete installation 5", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation5.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, installation5, installation)
		err = sqlStore.DeleteInstallation(installation5.ID)
		require.NoError(t, err)
		// Same re-fetch trick as installation4 above.
		installation5, err = sqlStore.GetInstallation(installation5.ID, false, false)
		require.NoError(t, err)
	})
	t.Run("groups", func(t *testing.T) {
		group, err := sqlStore.GetGroup(groupID1)
		require.NoError(t, err)
		require.Equal(t, group1, group)
		t.Run("include group config and overrides", func(t *testing.T) {
			installation, err := sqlStore.GetInstallation(installation1.ID, true, true)
			require.NoError(t, err)
			mergedInstallation := installation1.Clone()
			mergedInstallation.MergeWithGroup(group, true)
			require.Equal(t, mergedInstallation, installation)
		})
		t.Run("include group config, no overrides", func(t *testing.T) {
			installation, err := sqlStore.GetInstallation(installation1.ID, true, false)
			require.NoError(t, err)
			mergedInstallation := installation1.Clone()
			mergedInstallation.MergeWithGroup(group, false)
			require.Equal(t, mergedInstallation, installation)
		})
	})
	// Table-driven coverage of GetInstallations filters: paging, deleted
	// inclusion, owner, group, DNS, state, and name.
	testCases := []struct {
		Description string
		Filter      *model.InstallationFilter
		Expected    []*model.Installation
	}{
		{
			"page 0, perPage 0",
			&model.InstallationFilter{
				Paging: model.Paging{
					Page:           0,
					PerPage:        0,
					IncludeDeleted: false,
				},
			},
			nil,
		},
		{
			"page 0, perPage 1",
			&model.InstallationFilter{
				Paging: model.Paging{
					Page:           0,
					PerPage:        1,
					IncludeDeleted: false,
				},
			},
			[]*model.Installation{installation1},
		},
		{
			"page 0, perPage 10",
			&model.InstallationFilter{
				Paging: model.Paging{
					Page:           0,
					PerPage:        10,
					IncludeDeleted: false,
				},
			},
			[]*model.Installation{installation1, installation2, installation3},
		},
		{
			"page 0, perPage 10, include deleted",
			&model.InstallationFilter{
				Paging: model.Paging{
					Page:           0,
					PerPage:        10,
					IncludeDeleted: true,
				},
			},
			[]*model.Installation{installation1, installation2, installation3, installation4, installation5},
		},
		{
			"owner 1",
			&model.InstallationFilter{
				OwnerID: ownerID1,
				Paging:  model.AllPagesNotDeleted(),
			},
			[]*model.Installation{installation1, installation2},
		},
		{
			"owner 1, include deleted",
			&model.InstallationFilter{
				OwnerID: ownerID1,
				Paging:  model.AllPagesWithDeleted(),
			},
			[]*model.Installation{installation1, installation2},
		},
		{
			"owner 2",
			&model.InstallationFilter{
				OwnerID: ownerID2,
				Paging:  model.AllPagesNotDeleted(),
			},
			[]*model.Installation{installation3},
		},
		{
			"owner 2, include deleted",
			&model.InstallationFilter{
				OwnerID: ownerID2,
				Paging:  model.AllPagesWithDeleted(),
			},
			[]*model.Installation{installation3, installation4, installation5},
		},
		{
			"group 1",
			&model.InstallationFilter{
				GroupID: groupID1,
				Paging:  model.AllPagesWithDeleted(),
			},
			[]*model.Installation{installation1, installation3},
		},
		{
			"owner 2, group 2, include deleted",
			&model.InstallationFilter{
				OwnerID: ownerID2,
				GroupID: groupID2,
				Paging:  model.AllPagesWithDeleted(),
			},
			[]*model.Installation{installation4},
		},
		{
			"dns 3",
			&model.InstallationFilter{
				DNS:    "dns-3.example.com",
				Paging: model.AllPagesNotDeleted(),
			},
			[]*model.Installation{installation3},
		},
		{
			"state stable",
			&model.InstallationFilter{
				State:  model.InstallationStateStable,
				Paging: model.AllPagesNotDeleted(),
			},
			[]*model.Installation{installation2},
		},
		{
			"state creation-requested",
			&model.InstallationFilter{
				State:  model.InstallationStateCreationRequested,
				Paging: model.AllPagesNotDeleted(),
			},
			[]*model.Installation{installation1, installation3},
		},
		{
			"with name",
			&model.InstallationFilter{
				Paging: model.AllPagesNotDeleted(),
				Name:   "test1",
			},
			[]*model.Installation{installation1},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.Description, func(t *testing.T) {
			actual, err := sqlStore.GetInstallations(testCase.Filter, false, false)
			require.NoError(t, err)
			require.Equal(t, testCase.Expected, actual)
		})
	}
}
// TestGetUnlockedInstallationPendingWork verifies that installations in
// work-pending states are returned until they are locked, at which point
// they drop out of the pending-work result set.
func TestGetUnlockedInstallationPendingWork(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	ownerID := model.NewID()
	groupID := model.NewID()
	creationRequestedInstallation := &model.Installation{
		Name:      "test",
		OwnerID:   ownerID,
		Version:   "version",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID,
		State:     model.InstallationStateCreationRequested,
	}
	err := sqlStore.CreateInstallation(creationRequestedInstallation, nil, fixDNSRecords(1))
	require.NoError(t, err)
	// Sleeps keep CreateAt timestamps distinct so result order is stable.
	time.Sleep(1 * time.Millisecond)
	updateRequestedInstallation := &model.Installation{
		Name:     "test2",
		OwnerID:  ownerID,
		Version:  "version",
		Affinity: model.InstallationAffinityIsolated,
		GroupID:  &groupID,
		State:    model.InstallationStateUpdateRequested,
	}
	err = sqlStore.CreateInstallation(updateRequestedInstallation, nil, fixDNSRecords(2))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	deletionRequestedInstallation := &model.Installation{
		Name:     "test3",
		OwnerID:  ownerID,
		Version:  "version",
		Affinity: model.InstallationAffinityIsolated,
		GroupID:  &groupID,
		State:    model.InstallationStateDeletionRequested,
	}
	err = sqlStore.CreateInstallation(deletionRequestedInstallation, nil, fixDNSRecords(3))
	require.NoError(t, err)
	// All three states count as pending work while unlocked.
	installations, err := sqlStore.GetUnlockedInstallationsPendingWork()
	require.NoError(t, err)
	require.Equal(t, []*model.Installation{creationRequestedInstallation, updateRequestedInstallation, deletionRequestedInstallation}, installations)
	// Locking an installation removes it from the pending-work set.
	lockerID := model.NewID()
	locked, err := sqlStore.LockInstallation(creationRequestedInstallation.ID, lockerID)
	require.NoError(t, err)
	require.True(t, locked)
	installations, err = sqlStore.GetUnlockedInstallationsPendingWork()
	require.NoError(t, err)
	require.Equal(t, []*model.Installation{updateRequestedInstallation, deletionRequestedInstallation}, installations)
	locked, err = sqlStore.LockInstallation(updateRequestedInstallation.ID, lockerID)
	require.NoError(t, err)
	require.True(t, locked)
	installations, err = sqlStore.GetUnlockedInstallationsPendingWork()
	require.NoError(t, err)
	require.Equal(t, []*model.Installation{deletionRequestedInstallation}, installations)
	locked, err = sqlStore.LockInstallation(deletionRequestedInstallation.ID, lockerID)
	require.NoError(t, err)
	require.True(t, locked)
	// With everything locked, no pending work remains.
	installations, err = sqlStore.GetUnlockedInstallationsPendingWork()
	require.NoError(t, err)
	require.Empty(t, installations)
}
// TestGetUnlockedInstallationPendingDeletion verifies that only
// installations in the deletion-pending state are returned, and only while
// they are unlocked.
func TestGetUnlockedInstallationPendingDeletion(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	ownerID := model.NewID()
	groupID := model.NewID()
	// Control installation in a non-deletion state; must not be returned.
	updateRequestedInstallation := &model.Installation{
		Name:     "test2",
		OwnerID:  ownerID,
		Version:  "version",
		Affinity: model.InstallationAffinityIsolated,
		GroupID:  &groupID,
		State:    model.InstallationStateUpdateRequested,
	}
	err := sqlStore.CreateInstallation(updateRequestedInstallation, nil, fixDNSRecords(2))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	deletionPendingInstallation := &model.Installation{
		Name:      "test",
		OwnerID:   ownerID,
		Version:   "version",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID,
		State:     model.InstallationStateDeletionPending,
	}
	err = sqlStore.CreateInstallation(deletionPendingInstallation, nil, fixDNSRecords(1))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	installations, err := sqlStore.GetUnlockedInstallationsPendingDeletion()
	require.NoError(t, err)
	require.Equal(t, []*model.Installation{deletionPendingInstallation}, installations)
	// Locking the deletion-pending installation removes it from the result.
	lockerID := model.NewID()
	locked, err := sqlStore.LockInstallation(deletionPendingInstallation.ID, lockerID)
	require.NoError(t, err)
	require.True(t, locked)
	installations, err = sqlStore.GetUnlockedInstallationsPendingDeletion()
	require.NoError(t, err)
	require.Empty(t, installations)
}
// TestGetSingleTenantDatabaseConfigForInstallation verifies round-tripping
// of the single-tenant database config, and the error returned when an
// installation has no such config.
func TestGetSingleTenantDatabaseConfigForInstallation(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	dbConfig := &model.SingleTenantDatabaseConfig{
		PrimaryInstanceType: "db.r5.large",
		ReplicaInstanceType: "db.r5.xlarge",
		ReplicasCount:       11,
	}
	installation1 := model.Installation{
		Name:                       "test",
		SingleTenantDatabaseConfig: dbConfig,
	}
	err := sqlStore.CreateInstallation(&installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	fetchedDBConfig, err := sqlStore.GetSingleTenantDatabaseConfigForInstallation(installation1.ID)
	require.NoError(t, err)
	assert.Equal(t, dbConfig, fetchedDBConfig)
	t.Run("no db config for installation", func(t *testing.T) {
		installation := model.Installation{Name: "test2"}
		err := sqlStore.CreateInstallation(&installation, nil, fixDNSRecords(2))
		require.NoError(t, err)
		// Fetching a config that was never stored must fail explicitly.
		_, err = sqlStore.GetSingleTenantDatabaseConfigForInstallation(installation.ID)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "does not exist")
	})
}
// TestLockInstallation verifies the lock/unlock lifecycle: initial unlocked
// state, exclusive acquisition, re-lock rejection, owner-checked unlock, and
// force unlock bypassing the owner check.
func TestLockInstallation(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	lockerID1 := model.NewID()
	lockerID2 := model.NewID()
	ownerID := model.NewID()
	installation1 := &model.Installation{
		Name:    "test",
		OwnerID: ownerID,
	}
	err := sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	installation2 := &model.Installation{
		Name:    "test2",
		OwnerID: ownerID,
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(2))
	require.NoError(t, err)
	t.Run("installations should start unlocked", func(t *testing.T) {
		installation1, err = sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, int64(0), installation1.LockAcquiredAt)
		require.Nil(t, installation1.LockAcquiredBy)
		installation2, err = sqlStore.GetInstallation(installation2.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, int64(0), installation2.LockAcquiredAt)
		require.Nil(t, installation2.LockAcquiredBy)
	})
	t.Run("lock an unlocked installation", func(t *testing.T) {
		locked, err := sqlStore.LockInstallation(installation1.ID, lockerID1)
		require.NoError(t, err)
		require.True(t, locked)
		installation1, err = sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		require.NotEqual(t, int64(0), installation1.LockAcquiredAt)
		require.Equal(t, lockerID1, *installation1.LockAcquiredBy)
	})
	// A held lock cannot be re-acquired, even by the current holder.
	t.Run("lock a previously locked installation", func(t *testing.T) {
		t.Run("by the same locker", func(t *testing.T) {
			locked, err := sqlStore.LockInstallation(installation1.ID, lockerID1)
			require.NoError(t, err)
			require.False(t, locked)
		})
		t.Run("by a different locker", func(t *testing.T) {
			locked, err := sqlStore.LockInstallation(installation1.ID, lockerID2)
			require.NoError(t, err)
			require.False(t, locked)
		})
	})
	t.Run("lock a second installation from a different locker", func(t *testing.T) {
		locked, err := sqlStore.LockInstallation(installation2.ID, lockerID2)
		require.NoError(t, err)
		require.True(t, locked)
		installation2, err = sqlStore.GetInstallation(installation2.ID, false, false)
		require.NoError(t, err)
		require.NotEqual(t, int64(0), installation2.LockAcquiredAt)
		require.Equal(t, lockerID2, *installation2.LockAcquiredBy)
	})
	t.Run("unlock the first installation", func(t *testing.T) {
		unlocked, err := sqlStore.UnlockInstallation(installation1.ID, lockerID1, false)
		require.NoError(t, err)
		require.True(t, unlocked)
		installation1, err = sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, int64(0), installation1.LockAcquiredAt)
		require.Nil(t, installation1.LockAcquiredBy)
	})
	// Unlocking an already-unlocked installation is a no-op (returns false).
	t.Run("unlock the first installation again", func(t *testing.T) {
		unlocked, err := sqlStore.UnlockInstallation(installation1.ID, lockerID1, false)
		require.NoError(t, err)
		require.False(t, unlocked)
		installation1, err = sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, int64(0), installation1.LockAcquiredAt)
		require.Nil(t, installation1.LockAcquiredBy)
	})
	t.Run("force unlock the first installation again", func(t *testing.T) {
		unlocked, err := sqlStore.UnlockInstallation(installation1.ID, lockerID1, true)
		require.NoError(t, err)
		require.False(t, unlocked)
		installation1, err = sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, int64(0), installation1.LockAcquiredAt)
		require.Nil(t, installation1.LockAcquiredBy)
	})
	// A non-force unlock by the wrong locker must be rejected…
	t.Run("unlock the second installation from the wrong locker", func(t *testing.T) {
		unlocked, err := sqlStore.UnlockInstallation(installation2.ID, lockerID1, false)
		require.NoError(t, err)
		require.False(t, unlocked)
		installation2, err = sqlStore.GetInstallation(installation2.ID, false, false)
		require.NoError(t, err)
		require.NotEqual(t, int64(0), installation2.LockAcquiredAt)
		require.Equal(t, lockerID2, *installation2.LockAcquiredBy)
	})
	// …but a force unlock bypasses the owner check.
	t.Run("force unlock the second installation from the wrong locker", func(t *testing.T) {
		unlocked, err := sqlStore.UnlockInstallation(installation2.ID, lockerID1, true)
		require.NoError(t, err)
		require.True(t, unlocked)
		installation2, err = sqlStore.GetInstallation(installation2.ID, false, false)
		require.NoError(t, err)
		require.Equal(t, int64(0), installation2.LockAcquiredAt)
		require.Nil(t, installation2.LockAcquiredBy)
	})
}
// TestUpdateInstallation verifies that UpdateInstallation persists field
// changes, that unrelated installations are untouched, and that a
// group-merged installation cannot be saved back.
func TestUpdateInstallation(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	ownerID1 := model.NewID()
	ownerID2 := model.NewID()
	groupID2 := model.NewID()
	group1 := &model.Group{
		Version: "group1-version",
		Image:   "custom/image",
		MattermostEnv: model.EnvVarMap{
			"Key1": model.EnvVar{Value: "Value1"},
		},
	}
	err := sqlStore.CreateGroup(group1, nil)
	require.NoError(t, err)
	groupID1 := group1.ID
	time.Sleep(1 * time.Millisecond)
	someBool := false
	installation1 := &model.Installation{
		Name:      "test",
		OwnerID:   ownerID1,
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		// Fully-populated env var to exercise (de)serialization of the
		// nested Kubernetes ValueFrom structures.
		MattermostEnv: model.EnvVarMap{
			"Var1": model.EnvVar{
				Value: "Var1Value",
				ValueFrom: &corev1.EnvVarSource{
					FieldRef: &corev1.ObjectFieldSelector{
						APIVersion: "1",
						FieldPath:  "some/path/neat",
					},
					ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
						Key:      "key_string",
						Optional: &someBool,
						LocalObjectReference: corev1.LocalObjectReference{
							Name: "configMap_localObjectReference",
						},
					},
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: "secret_localObjectReference",
						},
						Key:      "key_secret",
						Optional: &someBool,
					},
				},
			},
		},
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID1,
		CRVersion: model.DefaultCRVersion,
		State:     model.InstallationStateCreationRequested,
	}
	err = sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	installation2 := &model.Installation{
		Name:      "test2",
		OwnerID:   ownerID1,
		Version:   "version2",
		Image:     "custom/image",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &groupID2,
		State:     model.InstallationStateStable,
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(2))
	require.NoError(t, err)
	// Mutate every updatable field of installation1 before saving.
	installation1.OwnerID = ownerID2
	installation1.Version = "version3"
	// Fixed: previously this second line reassigned Version, silently
	// clobbering "version3"; "custom/image" is clearly the Image field.
	installation1.Image = "custom/image"
	installation1.Size = mmv1alpha1.Size1000String
	installation1.Affinity = model.InstallationAffinityIsolated
	installation1.GroupID = &groupID2
	installation1.CRVersion = model.V1betaCRVersion
	installation1.State = model.InstallationStateDeletionRequested
	installation1.PriorityEnv = model.EnvVarMap{
		"V1": model.EnvVar{
			Value: "test",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					APIVersion: "1",
					FieldPath:  "some/path/neat",
				},
				ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
					Key:      "key_string",
					Optional: &someBool,
					LocalObjectReference: corev1.LocalObjectReference{
						Name: "configMap_localObjectReference",
					},
				},
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: "secret_localObjectReference",
					},
					Key:      "key_secret",
					Optional: &someBool,
				},
			},
		},
	}
	err = sqlStore.UpdateInstallation(installation1)
	require.NoError(t, err)
	// A second update (group change back) must also persist cleanly.
	installation1.GroupID = &groupID1
	err = sqlStore.UpdateInstallation(installation1)
	require.NoError(t, err)
	actualInstallation1, err := sqlStore.GetInstallation(installation1.ID, false, false)
	require.NoError(t, err)
	require.Equal(t, installation1, actualInstallation1)
	// installation2 must be unaffected by installation1's updates.
	actualInstallation2, err := sqlStore.GetInstallation(installation2.ID, false, false)
	require.NoError(t, err)
	require.Equal(t, installation2, actualInstallation2)
	t.Run("groups", func(t *testing.T) {
		group, err := sqlStore.GetGroup(groupID1)
		require.NoError(t, err)
		require.Equal(t, group1, group)
		t.Run("prevent saving merged installation", func(t *testing.T) {
			installation, err := sqlStore.GetInstallation(installation1.ID, true, true)
			require.NoError(t, err)
			mergedInstallation := installation1.Clone()
			mergedInstallation.MergeWithGroup(group, true)
			require.Equal(t, mergedInstallation, installation)
			// Saving an installation with merged group config is rejected.
			err = sqlStore.UpdateInstallation(installation)
			require.Error(t, err)
		})
	})
}
// TestUpdateInstallationSequence verifies that the group sequence can only
// be updated on an installation fetched with its group config merged.
func TestUpdateInstallationSequence(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	group1 := &model.Group{
		Version: "group1-version",
		MattermostEnv: model.EnvVarMap{
			"Key1": model.EnvVar{Value: "Value1"},
		},
	}
	err := sqlStore.CreateGroup(group1, nil)
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	installation1 := &model.Installation{
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		GroupID:   &group1.ID,
		State:     model.InstallationStateCreationRequested,
	}
	err = sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	// Fetched without merging group config: sequence update must fail.
	t.Run("group config not merged", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation1.ID, false, false)
		require.NoError(t, err)
		err = sqlStore.UpdateInstallationGroupSequence(installation)
		require.Error(t, err)
	})
	// Fetched with group config merged: sequence sync then update succeeds.
	t.Run("group config merged", func(t *testing.T) {
		installation, err := sqlStore.GetInstallation(installation1.ID, true, false)
		require.NoError(t, err)
		oldSequence := installation.GroupSequence
		installation.SyncGroupAndInstallationSequence()
		err = sqlStore.UpdateInstallationGroupSequence(installation)
		require.NoError(t, err)
		installation, err = sqlStore.GetInstallation(installation1.ID, true, false)
		require.NoError(t, err)
		assert.NotEqual(t, oldSequence, installation.GroupSequence)
	})
}
// TestUpdateInstallationState verifies that UpdateInstallationState
// persists only the state field and leaves other fields (e.g. Version)
// untouched in storage.
func TestUpdateInstallationState(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	installation1 := &model.Installation{
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		State:     model.InstallationStateCreationRequested,
	}
	err := sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	installation1.State = model.InstallationStateStable
	// Deliberately changed locally but expected NOT to be saved.
	installation1.Version = "new-version-that-should-not-be-saved"
	err = sqlStore.UpdateInstallationState(installation1)
	require.NoError(t, err)
	storedInstallation, err := sqlStore.GetInstallation(installation1.ID, false, false)
	require.NoError(t, err)
	assert.Equal(t, storedInstallation.State, installation1.State)
	assert.NotEqual(t, storedInstallation.Version, installation1.Version)
}
// TestGetInstallationsStatus verifies the aggregate status counters
// (total/stable/hibernating/pending-deletion/updating) as installations in
// various states are created and deleted.
func TestGetInstallationsStatus(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	// creation-requested counts toward "updating".
	installation1 := &model.Installation{
		Name:      "test",
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		State:     model.InstallationStateCreationRequested,
	}
	err := sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	status, err := sqlStore.GetInstallationsStatus()
	require.NoError(t, err)
	assert.Equal(t, int64(1), status.InstallationsTotal)
	assert.Equal(t, int64(0), status.InstallationsStable)
	assert.Equal(t, int64(0), status.InstallationsHibernating)
	assert.Equal(t, int64(0), status.InstallationsPendingDeletion)
	assert.Equal(t, int64(1), status.InstallationsUpdating)
	time.Sleep(1 * time.Millisecond)
	// stable installation.
	installation2 := &model.Installation{
		Name:      "test2",
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		State:     model.ClusterInstallationStateStable,
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(2))
	require.NoError(t, err)
	status, err = sqlStore.GetInstallationsStatus()
	require.NoError(t, err)
	assert.Equal(t, int64(2), status.InstallationsTotal)
	assert.Equal(t, int64(1), status.InstallationsStable)
	assert.Equal(t, int64(0), status.InstallationsHibernating)
	assert.Equal(t, int64(0), status.InstallationsPendingDeletion)
	assert.Equal(t, int64(1), status.InstallationsUpdating)
	time.Sleep(1 * time.Millisecond)
	// hibernating installation.
	installation3 := &model.Installation{
		Name:      "test3",
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		State:     model.InstallationStateHibernating,
	}
	err = sqlStore.CreateInstallation(installation3, nil, fixDNSRecords(3))
	require.NoError(t, err)
	status, err = sqlStore.GetInstallationsStatus()
	require.NoError(t, err)
	assert.Equal(t, int64(3), status.InstallationsTotal)
	assert.Equal(t, int64(1), status.InstallationsStable)
	assert.Equal(t, int64(1), status.InstallationsHibernating)
	assert.Equal(t, int64(0), status.InstallationsPendingDeletion)
	assert.Equal(t, int64(1), status.InstallationsUpdating)
	time.Sleep(1 * time.Millisecond)
	// deletion-pending installation.
	installation4 := &model.Installation{
		Name:      "test4",
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		State:     model.InstallationStateDeletionPending,
	}
	err = sqlStore.CreateInstallation(installation4, nil, fixDNSRecords(4))
	require.NoError(t, err)
	status, err = sqlStore.GetInstallationsStatus()
	require.NoError(t, err)
	assert.Equal(t, int64(4), status.InstallationsTotal)
	assert.Equal(t, int64(1), status.InstallationsStable)
	assert.Equal(t, int64(1), status.InstallationsHibernating)
	assert.Equal(t, int64(1), status.InstallationsPendingDeletion)
	assert.Equal(t, int64(1), status.InstallationsUpdating)
	time.Sleep(1 * time.Millisecond)
	// Deleting installation1 removes it from both total and updating.
	err = sqlStore.DeleteInstallation(installation1.ID)
	require.NoError(t, err)
	status, err = sqlStore.GetInstallationsStatus()
	require.NoError(t, err)
	assert.Equal(t, int64(3), status.InstallationsTotal)
	assert.Equal(t, int64(1), status.InstallationsStable)
	assert.Equal(t, int64(1), status.InstallationsHibernating)
	assert.Equal(t, int64(1), status.InstallationsPendingDeletion)
	assert.Equal(t, int64(0), status.InstallationsUpdating)
}
// TestGetInstallationCount verifies GetInstallationsCount with group
// filtering and deleted-record inclusion/exclusion.
func TestGetInstallationCount(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	groupID := "g1"
	err := sqlStore.CreateGroup(&model.Group{
		ID:   groupID,
		Name: "group 1",
	}, []*model.Annotation{})
	assert.NoError(t, err)
	installation1 := &model.Installation{
		OwnerID: model.NewID(),
		Name:    "installation 1",
		GroupID: &groupID,
	}
	err = sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	assert.NoError(t, err)
	installation2 := &model.Installation{
		OwnerID: model.NewID(),
		Name:    "installation 2",
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(2))
	assert.NoError(t, err)
	t.Run("test count all", func(t *testing.T) {
		count, errTest := sqlStore.GetInstallationsCount(&model.InstallationFilter{
			Paging: model.AllPagesWithDeleted(),
		})
		assert.NoError(t, errTest)
		assert.Equal(t, int64(2), count)
	})
	// Only installation1 belongs to the group.
	t.Run("test count filter group", func(t *testing.T) {
		count, errTest := sqlStore.GetInstallationsCount(&model.InstallationFilter{
			Paging:  model.AllPagesWithDeleted(),
			GroupID: groupID,
		})
		assert.NoError(t, errTest)
		assert.Equal(t, int64(1), count)
	})
	// Delete one installation for the following tests
	err = sqlStore.DeleteInstallation(installation2.ID)
	assert.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	t.Run("test count all with deleted", func(t *testing.T) {
		count, err := sqlStore.GetInstallationsCount(&model.InstallationFilter{
			Paging: model.AllPagesWithDeleted(),
		})
		assert.NoError(t, err)
		assert.Equal(t, int64(2), count)
	})
	t.Run("test count all without deleted", func(t *testing.T) {
		count, err := sqlStore.GetInstallationsCount(&model.InstallationFilter{
			Paging: model.AllPagesNotDeleted(),
		})
		assert.NoError(t, err)
		assert.Equal(t, int64(1), count)
	})
}
// TestUpdateInstallationCRVersion verifies that UpdateInstallationCRVersion
// persists the CR version for an installation.
//
// NOTE(review): the installation is created with V1betaCRVersion and then
// "updated" to the same constant, so the final assertion would pass even if
// the update were a no-op — consider creating with a different CR version
// to make this test meaningful; confirm which other CR version constants
// exist in model.
func TestUpdateInstallationCRVersion(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	installation1 := &model.Installation{
		OwnerID:   model.NewID(),
		Version:   "version",
		License:   "this-is-a-license",
		Database:  model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size:      mmv1alpha1.Size100String,
		Affinity:  model.InstallationAffinityIsolated,
		State:     model.InstallationStateCreationRequested,
		CRVersion: model.V1betaCRVersion,
	}
	err := sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(3))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	err = sqlStore.UpdateInstallationCRVersion(installation1.ID, model.V1betaCRVersion)
	require.NoError(t, err)
	storedInstallation, err := sqlStore.GetInstallation(installation1.ID, false, false)
	require.NoError(t, err)
	assert.Equal(t, storedInstallation.CRVersion, model.V1betaCRVersion)
}
// TestGetInstallationsTotalDatabaseWeight verifies that database weight is
// computed per installation state and summed correctly across a mix of
// stable and hibernating installations.
func TestGetInstallationsTotalDatabaseWeight(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	// installation1 and installation2 are stable; installation3 hibernates.
	installation1 := &model.Installation{
		Name: "test",
		OwnerID: model.NewID(),
		Version: "version",
		License: "this-is-a-license",
		Database: model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size: mmv1alpha1.Size100String,
		Affinity: model.InstallationAffinityIsolated,
		State: model.InstallationStateStable,
		CRVersion: model.V1betaCRVersion,
	}
	err := sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	// brief pauses keep creation timestamps distinct
	time.Sleep(1 * time.Millisecond)
	installation2 := &model.Installation{
		Name: "test2",
		OwnerID: model.NewID(),
		Version: "version",
		License: "this-is-a-license",
		Database: model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size: mmv1alpha1.Size100String,
		Affinity: model.InstallationAffinityIsolated,
		State: model.InstallationStateStable,
		CRVersion: model.V1betaCRVersion,
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(3))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	installation3 := &model.Installation{
		Name: "test3",
		OwnerID: model.NewID(),
		Version: "version",
		License: "this-is-a-license",
		Database: model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Size: mmv1alpha1.Size100String,
		Affinity: model.InstallationAffinityIsolated,
		State: model.InstallationStateHibernating,
		CRVersion: model.V1betaCRVersion,
	}
	err = sqlStore.CreateInstallation(installation3, nil, fixDNSRecords(4))
	require.NoError(t, err)
	time.Sleep(1 * time.Millisecond)
	// An empty ID filter yields zero total weight.
	t.Run("no installations in filter", func(t *testing.T) {
		totalWeight, err := sqlStore.GetInstallationsTotalDatabaseWeight([]string{})
		require.NoError(t, err)
		assert.Equal(t, float64(0), totalWeight)
	})
	// A stable installation carries the default weight.
	t.Run("stable installation", func(t *testing.T) {
		totalWeight, err := sqlStore.GetInstallationsTotalDatabaseWeight([]string{installation1.ID})
		require.NoError(t, err)
		assert.Equal(t, installation1.GetDatabaseWeight(), totalWeight)
		assert.Equal(t, model.DefaultDatabaseWeight, totalWeight)
	})
	// A hibernating installation carries the (reduced) hibernating weight.
	t.Run("hibernating installation", func(t *testing.T) {
		totalWeight, err := sqlStore.GetInstallationsTotalDatabaseWeight([]string{installation3.ID})
		require.NoError(t, err)
		assert.Equal(t, installation3.GetDatabaseWeight(), totalWeight)
		assert.Equal(t, model.HibernatingDatabaseWeight, totalWeight)
	})
	// The total for several IDs is the sum of the individual weights.
	t.Run("three installations", func(t *testing.T) {
		totalWeight, err := sqlStore.GetInstallationsTotalDatabaseWeight([]string{
			installation1.ID,
			installation2.ID,
			installation3.ID,
		})
		require.NoError(t, err)
		assert.Equal(t, installation1.GetDatabaseWeight()+installation2.GetDatabaseWeight()+installation3.GetDatabaseWeight(), totalWeight)
	})
}
// TestDeleteInstallation verifies that deletion is a soft delete (DeleteAt is
// stamped), that unrelated installations are untouched, and that deleting an
// already-deleted installation does not change its timestamp.
func TestDeleteInstallation(t *testing.T) {
	logger := testlib.MakeLogger(t)
	sqlStore := MakeTestSQLStore(t, logger)
	defer CloseConnection(t, sqlStore)
	ownerID1 := model.NewID()
	ownerID2 := model.NewID()
	groupID1 := model.NewID()
	groupID2 := model.NewID()
	installation1 := &model.Installation{
		Name: "test",
		OwnerID: ownerID1,
		Version: "version",
		Database: model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Affinity: model.InstallationAffinityIsolated,
		GroupID: &groupID1,
		State: model.InstallationStateCreationRequested,
	}
	err := sqlStore.CreateInstallation(installation1, nil, fixDNSRecords(1))
	require.NoError(t, err)
	// brief pause keeps creation timestamps distinct
	time.Sleep(1 * time.Millisecond)
	installation2 := &model.Installation{
		Name: "test2",
		OwnerID: ownerID2,
		Version: "version2",
		Database: model.InstallationDatabaseMysqlOperator,
		Filestore: model.InstallationFilestoreMinioOperator,
		Affinity: model.InstallationAffinityIsolated,
		GroupID: &groupID2,
		State: model.InstallationStateStable,
	}
	err = sqlStore.CreateInstallation(installation2, nil, fixDNSRecords(2))
	require.NoError(t, err)
	err = sqlStore.DeleteInstallation(installation1.ID)
	require.NoError(t, err)
	// soft delete: the row remains readable with a non-zero DeleteAt
	actualInstallation1, err := sqlStore.GetInstallation(installation1.ID, false, false)
	require.NoError(t, err)
	require.NotEqual(t, 0, actualInstallation1.DeleteAt)
	// copy the stamped DeleteAt so the full-struct comparison below passes
	installation1.DeleteAt = actualInstallation1.DeleteAt
	require.Equal(t, installation1, actualInstallation1)
	// the other installation is unaffected
	actualInstallation2, err := sqlStore.GetInstallation(installation2.ID, false, false)
	require.NoError(t, err)
	require.Equal(t, installation2, actualInstallation2)
	time.Sleep(1 * time.Millisecond)
	// Deleting again shouldn't change timestamp
	err = sqlStore.DeleteInstallation(installation1.ID)
	require.NoError(t, err)
	actualInstallation1, err = sqlStore.GetInstallation(installation1.ID, false, false)
	require.NoError(t, err)
	require.Equal(t, installation1, actualInstallation1)
}
// Helpers
// createAndCheckDummyInstallation creates a minimal installation with a
// random owner and a short random name, asserts the creation succeeded, and
// returns the created record.
func createAndCheckDummyInstallation(t *testing.T, store *SQLStore) *model.Installation {
	dummy := &model.Installation{
		OwnerID: model.NewID(),
		Name:    uuid.New()[:5],
	}
	createAndCheckInstallation(t, store, dummy)
	return dummy
}
// createAndCheckInstallation stores the given installation together with one
// generated DNS record and asserts that the store assigned it an ID.
func createAndCheckInstallation(t *testing.T, store *SQLStore, installation *model.Installation) {
	dnsRecords := []*model.InstallationDNS{{
		DomainName: fmt.Sprintf("dns-%s.domain.com", model.NewID()),
	}}
	require.NoError(t, store.CreateInstallation(installation, nil, dnsRecords))
	require.NotEmpty(t, installation.ID)
}
// fixDNSRecords returns a single-element DNS record fixture whose domain
// name embeds num, so distinct calls yield distinct domains.
func fixDNSRecords(num int) []*model.InstallationDNS {
	record := &model.InstallationDNS{DomainName: fmt.Sprintf("dns-%d.example.com", num)}
	return []*model.InstallationDNS{record}
}
|
package redis
import (
"context"
"github.com/go-redis/redis"
)
// ctx is the background context passed to every redis command.
var ctx = context.Background()
// rdb is the shared redis client; it is nil until Initialize is called.
var rdb *redis.Client
// Initialize connects the package-level client to the redis service at
// redis:6379 (no password, DB 0). It must be called before StoreToken or
// GetToken; otherwise rdb is nil and those calls panic.
// NOTE(review): the ctx-taking Set/Get calls below are the go-redis v8+ API
// (module path github.com/go-redis/redis/v8), but the import here is the
// older path - confirm the module version actually in use.
func Initialize() {
	rdb = redis.NewClient(&redis.Options{
		Addr: "redis:6379",
		Password: "",
		DB: 0,
	})
}
// StoreToken saves the CSRF token under the session ID with no expiration.
func StoreToken(sessionID string, token string) error {
	return rdb.Set(ctx, sessionID, token, 0).Err()
}
// GetToken looks up the CSRF token stored for the given session ID.
func GetToken(sessionID string) (string, error) {
	return rdb.Get(ctx, sessionID).Result()
}
|
// Package dfc is a scalable object-storage based caching system with Amazon and Google Cloud backends.
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
*/
package dfc
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/NVIDIA/dfcpub/3rdparty/glog"
)
// enumerated REVS types (opaque TBD)
const (
	smaptag = "smaptag" // tag for cluster map (Smap) replicas
	bucketmdtag = "bucketmdtag" // tag for bucket metadata replicas
	actiontag = "-action" // to make a pair (revs, action)
)
// =================== A Brief Theory of Operation =================================
//
// REVS (interface below) stands for REplicated, Versioned and Shared/Synchronized.
//
// A REVS is, typically, an object that represents some sort of cluster-wide metadata
// and, therefore, must be consistently replicated across the entire cluster.
// To that end, the "metasyncer" (metasync.go) provides a generic transport to send
// an arbitrary payload that combines any number of data units that look as follows:
//
// (shared-object, associated action-message)
//
// The action message (ActionMsg), if present, provides receivers with a context as
// to what exactly to do with the newly received versioned replica.
//
// In addition, storage targets in particular make use of the previously synchronized
// version of the cluster map delivered to them by the metasyncer itself (as part of
// the aforementioned action message). Having both the current and the previous
// cluster maps allows targets to figure out whether to rebalance the cluster, and
// how to execute the rebalancing.
//
// In addition, the metasyncer:
//
// 1) tracks already synchronized REVS objects
// 2) validates REVS versions - in particular, prevents attempts to downgrade a
// newer version
// 3) makes sure that nodes that join the cluster get updated with the current set
// of REVS replicas
// 4) handles failures to reach existing cluster members - by periodically retrying
// to update them with the current REVS versions (as long and if those members
// remain listed in the most current/recent cluster map).
//
// Last but not least, the metasyncer checks that only the currently elected
// leader (aka "primary proxy") distributes the REVS objects, thus providing for
// simple serialization of the versioned updates.
//
// The usage is easy - there is a single sync() method that accepts variable
// number of parameters. Example sync-ing asynchronously without action messages:
//
// sync(false, newsmap, p.bucketmd.cloneL())
//
// To sync with action message(s) and to block until all the replicas are delivered,
// do:
//
// pair := &revspair{ p.smap.cloneU(), &ActionMsg{...} }
// sync(true, pair)
//
// On the receiving side, the metasyncer-generated payload gets extracted,
// validated, version-compared, and the corresponding Rx handler gets then called
// with the corresponding REVS replica and additional information that includes
// the action message (and the previous version of the cluster map, if applicable).
//
// =================== end of A Brief Theory of Operation ==========================
// revs describes a REplicated, Versioned and Shared/Synchronized object that
// the metasyncer can distribute cluster-wide (see the theory of operation above).
type revs interface {
	tag() string // known tags enumerated above
	cloneL() interface{} // clone self - the impl. must take lock if need be
	version() int64 // version - locking not required
	marshal() (b []byte, err error) // json-marshal - ditto
}
// REVS paired with an action message to provide receivers with additional context
type revspair struct {
	revs revs // the versioned object being distributed
	msg *ActionMsg // tells receivers what to do with the new replica
}
// metasyncer distributes REVS replicas on behalf of the primary proxy,
// tracking what has been synced and which nodes still need retries.
type metasyncer struct {
	namedrunner
	p *proxyrunner // parent proxy; must be the primary to distribute
	synced struct {
		copies map[string]revs // by tag
	}
	pending struct {
		diamonds map[string]*daemonInfo // nodes still missing the latest replicas
		refused map[string]*daemonInfo // subset that refused the connection outright
	}
	chfeed chan []interface{} // asynchronous sync requests
	chfeedwait chan []interface{} // blocking sync requests (echoed back as handshake)
	chstop chan struct{} // stop signal
	retryTimer *time.Timer // drives periodic retries of pending nodes
}
// newmetasyncer constructs a metasyncer bound to the given proxy. The retry
// timer is created and immediately stopped; run() re-arms it only while some
// nodes are still pending.
func newmetasyncer(p *proxyrunner) (y *metasyncer) {
	y = &metasyncer{p: p}
	y.synced.copies = make(map[string]revs)
	y.pending.diamonds = make(map[string]*daemonInfo)
	y.chstop = make(chan struct{}, 4)
	y.chfeed = make(chan []interface{}, 16)
	y.chfeedwait = make(chan []interface{})
	// time.Hour is already a time.Duration - the previous
	// time.Duration(time.Hour) conversion was redundant; the interval is
	// arbitrary because the timer starts stopped.
	y.retryTimer = time.NewTimer(time.Hour)
	// no retry to run yet
	y.retryTimer.Stop()
	return
}
// sync distributes the given REVS objects (or revspairs) to the cluster.
// Only the primary proxy may distribute; otherwise the call logs and returns.
// When wait is true the call blocks until the run() loop echoes back,
// i.e. until doSync has completed.
func (y *metasyncer) sync(wait bool, revsvec ...interface{}) {
	assert(y.p != nil)
	if !y.p.primary {
		lead := "?"
		if y.p.smap.ProxySI != nil {
			lead = y.p.smap.ProxySI.DaemonID
		}
		glog.Errorf("%s (self) is not the primary proxy (%s) - cannot distribute REVS", y.p.si.DaemonID, lead)
		return
	}
	// validate: every element must be either a revs or a (revs, msg) pair
	for _, metaif := range revsvec {
		if _, ok := metaif.(revs); !ok {
			if _, ok = metaif.(*revspair); !ok {
				assert(false, fmt.Sprintf("Expecting revs or revspair, getting %T instead", metaif))
			}
		}
	}
	if wait {
		// hand off to run() and wait for it to echo back after doSync
		y.chfeedwait <- revsvec
		<-y.chfeedwait
	} else {
		y.chfeed <- revsvec
	}
}
// run is the metasyncer event loop: it services blocking and non-blocking
// sync requests, periodically retries pending nodes, and exits on stop.
func (y *metasyncer) run() error {
	glog.Infof("Starting %s", y.name)
	for {
		select {
		case revsvec, ok := <-y.chfeedwait:
			if ok {
				y.doSync(revsvec)
				// echo an empty slice back to unblock the waiting sync() caller
				var s []interface{}
				y.chfeedwait <- s
			}
		case revsvec, ok := <-y.chfeed:
			if ok {
				y.doSync(revsvec)
			}
		case <-y.retryTimer.C:
			y.handlePending()
		case <-y.chstop:
			y.retryTimer.Stop()
			return nil
		}
		// arm the retry timer only while some nodes are still out of sync
		// NOTE(review): Reset on a possibly-fired, undrained timer is a known
		// time.Timer pitfall - confirm a stale tick cannot cause a spurious
		// extra handlePending here.
		if len(y.pending.diamonds) > 0 {
			y.retryTimer.Reset(ctx.config.Periodic.RetrySyncTime)
		} else {
			y.retryTimer.Stop()
		}
	}
}
// stop signals the run() loop to exit and closes the feed channels.
// NOTE(review): closing chfeed/chfeedwait can race with a concurrent sync()
// sender (send on a closed channel panics) - confirm all callers stop
// feeding before stop is invoked.
func (y *metasyncer) stop(err error) {
	glog.Infof("Stopping %s, err: %v", y.name, err)
	y.chstop <- struct{}{}
	close(y.chstop)
	close(y.chfeed)
	close(y.chfeedwait)
}
// doSync marshals the given REVS objects (with their action messages) into a
// single payload, broadcasts it to the cluster, and records receivers that
// failed so handlePending can retry them. Connection-refused nodes get an
// immediate short retry; when a newer Smap is involved, newly joined members
// are added to the pending set as well.
func (y *metasyncer) doSync(revsvec []interface{}) {
	var (
		smap4bcast, smapSynced *Smap
		jsbytes, jsmsg []byte
		err error
		payload = make(simplekvs)
		newversions = make(map[string]revs)
		check4newmembers bool
	)
	// the most recently synced cluster map, if any
	if v, ok := y.synced.copies[smaptag]; ok {
		smapSynced = v.(*Smap)
	}
	for _, metaif := range revsvec {
		var msg = &ActionMsg{}
		// either (revs) or (revs, msg) pair
		revs, ok1 := metaif.(revs)
		if !ok1 {
			mpair, ok2 := metaif.(*revspair)
			assert(ok2)
			revs, msg = mpair.revs, mpair.msg
			if glog.V(3) {
				glog.Infof("dosync tag=%s, msg=%+v", revs.tag(), msg)
			}
		}
		tag := revs.tag()
		jsbytes, err = revs.marshal()
		assert(err == nil, err)
		// new smap always carries the previously sync-ed version (in the action message value field)
		if tag == smaptag {
			assert(msg.Value == nil, "reserved for the previously sync-ed copy")
			if smapSynced != nil {
				// note: this assignment modifies the original msg's value field
				msg.Value = smapSynced
			}
		}
		jsmsg, err = json.Marshal(msg)
		assert(err == nil, err)
		payload[tag] = string(jsbytes)
		payload[tag+actiontag] = string(jsmsg) // action message always on the wire even when empty
		newversions[tag] = revs
	}
	jsbytes, err = json.Marshal(payload)
	assert(err == nil, err)
	// choose the cluster map used for the broadcast: the one being synced
	// now, the previously synced one, or a fresh clone if the proxy's is newer
	if v, ok := newversions[smaptag]; ok {
		smap4bcast = v.(*Smap)
		check4newmembers = (smapSynced != nil)
	} else if smapSynced == nil {
		smap4bcast = y.p.smap.cloneL().(*Smap)
	} else if smapSynced.version() != y.p.smap.versionL() {
		// versions may only move forward
		assert(smapSynced.version() < y.p.smap.versionL())
		smap4bcast = y.p.smap.cloneL().(*Smap)
		check4newmembers = true
	} else {
		smap4bcast = smapSynced
	}
	y.pending.refused = make(map[string]*daemonInfo)
	urlPath := URLPath(Rversion, Rdaemon, Rmetasync)
	res := y.p.broadcastCluster(
		urlPath,
		nil, // query
		http.MethodPut,
		jsbytes,
		smap4bcast,
		ctx.config.Timeout.CplaneOperation,
	)
	// collect failures; connection-refused nodes also go into the fast-retry set
	for r := range res {
		if r.err == nil {
			continue
		}
		glog.Warningf("Failed to sync %s, err: %v (%d)", r.si.DaemonID, r.err, r.status)
		y.pending.diamonds[r.si.DaemonID] = r.si
		if IsErrConnectionRefused(r.err) {
			y.pending.refused[r.si.DaemonID] = r.si
		}
	}
	// handle connection-refused right away
	for i := 0; i < 2; i++ {
		if len(y.pending.refused) == 0 {
			break
		}
		time.Sleep(time.Second)
		y.handleRefused(urlPath, jsbytes)
	}
	// find out smap delta and, if exists, piggy-back on the handle-pending "venue"
	// (which may not be optimal)
	if check4newmembers {
		for sid, si := range smap4bcast.Tmap {
			if _, ok := smapSynced.Tmap[sid]; !ok {
				y.pending.diamonds[sid] = si
			}
		}
		for pid, pi := range smap4bcast.Pmap {
			if _, ok := smapSynced.Pmap[pid]; !ok {
				y.pending.diamonds[pid] = pi
			}
		}
	}
	// remember what was just synced, per tag
	for tag, meta := range newversions {
		y.synced.copies[tag] = meta
	}
}
// handlePending retries the full set of currently synced REVS copies against
// the nodes that previously failed, first dropping any node that has left
// the cluster. Successfully updated nodes are removed from the pending set.
func (y *metasyncer) handlePending() {
	var (
		jsbytes []byte
		err error
	)
	// forget nodes no longer present in the current cluster map
	// (deleting during range is safe in Go)
	for id := range y.pending.diamonds {
		if !y.p.smap.containsL(id) {
			delete(y.pending.diamonds, id)
		}
	}
	if len(y.pending.diamonds) == 0 {
		glog.Infoln("no pending REVS - cluster synchronized")
		return
	}
	// rebuild the payload from all synced copies (no action messages here)
	payload := make(simplekvs)
	for _, revs := range y.synced.copies {
		jsbytes, err = revs.marshal()
		assert(err == nil, err)
		tag := revs.tag()
		payload[tag] = string(jsbytes)
	}
	jsbytes, err = json.Marshal(payload)
	assert(err == nil, err)
	var servers []*daemonInfo
	for _, s := range y.pending.diamonds {
		servers = append(servers, s)
	}
	res := y.p.broadcast(
		URLPath(Rversion, Rdaemon, Rmetasync),
		nil, // query
		http.MethodPut,
		jsbytes,
		servers,
		ctx.config.Timeout.CplaneOperation,
	)
	for r := range res {
		if r.err != nil {
			glog.Warningf("... failing to sync %s, err: %v (%d)", r.si.DaemonID, r.err, r.status)
		} else {
			delete(y.pending.diamonds, r.si.DaemonID)
		}
	}
}
// handleRefused re-sends the metasync payload to the nodes that refused the
// connection on the previous attempt, clearing them from both pending sets
// on success.
func (y *metasyncer) handleRefused(urlPath string, body []byte) {
	servers := make([]*daemonInfo, 0, len(y.pending.refused))
	for _, si := range y.pending.refused {
		servers = append(servers, si)
	}
	results := y.p.broadcast(urlPath, nil, http.MethodPut, body,
		servers, ctx.config.Timeout.CplaneOperation)
	for r := range results {
		if r.err == nil {
			delete(y.pending.diamonds, r.si.DaemonID)
			delete(y.pending.refused, r.si.DaemonID)
			glog.Infoln("retried & sync-ed", r.si.DaemonID)
			continue
		}
		glog.Warningf("... failing to sync %s, err: %v (%d)", r.si.DaemonID, r.err, r.status)
	}
}
|
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/iouringfs"
"gvisor.dev/gvisor/pkg/sentry/kernel"
)
// IOUringSetup implements linux syscall io_uring_setup(2).
func IOUringSetup(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
if !kernel.IOUringEnabled {
return 0, nil, linuxerr.ENOSYS
}
entries := uint32(args[0].Uint())
paramsAddr := args[1].Pointer()
var params linux.IOUringParams
if entries == 0 {
return 0, nil, linuxerr.EINVAL
}
if _, err := params.CopyIn(t, paramsAddr); err != nil {
return 0, nil, err
}
for i := int(0); i < len(params.Resv); i++ {
if params.Resv[i] != 0 {
return 0, nil, linuxerr.EINVAL
}
}
// List of currently supported flags in our IO_URING implementation.
const supportedFlags = 0 // Currently support none
// Since we don't implement everything, we fail explicitly on flags that are unimplemented.
if params.Flags|supportedFlags != supportedFlags {
return 0, nil, linuxerr.EINVAL
}
vfsObj := t.Kernel().VFS()
iouringfd, err := iouringfs.New(t, vfsObj, entries, ¶ms)
if err != nil {
// return 0, nil, err
return 0, nil, linuxerr.EPERM
}
defer iouringfd.DecRef(t)
fd, err := t.NewFDFrom(0, iouringfd, kernel.FDFlags{
// O_CLOEXEC is always set up. See io_uring/io_uring.c:io_uring_install_fd().
CloseOnExec: true,
})
if err != nil {
return 0, nil, err
}
if _, err := params.CopyOut(t, paramsAddr); err != nil {
return 0, nil, err
}
return uintptr(fd), nil, nil
}
// IOUringEnter implements linux syscall io_uring_enter(2).
// Only IORING_ENTER_GETEVENTS is supported, signal masks are not, and the
// actual submission processing is delegated to the iouringfs implementation.
func IOUringEnter(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	if !kernel.IOUringEnabled {
		return 0, nil, linuxerr.ENOSYS
	}
	fd := int32(args[0].Int())
	toSubmit := uint32(args[1].Uint())
	minComplete := uint32(args[2].Uint())
	flags := uint32(args[3].Uint())
	sigSet := args[4].Pointer()
	// Linux-style error return value; uintptr(-1) wraps to all-ones.
	ret := -1
	// List of currently supported flags for io_uring_enter(2).
	const supportedFlags = linux.IORING_ENTER_GETEVENTS
	// Since we don't implement everything, we fail explicitly on flags that are unimplemented.
	if flags|supportedFlags != supportedFlags {
		return uintptr(ret), nil, linuxerr.EINVAL
	}
	// Currently don't support replacing an existing signal mask.
	if sigSet != hostarch.Addr(0) {
		return uintptr(ret), nil, linuxerr.EFAULT
	}
	// If a user requested to submit zero SQEs, then we don't process any and return right away.
	// NOTE(review): this returns -1 with a nil error - confirm callers expect
	// that rather than 0 for the zero-submission success case.
	if toSubmit == 0 {
		return uintptr(ret), nil, nil
	}
	file := t.GetFile(fd)
	if file == nil {
		return uintptr(ret), nil, linuxerr.EBADF
	}
	defer file.DecRef(t)
	// The descriptor must actually refer to an io_uring instance.
	iouringfd, ok := file.Impl().(*iouringfs.FileDescription)
	if !ok {
		return uintptr(ret), nil, linuxerr.EBADF
	}
	ret, err := iouringfd.ProcessSubmissions(t, toSubmit, minComplete, flags)
	if err != nil {
		return uintptr(ret), nil, err
	}
	return uintptr(ret), nil, nil
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package size_test
import (
"context"
"os"
"testing"
"time"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/benchmarks/base"
"gvisor.dev/gvisor/test/benchmarks/harness"
"gvisor.dev/gvisor/test/benchmarks/tools"
)
// BenchmarkSizeEmpty creates N empty containers and reads memory usage from
// /proc/meminfo.
// The before/after MemAvailable difference approximates per-container cost.
func BenchmarkSizeEmpty(b *testing.B) {
	machine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
	defer machine.CleanUp()
	meminfo := tools.Meminfo{}
	ctx := context.Background()
	containers := make([]*dockerutil.Container, 0, b.N)
	// DropCaches before the test.
	harness.DropCaches(machine)
	// Check available memory on 'machine'.
	cmd, args := meminfo.MakeCmd()
	before, err := machine.RunCommand(cmd, args...)
	if err != nil {
		b.Fatalf("failed to get meminfo: %v", err)
	}
	// Make N containers.
	for i := 0; i < b.N; i++ {
		container := machine.GetContainer(ctx, b)
		containers = append(containers, container)
		if err := container.Spawn(ctx, dockerutil.RunOpts{
			Image: "benchmarks/alpine",
		}, "sh", "-c", "echo Hello && sleep 1000"); err != nil {
			// clean up everything started so far before failing
			base.CleanUpContainers(ctx, containers)
			b.Fatalf("failed to run container: %v", err)
		}
		// wait until the container has actually started producing output
		if _, err := container.WaitForOutputSubmatch(ctx, "Hello", 5*time.Second); err != nil {
			base.CleanUpContainers(ctx, containers)
			b.Fatalf("failed to read container output: %v", err)
		}
	}
	// Drop caches again before second measurement.
	harness.DropCaches(machine)
	// Check available memory after containers are up.
	after, err := machine.RunCommand(cmd, args...)
	base.CleanUpContainers(ctx, containers)
	if err != nil {
		b.Fatalf("failed to get meminfo: %v", err)
	}
	meminfo.Report(b, before, after)
}
// BenchmarkSizeNginx starts N containers running Nginx, checks that they're
// serving, and checks memory used based on /proc/meminfo.
func BenchmarkSizeNginx(b *testing.B) {
	machine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine with: %v", err)
	}
	defer machine.CleanUp()
	// DropCaches for the first measurement.
	harness.DropCaches(machine)
	// Measure MemAvailable before creating containers.
	meminfo := tools.Meminfo{}
	cmd, args := meminfo.MakeCmd()
	before, err := machine.RunCommand(cmd, args...)
	if err != nil {
		b.Fatalf("failed to run meminfo command: %v", err)
	}
	// Make N Nginx containers.
	ctx := context.Background()
	runOpts := dockerutil.RunOpts{
		Image: "benchmarks/nginx",
	}
	const port = 80
	// StartServers brings up b.N serving containers (see base.ServerArgs).
	servers := base.StartServers(ctx, b,
		base.ServerArgs{
			Machine: machine,
			Port: port,
			RunOpts: runOpts,
			Cmd: []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"},
		})
	defer base.CleanUpContainers(ctx, servers)
	// DropCaches after servers are created.
	harness.DropCaches(machine)
	// Take after measurement.
	after, err := machine.RunCommand(cmd, args...)
	if err != nil {
		b.Fatalf("failed to run meminfo command: %v", err)
	}
	meminfo.Report(b, before, after)
}
// BenchmarkSizeNode starts N containers running a Node app, checks that
// they're serving, and checks memory used based on /proc/meminfo.
func BenchmarkSizeNode(b *testing.B) {
	machine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine with: %v", err)
	}
	defer machine.CleanUp()
	// Make a redis instance for Node to connect.
	ctx := context.Background()
	redis := base.RedisInstance(ctx, b, machine)
	defer redis.CleanUp(ctx)
	// DropCaches after redis is created.
	harness.DropCaches(machine)
	// Take before measurement.
	meminfo := tools.Meminfo{}
	cmd, args := meminfo.MakeCmd()
	before, err := machine.RunCommand(cmd, args...)
	if err != nil {
		// message typo fixed ("commend" -> "command") to match the other
		// size benchmarks
		b.Fatalf("failed to run meminfo command: %v", err)
	}
	// Create N Node servers.
	runOpts := dockerutil.RunOpts{
		Image: "benchmarks/node",
		WorkDir: "/usr/src/app",
		Links: []string{redis.MakeLink("redis")},
	}
	nodeCmd := []string{"node", "index.js", "redis"}
	const port = 8080
	servers := base.StartServers(ctx, b,
		base.ServerArgs{
			Machine: machine,
			Port: port,
			RunOpts: runOpts,
			Cmd: nodeCmd,
		})
	defer base.CleanUpContainers(ctx, servers)
	// DropCaches after servers are created.
	harness.DropCaches(machine)
	// Take after measurement, reusing the same meminfo command as the other
	// size benchmarks do (the previous second MakeCmd call was redundant).
	after, err := machine.RunCommand(cmd, args...)
	if err != nil {
		b.Fatalf("failed to run meminfo command: %v", err)
	}
	meminfo.Report(b, before, after)
}
// TestMain is the entry point for the size benchmark package.
// (The previous comment referred to "package network"; this file is
// package size_test.)
func TestMain(m *testing.M) {
	harness.Init()
	os.Exit(m.Run())
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/sem/builtins"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
"github.com/golang-commonmark/markdown"
"github.com/spf13/cobra"
)
// init registers the "functions" subcommand, which renders markdown
// documentation for builtin functions, aggregates, window functions, and
// operators into the output directory (docs/generated/sql by default).
func init() {
	cmds = append(cmds, &cobra.Command{
		Use: "functions <output-dir>",
		Short: "generate markdown documentation of functions and operators",
		RunE: func(cmd *cobra.Command, args []string) error {
			outDir := filepath.Join("docs", "generated", "sql")
			if len(args) > 0 {
				outDir = args[0]
			}
			// the output directory must already exist
			if stat, err := os.Stat(outDir); err != nil {
				return err
			} else if !stat.IsDir() {
				return errors.Errorf("%q is not a directory", outDir)
			}
			// functions.md covers all builtins, categorized; aggregates and
			// window functions get their own uncategorized files
			if err := ioutil.WriteFile(
				filepath.Join(outDir, "functions.md"), generateFunctions(builtins.AllBuiltinNames, true), 0644,
			); err != nil {
				return err
			}
			if err := ioutil.WriteFile(
				filepath.Join(outDir, "aggregates.md"), generateFunctions(builtins.AllAggregateBuiltinNames, false), 0644,
			); err != nil {
				return err
			}
			if err := ioutil.WriteFile(
				filepath.Join(outDir, "window_functions.md"), generateFunctions(builtins.AllWindowBuiltinNames, false), 0644,
			); err != nil {
				return err
			}
			return ioutil.WriteFile(
				filepath.Join(outDir, "operators.md"), generateOperators(), 0644,
			)
		},
	})
}
// operation describes a single operator overload: operand type(s), the
// operator token, and the result type.
type operation struct {
	left string // left operand type (sole operand for unary operators)
	right string // right operand type; empty for unary operators
	ret string // result type
	op string // operator token, e.g. "+" or "<"
}
// String renders the operation as an HTML fragment; for unary operators
// (empty right side) only the operator and single operand are emitted.
func (o operation) String() string {
	if o.right != "" {
		return fmt.Sprintf("%s <code>%s</code> %s", linkTypeName(o.left), o.op, linkTypeName(o.right))
	}
	return fmt.Sprintf("<code>%s</code>%s", o.op, linkTypeName(o.left))
}
// operations implements sort.Interface: unary overloads sort ahead of binary
// ones, then ordering is by left type, right type, and result type.
type operations []operation

func (p operations) Len() int { return len(p) }
func (p operations) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p operations) Less(i, j int) bool {
	a, b := p[i], p[j]
	// exactly one of the two is unary: the unary one comes first
	if (a.right == "") != (b.right == "") {
		return a.right == ""
	}
	if a.left != b.left {
		return a.left < b.left
	}
	if a.right != b.right {
		return a.right < b.right
	}
	return a.ret < b.ret
}
// generateOperators renders one HTML table per operator, collecting unary,
// binary, and comparison overloads from the tree package, sorted and
// de-duplicated by their rendered row.
func generateOperators() []byte {
	ops := make(map[string]operations)
	// unary operators: single operand, typed return
	for optyp, overloads := range tree.UnaryOps {
		op := optyp.String()
		for _, untyped := range overloads {
			v := untyped.(*tree.UnaryOp)
			ops[op] = append(ops[op], operation{
				left: v.Typ.String(),
				ret: v.ReturnType.String(),
				op: op,
			})
		}
	}
	// binary operators: two operands, typed return
	for optyp, overloads := range tree.BinOps {
		op := optyp.String()
		for _, untyped := range overloads {
			v := untyped.(*tree.BinOp)
			left := v.LeftType.String()
			right := v.RightType.String()
			ops[op] = append(ops[op], operation{
				left: left,
				right: right,
				ret: v.ReturnType.String(),
				op: op,
			})
		}
	}
	// comparison operators always return bool
	for optyp, overloads := range tree.CmpOps {
		op := optyp.String()
		for _, untyped := range overloads {
			v := untyped.(*tree.CmpOp)
			left := v.LeftType.String()
			right := v.RightType.String()
			ops[op] = append(ops[op], operation{
				left: left,
				right: right,
				ret: "bool",
				op: op,
			})
		}
	}
	// sort each operator's overloads in place and emit operators alphabetically
	var opstrs []string
	for k, v := range ops {
		sort.Sort(v)
		opstrs = append(opstrs, k)
	}
	sort.Strings(opstrs)
	b := new(bytes.Buffer)
	// seen de-duplicates identical rendered rows across overloads
	seen := map[string]bool{}
	for _, op := range opstrs {
		fmt.Fprintf(b, "<table><thead>\n")
		fmt.Fprintf(b, "<tr><td><code>%s</code></td><td>Return</td></tr>\n", op)
		fmt.Fprintf(b, "</thead><tbody>\n")
		for _, v := range ops[op] {
			s := fmt.Sprintf("<tr><td>%s</td><td>%s</td></tr>\n", v.String(), linkTypeName(v.ret))
			if seen[s] {
				continue
			}
			seen[s] = true
			b.WriteString(s)
		}
		fmt.Fprintf(b, "</tbody></table>")
		fmt.Fprintln(b)
	}
	return b.Bytes()
}
// TODO(mjibson): use the exported value from sql/parser/pg_builtins.go.
// notUsableInfo marks builtins that exist only for PostgreSQL compatibility;
// generateFunctions skips any overload carrying this info string.
const notUsableInfo = "Not usable; exposed only for compatibility with PostgreSQL."
// generateFunctions renders HTML tables of builtin function signatures and
// descriptions for the given builtin names. When categorize is true the
// output is grouped under per-category headings (aggregates and window
// functions are then omitted since they get their own files).
func generateFunctions(from []string, categorize bool) []byte {
	functions := make(map[string][]string)
	seen := make(map[string]struct{})
	md := markdown.New(markdown.XHTMLOutput(true), markdown.Nofollow(true))
	for _, name := range from {
		// NB: funcs can appear more than once i.e. upper/lowercase variants for
		// faster lookups, so normalize to lowercase and de-dupe using a set.
		name = strings.ToLower(name)
		if _, ok := seen[name]; ok {
			continue
		}
		seen[name] = struct{}{}
		props, fns := builtins.GetBuiltinProperties(name)
		if !props.ShouldDocument() {
			continue
		}
		for _, fn := range fns {
			// skip compatibility-only stubs
			if fn.Info == notUsableInfo {
				continue
			}
			// We generate docs for both aggregates and window functions in separate
			// files, so we want to omit them when processing all builtins.
			if categorize && (props.Class == tree.AggregateClass || props.Class == tree.WindowClass) {
				continue
			}
			args := fn.Types.String()
			retType := fn.InferReturnTypeFromInputArgTypes(fn.Types.Types())
			ret := retType.String()
			// fall back to the return type as the category when none is set
			cat := props.Category
			if cat == "" {
				cat = strings.ToUpper(ret)
			}
			if !categorize {
				cat = ""
			}
			extra := ""
			if fn.Info != "" {
				// Render the info field to HTML upfront, because Markdown
				// won't do it automatically in a table context.
				// Boo Markdown, bad Markdown.
				// TODO(knz): Do not use Markdown.
				info := md.RenderToString([]byte(fn.Info))
				extra = fmt.Sprintf("<span class=\"funcdesc\">%s</span>", info)
			}
			s := fmt.Sprintf("<tr><td><a name=\"%s\"></a><code>%s(%s) &rarr; %s</code></td><td>%s</td></tr>", name, name, linkArguments(args), linkArguments(ret), extra)
			functions[cat] = append(functions[cat], s)
		}
	}
	// sort the rows within each category, then the categories themselves
	var cats []string
	for k, v := range functions {
		sort.Strings(v)
		cats = append(cats, k)
	}
	sort.Strings(cats)
	// HACK: swap "Compatibility" to be last.
	// TODO(dt): Break up generated list be one _include per category, to allow
	// manually written copy on some sections.
	for i, cat := range cats {
		if cat == "Compatibility" {
			cats = append(append(cats[:i], cats[i+1:]...), "Compatibility")
			break
		}
	}
	b := new(bytes.Buffer)
	for _, cat := range cats {
		if categorize {
			fmt.Fprintf(b, "### %s functions\n\n", cat)
		}
		b.WriteString("<table>\n<thead><tr><th>Function &rarr; Returns</th><th>Description</th></tr></thead>\n")
		b.WriteString("<tbody>\n")
		b.WriteString(strings.Join(functions[cat], "\n"))
		b.WriteString("</tbody>\n</table>\n\n")
	}
	return b.Bytes()
}
// linkRE matches a trailing lowercase type name plus any ".", "[", "]"
// decoration, so the bare name can be rewritten as a link.
var linkRE = regexp.MustCompile(`([a-z]+)([\.\[\]]*)$`)

// linkArguments rewrites each comma-separated type in t as an HTML link via
// linkTypeName, re-attaching any trailing decoration afterwards.
func linkArguments(t string) string {
	parts := strings.Split(t, ", ")
	for idx := range parts {
		parts[idx] = linkRE.ReplaceAllStringFunc(parts[idx], func(m string) string {
			sub := linkRE.FindStringSubmatch(m)
			return linkTypeName(sub[1]) + sub[2]
		})
	}
	return strings.Join(parts, ", ")
}
// linkTypeName wraps known SQL type names in a link to their documentation
// page. The link target drops "[]"/"*" decoration (and maps timestamptz to
// the timestamp page) while the visible text keeps the original name, minus
// any "{}"/"{*}" suffix. Unknown names are returned with decoration trimmed.
func linkTypeName(s string) string {
	s = strings.TrimSuffix(s, "{}")
	s = strings.TrimSuffix(s, "{*}")
	display := s
	if s == "timestamptz" {
		s = "timestamp"
	}
	s = strings.TrimSuffix(s, "[]")
	s = strings.TrimSuffix(s, "*")
	switch s {
	case "int", "decimal", "float", "bool", "date", "timestamp", "interval", "string", "bytes",
		"inet", "uuid", "collatedstring", "time":
		return fmt.Sprintf("<a href=\"%s.html\">%s</a>", s, display)
	}
	return s
}
|
package main
import (
"fmt"
"log"
"net"
"net/http"
"net/rpc"
"strings"
)
// The net/rpc package provides access to the exported methods of an object
// over a network or other I/O connection. A server registers an object,
// exposing it as a service named after the object's concrete type; after
// registration its exported methods can be invoked remotely. A server may
// register multiple objects of different types, but registering more than
// one object of the same type is an error.
func main() {
	// Complete server-to-client RPC examples.
	// NOTE(review): both examples listen on port 1234 and example's listener
	// stays open, so example2's net.Listen likely fails with "address already
	// in use" - consider distinct ports.
	example()
	example2()
}
// Hello is an RPC service; Say follows the net/rpc method convention
// (two exported args, pointer reply, single error result).
type Hello struct{}

// Say joins the argument strings with single spaces into reply.
func (h *Hello) Say(args *[]string, reply *string) error {
	joined := strings.Join(*args, " ")
	*reply = joined
	return nil
}
// example demonstrates a full net/rpc round trip over a raw TCP connection:
// create a server, register a service, accept connections, dial, and call.
func example() {
	// Create the service instance.
	hello := new(Hello)
	// Create the server (returns a *rpc.Server).
	server := rpc.NewServer()
	// Register publishes the receiver's method set under its concrete type
	// name. Eligible methods are exported, take two exported-or-builtin
	// arguments (the second a pointer), and return a single error.
	// NOTE(review): the returned errors are ignored, and the RegisterName
	// call below registers the name "Hello" a second time (which reports a
	// duplicate-service error) - only one of the two calls is needed.
	server.Register(hello)
	// Like Register, but uses the provided name instead of the concrete type name.
	server.RegisterName("Hello", hello)
	// Listen on a local TCP port.
	l, err := net.Listen("tcp", "127.0.0.1:1234")
	if err != nil {
		log.Fatalf("net.Listen tcp :0: %v", err)
	}
	// Accept serves every connection from the listener; it blocks, so the
	// caller runs it in its own goroutine.
	go server.Accept(l)
	// HandleHTTP registers the server's RPC handler on rpcPath and its debug
	// handler on debugPath (on http.DefaultServeMux); http.Serve would still
	// be required to actually serve HTTP requests.
	server.HandleHTTP("/hello", "/debug")
	// Resolve the server's TCP address.
	address, err := net.ResolveTCPAddr("tcp", "127.0.0.1:1234")
	if err != nil {
		log.Fatal("ResolveTCPAddr error: ", err)
	}
	// Dial the server. net must be "tcp", "tcp4" or "tcp6"; a nil laddr
	// means a local address is chosen automatically.
	// NOTE(review): the dial error is discarded - a failed connection would
	// only surface later as a nil-conn failure.
	conn, _ := net.DialTCP("tcp", nil, address)
	defer conn.Close()
	// Create the client; it buffers the connection's write side so the reply
	// header and payload are sent as a single unit.
	client := rpc.NewClient(conn)
	defer client.Close()
	// Arguments for the remote call.
	args := &[]string{"Hello", "World!"}
	// Destination for the reply.
	reply := new(string)
	err = client.Call("Hello.Say", args, reply)
	if err != nil {
		log.Fatal("Hello error:", err)
	}
	log.Println(*reply)
}
func example2() {
hello := new(Hello)
rpc.Register(hello)
rpc.HandleHTTP()
// 设置服务端监听
l, e := net.Listen("tcp", ":1234")
if e != nil {
log.Fatal("listen error:", e)
}
go http.Serve(l, nil)
// 客户端连接服务端
client, err := rpc.DialHTTP("tcp", "127.0.0.1:1234")
if err != nil {
log.Fatal("dialing:", err)
}
args := &[]string{"Hello", "Gopher!"}
reply := new(string)
err = client.Call("Hello.Say", args, reply)
if err != nil {
log.Fatal("arith error:", err)
}
fmt.Println(*reply)
} |
/*
* REST API
*
* Rockset's REST API allows for creating and managing all resources in Rockset. Each supported endpoint is documented below. All requests must be authorized with a Rockset API key, which can be created in the [Rockset console](https://console.rockset.com). The API key must be provided as `ApiKey <api_key>` in the `Authorization` request header. For example: ``` Authorization: ApiKey aB35kDjg93J5nsf4GjwMeErAVd832F7ad4vhsW1S02kfZiab42sTsfW5Sxt25asT ``` All endpoints are only accessible via https. Build something awesome!
*
* API version: v1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package rockset
import (
"bytes"
"encoding/json"
"fmt"
)
// CreateIntegrationRequest is the request body for creating a Rockset
// integration. NOTE(review): presumably only one provider-specific field
// (S3, Kinesis, Dynamodb, Redshift, Gcs) is set per request — verify against
// the API documentation.
type CreateIntegrationRequest struct {
	// descriptive label
	Name string `json:"name"`
	// longer explanation for the integration
	Description string `json:"description,omitempty"`
	// Amazon S3 details, must have one of aws_access_key or aws_role
	S3 *S3Integration `json:"s3,omitempty"`
	// Amazon Kinesis details, must have one of aws_access_key or aws_role
	Kinesis *KinesisIntegration `json:"kinesis,omitempty"`
	// Amazon DynamoDB details, must have one of aws_access_key or aws_role
	Dynamodb *DynamodbIntegration `json:"dynamodb,omitempty"`
	// Amazon Redshift details
	Redshift *RedshiftIntegration `json:"redshift,omitempty"`
	// GCS details
	Gcs *GcsIntegration `json:"gcs,omitempty"`
}
// PrintResponse pretty-prints the request as indented JSON on stdout.
// Marshal or indent failures are reported on stdout instead of printing
// garbage.
func (m CreateIntegrationRequest) PrintResponse() {
	r, err := json.Marshal(m)
	if err != nil {
		// Was unchecked: a marshal failure previously fed invalid bytes
		// straight into json.Indent.
		fmt.Println("error parsing string")
		return
	}
	var out bytes.Buffer
	// Was []byte(string(r)) — a pointless byte→string→byte round trip.
	if err := json.Indent(&out, r, "", " "); err != nil {
		fmt.Println("error parsing string")
		return
	}
	fmt.Println(out.String())
}
|
package handlers
import (
"fmt"
"github.com/gabrielroriz/fineasy/database"
)
// InsertDBConfig interactively prompts on standard input for database
// connection settings and returns the populated configuration.
func InsertDBConfig() *database.DBConfig {
	cfg := database.DBConfig{}
	// Prompt for each field in turn; Scanf("%s") reads one
	// whitespace-delimited token per answer.
	fields := []struct {
		label string
		dst   interface{}
	}{
		{"host", &cfg.Host},
		{"port", &cfg.Port},
		{"database", &cfg.Database},
		{"username", &cfg.User},
		{"password", &cfg.Password},
	}
	for _, f := range fields {
		fmt.Printf("%s: ", f.label)
		fmt.Scanf("%s", f.dst)
	}
	return &cfg
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
)
// codes is the in-memory list of codes served as JSON by the "/" handler.
var codes []string
// main seeds the in-memory code list and serves it at "/" on port 9093.
func main() {
	codes = append(codes, "abc", "def", "fgh", "ijk")
	http.HandleFunc("/", get)
	// ListenAndServe only ever returns a non-nil error; it was silently
	// discarded before, hiding failures such as the port being in use.
	log.Fatal(http.ListenAndServe(":9093", nil))
}
func get(w http.ResponseWriter, r *http.Request) {
jsoResult, err := json.Marshal(codes)
if err != nil {
log.Fatal("Erro processing json")
}
fmt.Fprintf(w, string(jsoResult))
} |
package game
import "github.com/gorilla/websocket"
// Player is a connected game client: a world object (via the embedded
// BaseObject) together with its account credentials and websocket connection.
type Player struct {
	BaseObject
	Username string `json:"username"`
	// NOTE(review): the password is included in JSON output via the
	// "password" tag — confirm serialized players never reach other clients,
	// or tag this field `json:"-"` like Socket.
	Password string `json:"password"`
	// Socket is excluded from JSON serialization.
	Socket *websocket.Conn `json:"-"`
}
// NewPlayer creates a player anchored at (0, 0) of chunk, facing NORTH, and
// subscribes it to every chunk around it so it receives nearby world updates.
func NewPlayer(chunk *Chunk, socket *websocket.Conn) *Player {
	p := &Player{
		BaseObject: *NewBaseObject(0, 0, chunk, NORTH, "Player", TYPE_PLAYER),
		Socket:     socket,
	}
	for _, c := range chunk.Map.GetAllChunksAround(p) {
		c.Subscribe(p)
	}
	return p
}
// UnsubscribeAll removes the player from every chunk surrounding its current
// chunk, undoing the subscriptions made in NewPlayer.
func (p *Player) UnsubscribeAll() {
	for _, c := range p.Chunk.Map.GetAllChunksAround(p) {
		c.Unsubscribe(p)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.