package service
import (
"github.com/facebookgo/inject"
)
// AppService wires up the service layer via dependency injection.
func AppService() *Object {
var g inject.Graph
baseService := BaseService{}
// inject needs a pointer so Populate can fill the struct's fields;
// wiring errors are fatal at startup.
if err := g.Provide(&inject.Object{Value: &baseService}); err != nil {
panic(err)
}
if err := g.Populate(); err != nil {
panic(err)
}
return &Object{
AppService: &baseService,
}
}
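// Object aggregates the wired application services.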
type Object struct {
AppService *BaseService
}
|
package adapter
import "github.com/case2912/go-curd-clean-architecture/domain"
type UserController interface {
CreateUser(domain.User) domain.User
}
|
package majiangserver
import (
cmn "common"
"logger"
)
func InitGlobalConfig() {
logger.Info("init........config")
// Initialize globals: the Ke (chip) amount for each fan count.
keKeys := []string{"551", "552", "553", "554", "555", "556"}
for i, key := range keKeys {
if cfg := cmn.GetDaerGlobalConfig(key); cfg != nil {
KeAmount[i] = cfg.IntValue
} else {
logger.Error("GetDaerGlobalConfig return nil for key " + key)
}
}
// Initialize globals: the fan count for each MingTang (winning pattern).
minTangKeys := []struct {
key string
mt uint
}{
{"561", MTZiMo},
{"562", MTGui},
{"563", MTDaDuiZi},
{"564", MTQingYiSe},
{"565", MTNoneHongZhong},
{"566", MTQiDui},
{"567", MTGangShangHua},
{"568", MTGangShangPao},
{"569", MTQiangGang},
{"570", MTTianHu},
{"571", MTBao},
{"572", MTDingBao},
}
for _, e := range minTangKeys {
if cfg := cmn.GetDaerGlobalConfig(e.key); cfg != nil {
MinTangFanShu[e.mt] = cfg.IntValue
} else {
logger.Error("GetDaerGlobalConfig return nil for key " + e.key)
}
}
logger.Info("init........config....end")
}
// Common quantity definitions
const (
FirstCardsAmount = 13 // initial hand size
RoomMaxPlayerAmount = 4 // maximum room capacity
)
// Settlement tags: 0: none, 1: self-draw (ZiMo), 2: discard win (DianPao), 3: bankrupt (PoChan)
const (
JSNone = iota
JSZiMo
JSDianPao
JSPoChan
)
// MingTang (winning patterns)
const (
MTZiMo = iota // self-draw
MTGui // Gui
MTDaDuiZi // all pungs
MTQingYiSe // pure one suit (flush)
MTNoneHongZhong // no red dragon (joker)
MTQiDui // seven pairs
MTGangShangHua // win on the kong replacement tile
MTGangShangPao // discard win right after a kong
MTQiangGang // robbing the kong
MTTianHu // heavenly hand
MTBao // declared hand (Bao)
MTDingBao // DingBao
)
// Actions
const (
ANone = iota
AReady // ready
ACancelReady // cancel ready
ATuoGuan // enable auto-play (trusteeship)
ACancelTuoGuan // disable auto-play
AGuo // pass
AChu // discard
AMo // draw
APeng // pong
ATiePeng // pong using a joker
AAnGang // concealed kong
AMingGang // exposed kong
ATieMingGang // exposed kong using a joker
ABuGang // added kong
AHu // win
ABao // declare (Bao)
)
// Action results
const (
ACSuccess = iota // success
ACAbandon // abandoned
ACWaitingOtherPlayer // waiting for other players to act
AOccursError // an error occurred
)
// Pattern (meld) types
const (
PTUknown = iota
PTSingle // single tile
PTPair // pair
PTKan // triplet
PTGang // kong
PTAnGang // concealed kong
PTSZ // sequence (ShunZi)
)
const (
REFull = iota
)
// Game progress stages
const (
RSReady = iota
RSBankerTianHuStage
RSNotBankerBaoPaiStage
RSBankerChuPaiStage
RSBankerBaoPaiStage
RSLoopWorkStage
RSSettlement
)
// KeAmount maps a fan count to the corresponding Ke (chip) amount.
var KeAmount = []int32{2, 5, 10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120, 10240, 20480, 40960, 81920, 163840, 327680, 655360}
// MinTangFanShu is the fan count awarded for each MingTang.
var MinTangFanShu = map[uint]int32{
MTZiMo: 1, // self-draw
MTGui: 1, // Gui
MTDaDuiZi: 2, // all pungs
MTQingYiSe: 2, // pure one suit (flush)
MTNoneHongZhong: 3, // no red dragon (joker)
MTQiDui: 2, // seven pairs
MTGangShangHua: 1, // win on the kong replacement tile
MTGangShangPao: 3, // discard win right after a kong
MTQiangGang: 3, // robbing the kong
MTTianHu: 3, // heavenly hand
MTBao: 2, // declared hand (Bao)
MTDingBao: 1, // DingBao
}
|
package services
import (
"goscrum/server/models"
"github.com/jinzhu/gorm"
)
type ParticipantService struct {
db *gorm.DB
}
func NewParticipantService(db *gorm.DB) ParticipantService {
return ParticipantService{db: db}
}
func (service *ParticipantService) GetParticipantByUserId(userId string) (*models.Participant, error) {
participant := models.Participant{}
err := service.db.
Preload("Projects").
Where("user_id = ?", userId).
First(&participant).Error
// A missing record is not an error here: the zero-value participant is returned.
if err != nil && !gorm.IsRecordNotFoundError(err) {
return nil, err
}
return &participant, nil
}
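// Example (sketch), assuming an already-opened *gorm.DB handle `db`:
//
//	svc := NewParticipantService(db)
//	participant, err := svc.GetParticipantByUserId("42")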
|
package accounts
import (
"context"
"github.com/CMedrado/DesafioStone/pkg/domain/entities"
"github.com/google/uuid"
)
func (a *Storage) ReturnAccountID(id uuid.UUID) (entities.Account, error) {
var account entities.Account
statement := `SELECT * FROM accounts WHERE id=$1`
err := a.pool.QueryRow(context.Background(), statement, id).Scan(&account.ID, &account.Name, &account.CPF, &account.Secret, &account.Balance, &account.CreatedAt)
// A missing row surfaces as an error whose message is "no rows in result set";
// treat it as "not found" and return the zero-value account.
if err != nil && err.Error() != "no rows in result set" {
return entities.Account{}, err
}
return account, nil
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flags
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
"text/template"
)
type TemplateFlag struct {
rawTemplate string
template *template.Template
context interface{}
}
func (t *TemplateFlag) String() string {
return t.rawTemplate
}
func (t *TemplateFlag) Usage() string {
defaultUsage := "Format output with go-template."
if t.context != nil {
goType := reflect.TypeOf(t.context)
url := fmt.Sprintf("https://godoc.org/%s#%s", goType.PkgPath(), goType.Name())
defaultUsage += fmt.Sprintf(" For full struct documentation, see %s", url)
}
return defaultUsage
}
func (t *TemplateFlag) Set(value string) error {
tmpl, err := parseTemplate(value)
if err != nil {
return fmt.Errorf("setting template flag: %w", err)
}
t.rawTemplate = value
t.template = tmpl
return nil
}
func (t *TemplateFlag) Type() string {
return fmt.Sprintf("%T", t)
}
func (t *TemplateFlag) Template() *template.Template {
return t.template
}
func NewTemplateFlag(value string, context interface{}) *TemplateFlag {
return &TemplateFlag{
template: template.Must(parseTemplate(value)),
rawTemplate: value,
context: context,
}
}
func parseTemplate(value string) (*template.Template, error) {
funcs := template.FuncMap{
"json": func(v interface{}) string {
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
// Encode errors are ignored; unencodable values render as an empty string.
_ = enc.Encode(v)
return strings.TrimSpace(buf.String())
},
"join": strings.Join,
"title": strings.Title,
"lower": strings.ToLower,
"upper": strings.ToUpper,
}
return template.New("flagtemplate").Funcs(funcs).Parse(value)
}
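// Example (sketch): rendering with the custom "json" helper; `v` stands in
// for any context value:
//
//	f := NewTemplateFlag("{{json .}}", v)
//	err := f.Template().Execute(os.Stdout, v)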
|
/*
@File : httpServer.go
@Time : 2022/02/08 10:13:42
@Author : lpp
@Version : 1.0.0
@Contact : golpp@qq.com
@Desc : RESTful service
*/
package httpServer
import (
"grpc-rest/protos"
"net/http"
"github.com/gin-gonic/gin"
"github.com/golang/protobuf/jsonpb"
)
// RestServer implements a REST server for the order service.
type RestServer struct {
server *http.Server
orderService protos.OrderServiceServer // the same order service implementation injected into the gRPC server
errCh chan error
}
// NewRestServer is a convenience function for creating a RestServer
func NewRestServer(orderService protos.OrderServiceServer, port string) RestServer {
gin.SetMode(gin.DebugMode)
router := gin.Default()
rs := RestServer{
server: &http.Server{
Addr: ":" + port,
Handler: router,
},
orderService: orderService,
errCh: make(chan error),
}
// Register routes
router.POST("/order", rs.create)
router.GET("/order/:id", rs.retrieve)
router.PUT("/order", rs.update)
router.DELETE("/order", rs.delete)
router.GET("/order", rs.list)
return rs
}
// Start runs the REST server in the background, pushing any error onto the error channel
func (r RestServer) Start() {
go func() {
r.errCh <- r.server.ListenAndServe()
}()
}
// Stop shuts the server down
func (r RestServer) Stop() error {
return r.server.Close()
}
// Error returns the server's error channel
func (r RestServer) Error() chan error {
return r.errCh
}
// create is a handler that creates an order from an order request (JSON body)
func (r RestServer) create(c *gin.Context) {
var req protos.CreateOrderRequest
// unmarshal the order request; a malformed body is a client error
err := jsonpb.Unmarshal(c.Request.Body, &req)
if err != nil {
c.String(http.StatusBadRequest, "error creating order request")
return
}
// create the order via the order service
resp, err := r.orderService.Create(c.Request.Context(), &req)
if err != nil {
c.String(http.StatusInternalServerError, "error creating order")
return
}
m := &jsonpb.Marshaler{}
if err := m.Marshal(c.Writer, resp); err != nil {
c.String(http.StatusInternalServerError, "error sending order response")
}
}
func (r RestServer) retrieve(c *gin.Context) {
c.String(http.StatusNotImplemented, "not implemented yet")
}
func (r RestServer) update(c *gin.Context) {
c.String(http.StatusNotImplemented, "not implemented yet")
}
func (r RestServer) delete(c *gin.Context) {
c.String(http.StatusNotImplemented, "not implemented yet")
}
func (r RestServer) list(c *gin.Context) {
c.String(http.StatusNotImplemented, "not implemented yet")
}
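// Example (sketch): running the server; `orderSvc` stands in for any
// protos.OrderServiceServer implementation:
//
//	rs := NewRestServer(orderSvc, "8080")
//	rs.Start()
//	log.Fatal(<-rs.Error())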
|
// Copyright (c) 2020 - for information on the respective copyright owner
// see the NOTICE file and/or the repository at
// https://github.com/hyperledger-labs/perun-node
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"github.com/abiosoft/ishell"
"github.com/hyperledger-labs/perun-node/api/grpc/pb"
)
var (
sessionCmdUsage = "Usage: session [sub-command]"
sessionCmd = &ishell.Cmd{
Name: "session",
Help: "Use this command to open and close sessions." + sessionCmdUsage,
Func: sessionFn,
}
sessionOpenCmdUsage = "Usage: session open [session config file]"
sessionOpenCmd = &ishell.Cmd{
Name: "open",
Help: "Open a new session. Use tab completion to cycle through default values." + sessionOpenCmdUsage,
Completer: func([]string) []string {
return []string{"alice/session.yaml", "bob/session.yaml"} // Provide default values as autocompletion.
},
Func: sessionOpenFn,
}
sessionCloseOpts = []string{"force", "no-force"}
sessionCloseCmdUsage = "Usage: session close force|no-force"
sessionCloseCmd = &ishell.Cmd{
Name: "close",
Help: "Close the current session. Force will persist open chs" + sessionCloseCmdUsage,
Completer: func([]string) []string {
return sessionCloseOpts
},
Func: sessionCloseFn,
}
)
func init() {
sessionCmd.AddCmd(sessionOpenCmd)
sessionCmd.AddCmd(sessionCloseCmd)
}
func sessionFn(c *ishell.Context) {
if client == nil {
printNodeNotConnectedError(c)
return
}
c.Println(c.Cmd.HelpText())
}
func sessionOpenFn(c *ishell.Context) {
if client == nil {
printNodeNotConnectedError(c)
return
}
countReqArgs := 1
if len(c.Args) != countReqArgs {
printArgCountError(c, countReqArgs)
return
}
req := pb.OpenSessionReq{
ConfigFile: c.Args[0],
}
resp, err := client.OpenSession(context.Background(), &req)
if err != nil {
printCommandSendingError(c, err)
return
}
msgErr, ok := resp.Response.(*pb.OpenSessionResp_Error)
if ok {
c.Printf("%s\n\n", redf("Error opening session : %v", msgErr.Error.Error))
return
}
msg := resp.Response.(*pb.OpenSessionResp_MsgSuccess_)
sessionID = msg.MsgSuccess.SessionID
c.Printf("%s\n\n", greenf("Session opened."))
for i := range msg.MsgSuccess.RestoredChs {
chAlias := addOpenChannelID(msg.MsgSuccess.RestoredChs[i].ChID,
findPeerAlias(msg.MsgSuccess.RestoredChs[i].BalInfo.Parts))
c.Printf("%s\n", greenf("Channel restored. Alias: %s.\n%s.", chAlias,
prettifyPayChInfo(msg.MsgSuccess.RestoredChs[i])))
paymentSub(c, chAlias)
}
c.Printf("\n")
// Automatically subscribe to channel opening request notifications in this session.
channelSub(c)
}
func sessionCloseFn(c *ishell.Context) {
if client == nil {
printNodeNotConnectedError(c)
return
}
countReqArgs := 1
if len(c.Args) != countReqArgs {
printArgCountError(c, countReqArgs)
return
}
channelUnsub(c) // Close the channel opening request subscriptions before closing the session.
req := pb.CloseSessionReq{
SessionID: sessionID,
}
if c.Args[0] == "force" {
req.Force = true
} else if c.Args[0] == "no-force" {
req.Force = false
} else {
c.Printf("%s\n\n", redf("Parameter should be one of these values: %v", sessionCloseOpts))
return
}
resp, err := client.CloseSession(context.Background(), &req)
if err != nil {
printCommandSendingError(c, err)
return
}
msgErr, ok := resp.Response.(*pb.CloseSessionResp_Error)
if ok {
channelSub(c) // If there is an error in session close, re-subscribe to channel opening request notifications.
c.Printf("%s\n\n", redf("Error closing session : %v", msgErr.Error.Error))
return
}
msg := resp.Response.(*pb.CloseSessionResp_MsgSuccess_)
resetLocalCache()
c.Printf("%s\n\n", greenf("Session closed. ID: %s.", sessionID))
if c.Args[0] == "force" {
for i := range msg.MsgSuccess.OpenPayChsInfo {
chAlias := openChannelsRevMap[msg.MsgSuccess.OpenPayChsInfo[i].ChID]
c.Printf("%s\n", greenf("Channel persisted. Alias: %s.\n%s.", chAlias,
prettifyPayChInfo(msg.MsgSuccess.OpenPayChsInfo[i])))
}
}
}
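// resetLocalCache clears all session-scoped bookkeeping after a session is
// closed, so a subsequently opened session starts from a clean slate.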
func resetLocalCache() {
channelNotifCounter = 0
channelNotifList = []string{}
channelNotifMap = make(map[string]*pb.SubPayChProposalsResp_Notify)
openChannelsCounter = 0
openChannelsMap = make(map[string]*openChannelInfo)
openChannelsRevMap = make(map[string]string)
openChannelsList = []string{}
knownAliasesList = []string{}
}
|
package main
import (
"fmt"
"os"
)
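// Piece describes a tile: allowed maps the side a ball enters from to the
// exit directions it may take.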
type Piece struct {
allowed map[string]([]string)
}
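// Direction holds a move's grid offset (modx, mody) and, in out, the side
// from which the ball enters the neighboring tile.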
type Direction struct {
modx int
mody int
out string
}
var pieces = make(map[int]Piece)
var directions = make(map[string]Direction)
func main() {
var x, y, ex int
fmt.Scanf("%d %d", &x, &y)
var cmap = make([][]int, y)
for i := 0; i < y; i++ {
cmap[i] = make([]int, x)
for j := 0; j < x; j++ {
fmt.Scanf("%d", &cmap[i][j])
}
}
fmt.Scanf("%d", &ex)
fmt.Fprintln(os.Stderr, cmap) // Map
fill()
for {
var posx, posy int
var in string
fmt.Scanf("%d %d %s", &posx, &posy, &in)
p := pieces[cmap[posy][posx]]
// Iterate through outs
for _, e := range p.allowed[in] {
nposx, nposy := posx+directions[e].modx, posy+directions[e].mody
// Checks if in range
if nposx >= 0 && nposx < x && nposy >= 0 && nposy < y {
np := pieces[cmap[nposy][nposx]]
for k := range np.allowed {
if k == directions[e].out {
fmt.Printf("%d %d \n", nposx, nposy)
break
}
}
}
}
}
}
func fill() {
directions["TOP"] = Direction{0, -1, "DOWN"}
directions["DOWN"] = Direction{0, 1, "TOP"}
directions["LEFT"] = Direction{-1, 0, "RIGHT"}
directions["RIGHT"] = Direction{1, 0, "LEFT"}
pieces[0] = Piece{map[string]([]string){}}
pieces[1] = Piece{map[string]([]string){"TOP": []string{"DOWN"}, "RIGHT": []string{"DOWN"}, "LEFT": []string{"DOWN"}}}
pieces[2] = Piece{map[string]([]string){"RIGHT": []string{"LEFT"}, "LEFT": []string{"RIGHT"}}}
pieces[3] = Piece{map[string]([]string){"TOP": []string{"DOWN"}}}
pieces[4] = Piece{map[string]([]string){"TOP": []string{"LEFT"}, "RIGHT": []string{"DOWN"}}}
pieces[5] = Piece{map[string]([]string){"TOP": []string{"RIGHT"}, "LEFT": []string{"DOWN"}}}
pieces[6] = Piece{map[string]([]string){"LEFT": []string{"RIGHT"}, "RIGHT": []string{"LEFT"}}}
pieces[7] = Piece{map[string]([]string){"TOP": []string{"DOWN"}, "RIGHT": []string{"DOWN"}}}
pieces[8] = Piece{map[string]([]string){"LEFT": []string{"DOWN"}, "RIGHT": []string{"DOWN"}}}
pieces[9] = Piece{map[string]([]string){"LEFT": []string{"DOWN"}, "TOP": []string{"DOWN"}}}
pieces[10] = Piece{map[string]([]string){"TOP": []string{"LEFT"}}}
pieces[11] = Piece{map[string]([]string){"TOP": []string{"RIGHT"}}}
pieces[12] = Piece{map[string]([]string){"RIGHT": []string{"DOWN"}}}
pieces[13] = Piece{map[string]([]string){"LEFT": []string{"DOWN"}}}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"strconv"
"time"
"chromiumos/tast/common/servo"
"chromiumos/tast/errors"
"chromiumos/tast/remote/firmware"
"chromiumos/tast/remote/firmware/fixture"
"chromiumos/tast/remote/firmware/reporters"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
func init() {
testing.AddTest(&testing.Test{
Func: DevModeStress,
Desc: "Test mode aware reboot and suspend preserve dev mode over several iterations",
Contacts: []string{"tij@google.com", "cros-fw-engprod@google.com"},
Attr: []string{"group:firmware", "firmware_unstable"},
Vars: []string{"firmware.DevModeStressIters"},
HardwareDeps: hwdep.D(hwdep.ChromeEC()),
Timeout: 45 * time.Minute,
Fixture: fixture.DevMode,
})
}
func DevModeStress(ctx context.Context, s *testing.State) {
h := s.FixtValue().(*fixture.Value).Helper
if err := h.RequireServo(ctx); err != nil {
s.Fatal("Failed to connect to servo: ", err)
}
numIters := 10
if numItersStr, ok := s.Var("firmware.DevModeStressIters"); ok {
numItersInt, err := strconv.Atoi(numItersStr)
if err != nil {
s.Fatalf("Invalid value for var firmware.DevModeStressIters: got %q, expected int", numItersStr)
}
numIters = numItersInt
}
verifyBootMode := func() error {
if mainfwType, err := h.Reporter.CrossystemParam(ctx, reporters.CrossystemParamMainfwType); err != nil {
return errors.Wrap(err, "failed to get crossystem mainfw_type")
} else if mainfwType != "developer" {
return errors.Errorf("expected mainfw_type to be 'developer', got %q", mainfwType)
}
if devswBoot, err := h.Reporter.CrossystemParam(ctx, reporters.CrossystemParamDevswBoot); err != nil {
return errors.Wrap(err, "failed to get crossystem devsw_boot")
} else if devswBoot != "1" {
return errors.Errorf("expected devsw_boot to be 1, got %s", devswBoot)
}
return nil
}
// Fixture ensures initially in dev mode, so for first iteration boot mode doesn't need to be checked.
for i := 0; i < numIters; i++ {
s.Logf("Running iteration %d out of %d ", i+1, numIters)
s.Log("Performing mode aware reboot")
ms, err := firmware.NewModeSwitcher(ctx, h)
if err != nil {
s.Fatal("Failed to create mode switcher: ", err)
}
if err := ms.ModeAwareReboot(ctx, firmware.WarmReset); err != nil {
s.Fatal("Failed to perform mode aware reboot: ", err)
}
s.Log("Verifying boot mode is developer")
if err := verifyBootMode(); err != nil {
s.Fatal("Failed boot mode check: ", err)
}
s.Log("Suspending DUT")
cmd := h.DUT.Conn().CommandContext(ctx, "powerd_dbus_suspend", "--delay=5")
if err := cmd.Start(); err != nil {
s.Fatal("Failed to suspend DUT: ", err)
}
s.Log("Sleeping for 5 seconds")
if err := testing.Sleep(ctx, 5*time.Second); err != nil {
s.Fatal("Failed to sleep waiting for suspend: ", err)
}
s.Log("Checking for S0ix or S3 powerstate")
if err := h.WaitForPowerStates(ctx, 1*time.Second, 60*time.Second, "S0ix", "S3"); err != nil {
s.Fatal("Failed to get S0ix or S3 powerstate: ", err)
}
s.Log("Sleeping for 5 seconds")
if err := testing.Sleep(ctx, 5*time.Second); err != nil {
s.Fatal("Failed to sleep waiting for suspend: ", err)
}
s.Log("Pressing power key to wake device")
if err := h.Servo.KeypressWithDuration(ctx, servo.PowerKey, servo.DurPress); err != nil {
s.Fatal("Failed to press power key: ", err)
}
s.Log("Wait for DUT to connect")
if err := h.WaitConnect(ctx); err != nil {
s.Fatal("Failed to wait for device to connect: ", err)
}
s.Log("Verifying boot mode is developer")
if err := verifyBootMode(); err != nil {
s.Fatal("Failed boot mode check: ", err)
}
}
}
|
package errors
// cause.go contains the interface and unwrap func for the deprecated causer interface we adopted from pkg/errors
// causer returns the underlying error; an error without a cause should return itself.
// It is based on the private `causer` interface in pkg/errors, so errors wrapped using pkg/errors can also be handled.
// Deprecated: Use the Wrapper interface instead.
type causer interface {
Cause() error
}
// Cause returns the root cause of the error (if any); it stops at the last error that does not implement the causer interface.
// If you want the direct cause, use DirectCause.
// If the error is nil, it returns nil. If the error is not wrapped, it returns the error itself.
// Errors wrapped using https://github.com/pkg/errors also satisfy this interface and can be unwrapped as well.
// TODO: might consider renaming it to Unwrap since we are deprecating the causer interface
func Cause(err error) error {
for err != nil {
switch e := err.(type) {
case Wrapper:
err = e.Unwrap()
case causer:
err = e.Cause()
default:
return err
}
}
return err
}
// DirectCause returns the direct cause of the error (if any).
// It does NOT follow the cause chain all the way down, just the first link (if any).
// If you want the root cause, use Cause.
func DirectCause(err error) error {
if err == nil {
return nil
}
switch e := err.(type) {
case Wrapper:
return e.Unwrap()
case causer:
return e.Cause()
default:
return err
}
}
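// Example (sketch): Cause walks both Wrapper and pkg/errors-style chains;
// `Wrap` here is a hypothetical helper returning an error that implements
// Wrapper:
//
//	root := io.EOF
//	err := Wrap(root, "reading header")
//	Cause(err) == root // true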
|
package client
import (
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
push "github.com/prometheus/client_golang/prometheus/push"
)
var (
promNamespace = "nemesis"
// Prometheus metrics
// Total resources scanned
totalResourcesCounter = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: promNamespace,
Name: "total_resources_scanned",
Help: "Total number of resources scanned",
},
)
// Report summaries, reported by type, status, and project
reportSummary = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: promNamespace,
Name: "report_summary",
Help: "Report summaries by type, status, and project",
},
[]string{"type", "name", "status", "project"},
)
)
// configureMetrics is a helper function for configuring metrics.
// Since we use a push gateway, we must configure our metrics as a push model
func configureMetrics() *push.Pusher {
// Only configure metrics collection if enabled
if *flagMetricsEnabled {
// Create the prometheus registry. We explicitly declare a registry rather than
// depend on the default registry
registry := prometheus.NewRegistry()
// Register the necessary metrics
registry.MustRegister(totalResourcesCounter)
registry.MustRegister(reportSummary)
// Configure the gateway and return the pusher
pusher := push.New(*flagMetricsGateway, "nemesis_audit").Gatherer(registry)
return pusher
}
return nil
}
// incrementMetrics is a small helper to consolidate reporting metrics that are reported for all resources
func (c *Client) incrementMetrics(typ string, name string, status string, projectID string) {
totalResourcesCounter.Inc()
reportSummary.WithLabelValues(typ, name, status, projectID).Inc()
}
// PushMetrics pushes the collected metrics from this client. Should only be called once.
func (c *Client) PushMetrics() error {
// Only push metrics if we configured it
if c.pusher != nil {
if c.metricsArePushed {
return errors.New("metrics were already pushed, make sure client.PushMetrics is only called once")
}
if err := c.pusher.Add(); err != nil {
return fmt.Errorf("failed to push metrics to gateway: %w", err)
}
// Indicate that metrics for the client have already been pushed
c.metricsArePushed = true
}
return nil
}
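// Example (sketch): a typical client lifecycle, assuming `c.pusher` was set
// from configureMetrics():
//
//	c.incrementMetrics("bucket", "my-bucket", "FAILED", "my-project")
//	if err := c.PushMetrics(); err != nil {
//		log.Println(err)
//	}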
|
package main
import (
"log"
"github.com/spf13/cobra"
)
func install(cmd *cobra.Command, args []string) {
log.Println("tbd")
}
|
package client
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
)
func TestAddsGivenParameters(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, r.URL)
}))
defer testServer.Close()
parameters := map[string]string{
"custom": "param",
}
client := New(testServer.URL, "mykey", "private")
response, _ := client.Get("/trello/api", parameters)
u, _ := url.Parse(string(response))
q := u.Query()
if strings.TrimSpace(q.Get("custom")) != "param" {
t.Error("Should have included the parameter")
}
}
func TestAddNoParameters(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, r.URL)
}))
defer testServer.Close()
client := New(testServer.URL, "mykey", "private")
response, _ := client.Get("/trello/api")
u, _ := url.Parse(string(response))
var expected = "/trello/api?key=mykey&token=private"
if strings.TrimSpace(u.String()) != expected {
t.Errorf("Expected '%s' got '%s'", expected, strings.TrimSpace(u.String()))
}
}
func TestReturnsErrorOn500(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Oops", http.StatusInternalServerError)
}))
defer testServer.Close()
client := New(testServer.URL, "user", "pass")
_, err := client.Get("/api/agents")
var expected = "Response code 500"
if err.Error() != expected {
t.Errorf("Expected '%s' got '%s'", expected, err.Error())
}
}
func TestReturnsErrorOn404(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Oops", http.StatusNotFound)
}))
defer testServer.Close()
client := New(testServer.URL, "user", "pass")
_, err := client.Get("/api/agents")
var expected = "Response code 404"
if err.Error() != expected {
t.Errorf("Expected '%s' got '%s'", expected, err.Error())
}
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
func main() {
seed := "1113122113"
fmt.Println("------- Part 1 -------")
after40 := LookAndSayMultipleTimes(seed, 40)
fmt.Printf("After 40 rounds, the result has length %d\n\n", len(after40))
fmt.Println("------- Part 2 -------")
after50 := LookAndSayMultipleTimes(after40, 10)
fmt.Printf("After 50 rounds, the result has length %d\n\n", len(after50))
}
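// LookAndSay performs one look-and-say expansion: each run of identical
// digits is replaced by the run's length followed by the digit, e.g.
// "111221" -> "312211". The '0' sentinel relies on the input never
// containing a zero digit, which holds for this puzzle's seed.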
func LookAndSay(input string) string {
lastChar := '0'
lastCount := 0
var result strings.Builder
for i := 0; i < len(input); i++ {
if rune(input[i]) == lastChar {
lastCount++
} else {
if lastChar != '0' {
result.WriteString(strconv.Itoa(lastCount))
result.WriteRune(lastChar)
}
lastChar = rune(input[i])
lastCount = 1
}
}
result.WriteString(strconv.Itoa(lastCount))
result.WriteRune(lastChar)
return result.String()
}
func LookAndSayMultipleTimes(str string, rounds int) string {
for i := 0; i < rounds; i++ {
str = LookAndSay(str)
}
return str
}
|
package text
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestReleaseNotes(t *testing.T) {
expected := "\n\n## Features :rocket:\n\n- 0000000 ci test\n\n## Bug fixes :bug:\n\n- 0000000 huge bug\n\n## Chores and Improvements :wrench:\n\n- 0000000 testing\n- 0000000 this should end up in chores\n\n## Other :package:\n\n- 0000000 merge master in something\n- 0000000 random\n\n"
sections := Sections{
Features: []Commit{{Category: "feat", Scope: "ci", Heading: "ci test"}},
Chores: []Commit{{Category: "chore", Scope: "", Heading: "testing"}, {Category: "improvement", Scope: "", Heading: "this should end up in chores"}},
Bugs: []Commit{{Category: "bug", Scope: "", Heading: "huge bug"}},
Others: []Commit{{Category: "other", Scope: "", Heading: "merge master in something"}, {Category: "bs", Scope: "", Heading: "random"}},
}
releaseNotes := ReleaseNotes(sections)
assert.Equal(t, expected, releaseNotes)
}
func TestReleaseNotesWithMissingSections(t *testing.T) {
expected := "\n\n## Features :rocket:\n\n- 0000000 ci test\n\n"
sections := Sections{
Features: []Commit{{Heading: "ci test"}},
}
releaseNotes := ReleaseNotes(sections)
assert.Equal(t, expected, releaseNotes)
}
|
//go:build e2e

package setup
import (
"github.com/Dynatrace/dynatrace-operator/test/csi"
"github.com/Dynatrace/dynatrace-operator/test/dynakube"
"github.com/Dynatrace/dynatrace-operator/test/kubeobjects/manifests"
"github.com/Dynatrace/dynatrace-operator/test/oneagent"
"github.com/Dynatrace/dynatrace-operator/test/operator"
"github.com/Dynatrace/dynatrace-operator/test/secrets"
"github.com/Dynatrace/dynatrace-operator/test/webhook"
"sigs.k8s.io/e2e-framework/pkg/features"
)
func DeploySampleApps(builder *features.FeatureBuilder, deploymentPath string) {
builder.Setup(manifests.InstallFromFile(deploymentPath))
}
func InstallDynatraceFromSource(builder *features.FeatureBuilder, secretConfig *secrets.Secret) {
if secretConfig != nil {
builder.Setup(secrets.ApplyDefault(*secretConfig))
}
builder.Setup(operator.InstallViaMake(true))
}
func InstallDynatraceFromGithub(builder *features.FeatureBuilder, secretConfig *secrets.Secret, releaseTag string) {
if secretConfig != nil {
builder.Setup(secrets.ApplyDefault(*secretConfig))
}
builder.Setup(operator.InstallFromGithub(releaseTag, true))
}
func AssessOperatorDeployment(builder *features.FeatureBuilder) {
builder.Assess("operator started", operator.WaitForDeployment())
builder.Assess("webhook started", webhook.WaitForDeployment())
builder.Assess("csi driver started", csi.WaitForDaemonset())
}
func AssessDynakubeStartup(builder *features.FeatureBuilder) {
builder.Assess("oneagent started", oneagent.WaitForDaemonset())
builder.Assess("dynakube phase changes to 'Running'", dynakube.WaitForDynakubePhase(dynakube.NewBuilder().WithDefaultObjectMeta().Build()))
}
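// Example (sketch): composing an e2e feature; `secretConfig` is assumed to
// be a previously loaded *secrets.Secret:
//
//	builder := features.New("install dynatrace")
//	InstallDynatraceFromSource(builder, secretConfig)
//	AssessOperatorDeployment(builder)
//	AssessDynakubeStartup(builder)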
|
package openstacknfv
import (
"encoding/json"
"io/ioutil"
)
// GetSriovStatus reports whether the neutron SR-IOV agent appears in the
// puppet service_names hieradata. The file is assumed to look like
// {"service_names": ["...", ...]}, as the original lookup implies.
func GetSriovStatus() bool {
fileName := "/etc/puppet/hieradata/service_names.json"
fileContent, err := ioutil.ReadFile(fileName)
if err != nil {
return false
}
var data map[string][]string
if err := json.Unmarshal(fileContent, &data); err != nil {
return false
}
for _, name := range data["service_names"] {
if name == "neutron_sriov_agent" {
return true
}
}
return false
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mm
import (
"fmt"
"sync/atomic"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/safecopy"
"gvisor.dev/gvisor/pkg/safemem"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/pgalloc"
"gvisor.dev/gvisor/pkg/sentry/usage"
)
// existingPMAsLocked checks that pmas exist for all addresses in ar, and
// support access of type (at, ignorePermissions). If so, it returns an
// iterator to the pma containing ar.Start. Otherwise it returns a terminal
// iterator.
//
// Preconditions:
// - mm.activeMu must be locked.
// - ar.Length() != 0.
func (mm *MemoryManager) existingPMAsLocked(ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) pmaIterator {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
}
first := mm.pmas.FindSegment(ar.Start)
pseg := first
for pseg.Ok() {
pma := pseg.ValuePtr()
perms := pma.effectivePerms
if ignorePermissions {
perms = pma.maxPerms
}
if !perms.SupersetOf(at) {
return pmaIterator{}
}
if needInternalMappings && pma.internalMappings.IsEmpty() {
return pmaIterator{}
}
if ar.End <= pseg.End() {
return first
}
pseg, _ = pseg.NextNonEmpty()
}
// Ran out of pmas before reaching ar.End.
return pmaIterator{}
}
// existingVecPMAsLocked returns true if pmas exist for all addresses in ars,
// and support access of type (at, ignorePermissions).
//
// Preconditions: mm.activeMu must be locked.
func (mm *MemoryManager) existingVecPMAsLocked(ars hostarch.AddrRangeSeq, at hostarch.AccessType, ignorePermissions bool, needInternalMappings bool) bool {
for ; !ars.IsEmpty(); ars = ars.Tail() {
if ar := ars.Head(); ar.Length() != 0 && !mm.existingPMAsLocked(ar, at, ignorePermissions, needInternalMappings).Ok() {
return false
}
}
return true
}
// getPMAsLocked ensures that pmas exist for all addresses in ar, and support
// access of type at. It returns:
//
// - An iterator to the pma containing ar.Start. If no pma contains ar.Start,
// the iterator is unspecified.
//
// - An iterator to the gap after the last pma containing an address in ar. If
// pmas exist for no addresses in ar, the iterator is to a gap that begins
// before ar.Start.
//
// - An error that is non-nil if pmas exist for only a subset of ar.
//
// Preconditions:
// - mm.mappingMu must be locked.
// - mm.activeMu must be locked for writing.
// - ar.Length() != 0.
// - vseg.Range().Contains(ar.Start).
// - vmas must exist for all addresses in ar, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
func (mm *MemoryManager) getPMAsLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
if !vseg.Ok() {
panic("terminal vma iterator")
}
if !vseg.Range().Contains(ar.Start) {
panic(fmt.Sprintf("initial vma %v does not cover start of ar %v", vseg.Range(), ar))
}
}
// Page-align ar so that all AddrRanges are aligned.
end, ok := ar.End.RoundUp()
var alignerr error
if !ok {
end = ar.End.RoundDown()
alignerr = linuxerr.EFAULT
}
ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
pstart, pend, perr := mm.getPMAsInternalLocked(ctx, vseg, ar, at)
if pend.Start() <= ar.Start {
return pmaIterator{}, pend, perr
}
// getPMAsInternalLocked may not have returned pstart due to iterator
// invalidation.
if !pstart.Ok() {
pstart = mm.findOrSeekPrevUpperBoundPMA(ar.Start, pend)
}
if perr != nil {
return pstart, pend, perr
}
return pstart, pend, alignerr
}
// getVecPMAsLocked ensures that pmas exist for all addresses in ars, and
// support access of type at. It returns the subset of ars for which pmas
// exist. If this is not equal to ars, it returns a non-nil error explaining
// why.
//
// Preconditions:
// - mm.mappingMu must be locked.
// - mm.activeMu must be locked for writing.
// - vmas must exist for all addresses in ars, and support accesses of type at
// (i.e. permission checks must have been performed against vmas).
func (mm *MemoryManager) getVecPMAsLocked(ctx context.Context, ars hostarch.AddrRangeSeq, at hostarch.AccessType) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
continue
}
if checkInvariants {
if !ar.WellFormed() {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
}
// Page-align ar so that all AddrRanges are aligned.
end, ok := ar.End.RoundUp()
var alignerr error
if !ok {
end = ar.End.RoundDown()
alignerr = linuxerr.EFAULT
}
ar = hostarch.AddrRange{ar.Start.RoundDown(), end}
_, pend, perr := mm.getPMAsInternalLocked(ctx, mm.vmas.FindSegment(ar.Start), ar, at)
if perr != nil {
return truncatedAddrRangeSeq(ars, arsit, pend.Start()), perr
}
if alignerr != nil {
return truncatedAddrRangeSeq(ars, arsit, pend.Start()), alignerr
}
}
return ars, nil
}
// getPMAsInternalLocked is equivalent to getPMAsLocked, with the following
// exceptions:
//
// - getPMAsInternalLocked returns a pmaIterator on a best-effort basis (that
// is, the returned iterator may be terminal, even if a pma that contains
// ar.Start exists). Returning this iterator on a best-effort basis allows
// callers that require it to use it when it's cheaply available, while also
// avoiding the overhead of retrieving it when it's not.
//
// - getPMAsInternalLocked additionally requires that ar is page-aligned.
// getPMAsInternalLocked is an implementation helper for getPMAsLocked and
// getVecPMAsLocked; other clients should call one of those instead.
func (mm *MemoryManager) getPMAsInternalLocked(ctx context.Context, vseg vmaIterator, ar hostarch.AddrRange, at hostarch.AccessType) (pmaIterator, pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
if !vseg.Ok() {
panic("terminal vma iterator")
}
if !vseg.Range().Contains(ar.Start) {
panic(fmt.Sprintf("initial vma %v does not cover start of ar %v", vseg.Range(), ar))
}
}
memCgID := pgalloc.MemoryCgroupIDFromContext(ctx)
opts := pgalloc.AllocOpts{Kind: usage.Anonymous, Dir: pgalloc.BottomUp, MemCgID: memCgID}
vma := vseg.ValuePtr()
if uintptr(ar.Start) < atomic.LoadUintptr(&vma.lastFault) {
// Detect cases where memory is accessed downwards and change memory file
// allocation order to increase the chances that pages are coalesced.
opts.Dir = pgalloc.TopDown
}
atomic.StoreUintptr(&vma.lastFault, uintptr(ar.Start))
mf := mm.mfp.MemoryFile()
// Limit the range we allocate to ar, aligned to privateAllocUnit.
maskAR := privateAligned(ar)
didUnmapAS := false
// The range in which we iterate vmas and pmas is still limited to ar, to
// ensure that we don't allocate or COW-break a pma we don't need.
pseg, pgap := mm.pmas.Find(ar.Start)
pstart := pseg
for {
// Get pmas for this vma.
vsegAR := vseg.Range().Intersect(ar)
vma := vseg.ValuePtr()
pmaLoop:
for {
switch {
case pgap.Ok() && pgap.Start() < vsegAR.End:
// Need a pma here.
optAR := vseg.Range().Intersect(pgap.Range())
if checkInvariants {
if optAR.Length() == 0 {
panic(fmt.Sprintf("vseg %v and pgap %v do not overlap", vseg, pgap))
}
}
if vma.mappable == nil {
// Private anonymous mappings get pmas by allocating.
allocAR := optAR.Intersect(maskAR)
fr, err := mf.Allocate(uint64(allocAR.Length()), opts)
if err != nil {
return pstart, pgap, err
}
if checkInvariants {
if !fr.WellFormed() || fr.Length() != uint64(allocAR.Length()) {
panic(fmt.Sprintf("Allocate(%v) returned invalid FileRange %v", allocAR.Length(), fr))
}
}
mm.addRSSLocked(allocAR)
mm.incPrivateRef(fr)
mf.IncRef(fr, memCgID)
pseg, pgap = mm.pmas.Insert(pgap, allocAR, pma{
file: mf,
off: fr.Start,
translatePerms: hostarch.AnyAccess,
effectivePerms: vma.effectivePerms,
maxPerms: vma.maxPerms,
// Since we just allocated this memory and have the
// only reference, the new pma does not need
// copy-on-write.
private: true,
}).NextNonEmpty()
pstart = pmaIterator{} // iterators invalidated
} else {
// Other mappings get pmas by translating.
optMR := vseg.mappableRangeOf(optAR)
reqAR := optAR.Intersect(ar)
reqMR := vseg.mappableRangeOf(reqAR)
perms := at
if vma.private {
// This pma will be copy-on-write; don't require write
// permission, but do require read permission to
// facilitate the copy.
//
// If at.Write is true, we will need to break
// copy-on-write immediately, which occurs after
// translation below.
perms.Read = true
perms.Write = false
}
ts, err := vma.mappable.Translate(ctx, reqMR, optMR, perms)
if checkInvariants {
if err := memmap.CheckTranslateResult(reqMR, optMR, perms, ts, err); err != nil {
panic(fmt.Sprintf("Mappable(%T).Translate(%v, %v, %v): %v", vma.mappable, reqMR, optMR, perms, err))
}
}
// Install a pma for each translation.
if len(ts) == 0 {
return pstart, pgap, err
}
pstart = pmaIterator{} // iterators invalidated
for _, t := range ts {
newpmaAR := vseg.addrRangeOf(t.Source)
newpma := pma{
file: t.File,
off: t.Offset,
translatePerms: t.Perms,
effectivePerms: vma.effectivePerms.Intersect(t.Perms),
maxPerms: vma.maxPerms.Intersect(t.Perms),
}
if vma.private {
newpma.effectivePerms.Write = false
newpma.maxPerms.Write = false
newpma.needCOW = true
}
mm.addRSSLocked(newpmaAR)
t.File.IncRef(t.FileRange(), memCgID)
// This is valid because memmap.Mappable.Translate is
// required to return Translations in increasing
// Translation.Source order.
pseg = mm.pmas.Insert(pgap, newpmaAR, newpma)
pgap = pseg.NextGap()
}
// The error returned by Translate is only significant if
// it occurred before ar.End.
if err != nil && vseg.addrRangeOf(ts[len(ts)-1].Source).End < ar.End {
return pstart, pgap, err
}
// Rewind pseg to the first pma inserted and continue the
// loop to check if we need to break copy-on-write.
pseg, pgap = mm.findOrSeekPrevUpperBoundPMA(vseg.addrRangeOf(ts[0].Source).Start, pgap), pmaGapIterator{}
continue
}
case pseg.Ok() && pseg.Start() < vsegAR.End:
oldpma := pseg.ValuePtr()
if at.Write && mm.isPMACopyOnWriteLocked(vseg, pseg) {
// Break copy-on-write by copying.
if checkInvariants {
if !oldpma.maxPerms.Read {
panic(fmt.Sprintf("pma %v needs to be copied for writing, but is not readable: %v", pseg.Range(), oldpma))
}
}
var copyAR hostarch.AddrRange
if vma := vseg.ValuePtr(); vma.effectivePerms.Execute {
// The majority of copy-on-write breaks on executable
// pages come from:
//
// - The ELF loader, which must zero out bytes on the
// last page of each segment after the end of the
// segment.
//
// - gdb's use of ptrace to insert breakpoints.
//
// Neither of these cases has enough spatial locality
// to benefit from copying nearby pages, so if the vma
// is executable, only copy the pages required.
copyAR = pseg.Range().Intersect(ar)
} else if vma.growsDown {
// In most cases, the new process will not use most of
// its stack before exiting or invoking execve(); it is
// especially unlikely to return very far down its call
// stack, since async-signal-safety concerns in
// multithreaded programs prevent the new process from
// being able to do much. So only copy up to one page
// before and after the pages required.
stackMaskAR := ar
if newStart := stackMaskAR.Start - hostarch.PageSize; newStart < stackMaskAR.Start {
stackMaskAR.Start = newStart
}
if newEnd := stackMaskAR.End + hostarch.PageSize; newEnd > stackMaskAR.End {
stackMaskAR.End = newEnd
}
copyAR = pseg.Range().Intersect(stackMaskAR)
} else {
copyAR = pseg.Range().Intersect(maskAR)
}
// Get internal mappings from the pma to copy from.
if err := pseg.getInternalMappingsLocked(); err != nil {
return pstart, pseg.PrevGap(), err
}
// Copy contents.
fr, err := mf.Allocate(uint64(copyAR.Length()), pgalloc.AllocOpts{
Kind: usage.Anonymous,
Mode: pgalloc.AllocateAndWritePopulate,
MemCgID: memCgID,
Reader: &safemem.BlockSeqReader{mm.internalMappingsLocked(pseg, copyAR)},
})
if _, ok := err.(safecopy.BusError); ok {
// If we got SIGBUS during the copy, deliver SIGBUS to
// userspace (instead of SIGSEGV) if we're breaking
// copy-on-write due to application page fault.
err = &memmap.BusError{err}
}
if fr.Length() == 0 {
return pstart, pseg.PrevGap(), err
}
// Unmap all of maskAR, not just copyAR, to minimize host
// syscalls. AddressSpace mappings must be removed before
// mm.decPrivateRef().
if !didUnmapAS {
mm.unmapASLocked(maskAR)
didUnmapAS = true
}
// Replace the pma with a copy in the part of the address
// range where copying was successful. This doesn't change
// RSS.
copyAR.End = copyAR.Start + hostarch.Addr(fr.Length())
if copyAR != pseg.Range() {
pseg = mm.pmas.Isolate(pseg, copyAR)
pstart = pmaIterator{} // iterators invalidated
}
oldpma = pseg.ValuePtr()
if oldpma.private {
mm.decPrivateRef(pseg.fileRange())
}
oldpma.file.DecRef(pseg.fileRange())
mm.incPrivateRef(fr)
mf.IncRef(fr, memCgID)
oldpma.file = mf
oldpma.off = fr.Start
oldpma.translatePerms = hostarch.AnyAccess
oldpma.effectivePerms = vma.effectivePerms
oldpma.maxPerms = vma.maxPerms
oldpma.needCOW = false
oldpma.private = true
oldpma.internalMappings = safemem.BlockSeq{}
// Try to merge the pma with its neighbors.
if prev := pseg.PrevSegment(); prev.Ok() {
if merged := mm.pmas.Merge(prev, pseg); merged.Ok() {
pseg = merged
pstart = pmaIterator{} // iterators invalidated
}
}
if next := pseg.NextSegment(); next.Ok() {
if merged := mm.pmas.Merge(pseg, next); merged.Ok() {
pseg = merged
pstart = pmaIterator{} // iterators invalidated
}
}
// The error returned by AllocateAndFill is only
// significant if it occurred before ar.End.
if err != nil && pseg.End() < ar.End {
return pstart, pseg.NextGap(), err
}
// Ensure pseg and pgap are correct for the next iteration
// of the loop.
pseg, pgap = pseg.NextNonEmpty()
} else if !oldpma.translatePerms.SupersetOf(at) {
// Get new pmas (with sufficient permissions) by calling
// memmap.Mappable.Translate again.
if checkInvariants {
if oldpma.private {
panic(fmt.Sprintf("private pma %v has non-maximal pma.translatePerms: %v", pseg.Range(), oldpma))
}
}
// Allow the entire pma to be replaced.
optAR := pseg.Range()
optMR := vseg.mappableRangeOf(optAR)
reqAR := optAR.Intersect(ar)
reqMR := vseg.mappableRangeOf(reqAR)
perms := oldpma.translatePerms.Union(at)
ts, err := vma.mappable.Translate(ctx, reqMR, optMR, perms)
if checkInvariants {
if err := memmap.CheckTranslateResult(reqMR, optMR, perms, ts, err); err != nil {
panic(fmt.Sprintf("Mappable(%T).Translate(%v, %v, %v): %v", vma.mappable, reqMR, optMR, perms, err))
}
}
// Remove the part of the existing pma covered by new
// Translations, then insert new pmas. This doesn't change
// RSS. Note that we don't need to call unmapASLocked: any
// existing AddressSpace mappings are still valid (though
// less permissive than the new pmas indicate) until
// Invalidate is called, and will be replaced by future
// calls to mapASLocked.
if len(ts) == 0 {
return pstart, pseg.PrevGap(), err
}
transMR := memmap.MappableRange{ts[0].Source.Start, ts[len(ts)-1].Source.End}
transAR := vseg.addrRangeOf(transMR)
pseg = mm.pmas.Isolate(pseg, transAR)
pseg.ValuePtr().file.DecRef(pseg.fileRange())
pgap = mm.pmas.Remove(pseg)
pstart = pmaIterator{} // iterators invalidated
for _, t := range ts {
newpmaAR := vseg.addrRangeOf(t.Source)
newpma := pma{
file: t.File,
off: t.Offset,
translatePerms: t.Perms,
effectivePerms: vma.effectivePerms.Intersect(t.Perms),
maxPerms: vma.maxPerms.Intersect(t.Perms),
}
if vma.private {
newpma.effectivePerms.Write = false
newpma.maxPerms.Write = false
newpma.needCOW = true
}
t.File.IncRef(t.FileRange(), memCgID)
pseg = mm.pmas.Insert(pgap, newpmaAR, newpma)
pgap = pseg.NextGap()
}
// The error returned by Translate is only significant if
// it occurred before ar.End.
if err != nil && pseg.End() < ar.End {
return pstart, pgap, err
}
// Ensure pseg and pgap are correct for the next iteration
// of the loop.
if pgap.Range().Length() == 0 {
pseg, pgap = pgap.NextSegment(), pmaGapIterator{}
} else {
pseg = pmaIterator{}
}
} else {
// We have a usable pma; continue.
pseg, pgap = pseg.NextNonEmpty()
}
default:
break pmaLoop
}
}
// Go to the next vma.
if ar.End <= vseg.End() {
if pgap.Ok() {
return pstart, pgap, nil
}
return pstart, pseg.PrevGap(), nil
}
vseg = vseg.NextSegment()
}
}
const (
// When memory is allocated for a private pma, align the allocated address
// range to a privateAllocUnit boundary when possible. Larger values of
// privateAllocUnit may reduce page faults by allowing fewer, larger pmas
// to be mapped, but may result in larger amounts of wasted memory in the
// presence of fragmentation. privateAllocUnit must be a power-of-2
// multiple of hostarch.PageSize.
privateAllocUnit = hostarch.HugePageSize
privateAllocMask = privateAllocUnit - 1
)
func privateAligned(ar hostarch.AddrRange) hostarch.AddrRange {
aligned := hostarch.AddrRange{ar.Start &^ privateAllocMask, ar.End}
if end := (ar.End + privateAllocMask) &^ privateAllocMask; end >= ar.End {
aligned.End = end
}
if checkInvariants {
if !aligned.IsSupersetOf(ar) {
panic(fmt.Sprintf("aligned AddrRange %#v is not a superset of ar %#v", aligned, ar))
}
}
return aligned
}
// isPMACopyOnWriteLocked returns true if the contents of the pma represented
// by pseg must be copied to a new private pma to be written to.
//
// If the pma is a copy-on-write private pma, and holds the only reference on
// the memory it maps, isPMACopyOnWriteLocked will take ownership of the memory
// and update the pma to indicate that it does not require copy-on-write.
//
// Preconditions:
// - vseg.Range().IsSupersetOf(pseg.Range()).
// - mm.mappingMu must be locked.
// - mm.activeMu must be locked for writing.
func (mm *MemoryManager) isPMACopyOnWriteLocked(vseg vmaIterator, pseg pmaIterator) bool {
pma := pseg.ValuePtr()
if !pma.needCOW {
return false
}
if !pma.private {
return true
}
// If we have the only reference on private memory to be copied, just take
// ownership of it instead of copying. If we do hold the only reference,
// additional references can only be taken by mm.Fork(), which is excluded
// by mm.activeMu, so this isn't racy.
mm.privateRefs.mu.Lock()
defer mm.privateRefs.mu.Unlock()
fr := pseg.fileRange()
// This check relies on mm.privateRefs.refs being kept fully merged.
rseg := mm.privateRefs.refs.FindSegment(fr.Start)
if rseg.Ok() && rseg.Value() == 1 && fr.End <= rseg.End() {
pma.needCOW = false
// pma.private => pma.translatePerms == hostarch.AnyAccess
vma := vseg.ValuePtr()
pma.effectivePerms = vma.effectivePerms
pma.maxPerms = vma.maxPerms
return false
}
return true
}
// Invalidate implements memmap.MappingSpace.Invalidate.
func (mm *MemoryManager) Invalidate(ar hostarch.AddrRange, opts memmap.InvalidateOpts) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
}
mm.activeMu.Lock()
defer mm.activeMu.Unlock()
if mm.captureInvalidations {
mm.capturedInvalidations = append(mm.capturedInvalidations, invalidateArgs{ar, opts})
return
}
mm.invalidateLocked(ar, opts.InvalidatePrivate, true)
}
// invalidateLocked removes pmas and AddressSpace mappings of those pmas for
// addresses in ar.
//
// Preconditions:
// - mm.activeMu must be locked for writing.
// - ar.Length() != 0.
// - ar must be page-aligned.
func (mm *MemoryManager) invalidateLocked(ar hostarch.AddrRange, invalidatePrivate, invalidateShared bool) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
}
var didUnmapAS bool
pseg := mm.pmas.LowerBoundSegment(ar.Start)
for pseg.Ok() && pseg.Start() < ar.End {
pma := pseg.ValuePtr()
if (invalidatePrivate && pma.private) || (invalidateShared && !pma.private) {
pseg = mm.pmas.Isolate(pseg, ar)
pma = pseg.ValuePtr()
if !didUnmapAS {
// Unmap all of ar, not just pseg.Range(), to minimize host
// syscalls. AddressSpace mappings must be removed before
// mm.decPrivateRef().
//
// Note that we do more than just ar here, and extrapolate
// to the end of any previous region that we may have mapped.
// This is done to ensure that lower layers can fully invalidate
// intermediate pagetable pages during the unmap.
var unmapAR hostarch.AddrRange
if prev := pseg.PrevSegment(); prev.Ok() {
unmapAR.Start = prev.End()
} else {
unmapAR.Start = mm.layout.MinAddr
}
if last := mm.pmas.LowerBoundSegment(ar.End); last.Ok() {
if last.Start() < ar.End {
unmapAR.End = ar.End
} else {
unmapAR.End = last.Start()
}
} else {
unmapAR.End = mm.layout.MaxAddr
}
mm.unmapASLocked(unmapAR)
didUnmapAS = true
}
if pma.private {
mm.decPrivateRef(pseg.fileRange())
}
mm.removeRSSLocked(pseg.Range())
pma.file.DecRef(pseg.fileRange())
pseg = mm.pmas.Remove(pseg).NextSegment()
} else {
pseg = pseg.NextSegment()
}
}
}
// Pin returns the memmap.File ranges currently mapped by addresses in ar in
// mm, acquiring a reference on the returned ranges which the caller must
// release by calling Unpin. If not all addresses are mapped, Pin returns a
// non-nil error. Note that Pin may return both a non-empty slice of
// PinnedRanges and a non-nil error.
//
// Pin does not prevent mapped ranges from changing, making it unsuitable for
// most I/O. It should only be used in contexts that would use get_user_pages()
// in the Linux kernel.
//
// Preconditions:
// - ar.Length() != 0.
// - ar must be page-aligned.
func (mm *MemoryManager) Pin(ctx context.Context, ar hostarch.AddrRange, at hostarch.AccessType, ignorePermissions bool) ([]PinnedRange, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 || !ar.IsPageAligned() {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
}
// Ensure that we have usable vmas.
mm.mappingMu.RLock()
vseg, vend, verr := mm.getVMAsLocked(ctx, ar, at, ignorePermissions)
if vendaddr := vend.Start(); vendaddr < ar.End {
if vendaddr <= ar.Start {
mm.mappingMu.RUnlock()
return nil, verr
}
ar.End = vendaddr
}
// Ensure that we have usable pmas.
mm.activeMu.Lock()
pseg, pend, perr := mm.getPMAsLocked(ctx, vseg, ar, at)
mm.mappingMu.RUnlock()
if pendaddr := pend.Start(); pendaddr < ar.End {
if pendaddr <= ar.Start {
mm.activeMu.Unlock()
return nil, perr
}
ar.End = pendaddr
}
memCgID := pgalloc.MemoryCgroupIDFromContext(ctx)
// Gather pmas.
var prs []PinnedRange
for pseg.Ok() && pseg.Start() < ar.End {
psar := pseg.Range().Intersect(ar)
f := pseg.ValuePtr().file
fr := pseg.fileRangeOf(psar)
f.IncRef(fr, memCgID)
prs = append(prs, PinnedRange{
Source: psar,
File: f,
Offset: fr.Start,
})
pseg = pseg.NextSegment()
}
mm.activeMu.Unlock()
// Return the first error in order of progress through ar.
if perr != nil {
return prs, perr
}
return prs, verr
}
// PinnedRanges are returned by MemoryManager.Pin.
type PinnedRange struct {
// Source is the corresponding range of addresses.
Source hostarch.AddrRange
// File is the mapped file.
File memmap.File
// Offset is the offset into File at which this PinnedRange begins.
Offset uint64
}
// FileRange returns the memmap.File offsets mapped by pr.
func (pr PinnedRange) FileRange() memmap.FileRange {
return memmap.FileRange{pr.Offset, pr.Offset + uint64(pr.Source.Length())}
}
// Unpin releases the reference held by prs.
func Unpin(prs []PinnedRange) {
for i := range prs {
prs[i].File.DecRef(prs[i].FileRange())
}
}
// movePMAsLocked moves all pmas in oldAR to newAR.
//
// Preconditions:
// - mm.activeMu must be locked for writing.
// - oldAR.Length() != 0.
// - oldAR.Length() <= newAR.Length().
// - !oldAR.Overlaps(newAR).
// - mm.pmas.IsEmptyRange(newAR).
// - oldAR and newAR must be page-aligned.
func (mm *MemoryManager) movePMAsLocked(oldAR, newAR hostarch.AddrRange) {
if checkInvariants {
if !oldAR.WellFormed() || oldAR.Length() == 0 || !oldAR.IsPageAligned() {
panic(fmt.Sprintf("invalid oldAR: %v", oldAR))
}
if !newAR.WellFormed() || newAR.Length() == 0 || !newAR.IsPageAligned() {
panic(fmt.Sprintf("invalid newAR: %v", newAR))
}
if oldAR.Length() > newAR.Length() {
panic(fmt.Sprintf("old address range %v may contain pmas that will not fit in new address range %v", oldAR, newAR))
}
if oldAR.Overlaps(newAR) {
panic(fmt.Sprintf("old and new address ranges overlap: %v, %v", oldAR, newAR))
}
// mm.pmas.IsEmptyRange is checked by mm.pmas.Insert.
}
type movedPMA struct {
oldAR hostarch.AddrRange
pma pma
}
var movedPMAs []movedPMA
pseg := mm.pmas.LowerBoundSegment(oldAR.Start)
for pseg.Ok() && pseg.Start() < oldAR.End {
pseg = mm.pmas.Isolate(pseg, oldAR)
movedPMAs = append(movedPMAs, movedPMA{
oldAR: pseg.Range(),
pma: pseg.Value(),
})
pseg = mm.pmas.Remove(pseg).NextSegment()
// No RSS change is needed since we're re-inserting the same pmas
// below.
}
off := newAR.Start - oldAR.Start
pgap := mm.pmas.FindGap(newAR.Start)
for i := range movedPMAs {
mpma := &movedPMAs[i]
pmaNewAR := hostarch.AddrRange{mpma.oldAR.Start + off, mpma.oldAR.End + off}
pgap = mm.pmas.Insert(pgap, pmaNewAR, mpma.pma).NextGap()
}
mm.unmapASLocked(oldAR)
}
// getPMAInternalMappingsLocked ensures that pmas for all addresses in ar have
// cached internal mappings. It returns:
//
// - An iterator to the gap after the last pma with internal mappings
// containing an address in ar. If internal mappings exist for no addresses in
// ar, the iterator is to a gap that begins before ar.Start.
//
// - An error that is non-nil if internal mappings exist for only a subset of
// ar.
//
// Preconditions:
// - mm.activeMu must be locked for writing.
// - pseg.Range().Contains(ar.Start).
// - pmas must exist for all addresses in ar.
// - ar.Length() != 0.
//
// Postconditions: getPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
func (mm *MemoryManager) getPMAInternalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) (pmaGapIterator, error) {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
if !pseg.Range().Contains(ar.Start) {
panic(fmt.Sprintf("initial pma %v does not cover start of ar %v", pseg.Range(), ar))
}
}
for {
if err := pseg.getInternalMappingsLocked(); err != nil {
return pseg.PrevGap(), err
}
if ar.End <= pseg.End() {
return pseg.NextGap(), nil
}
pseg, _ = pseg.NextNonEmpty()
}
}
// getVecPMAInternalMappingsLocked ensures that pmas for all addresses in ars
// have cached internal mappings. It returns the subset of ars for which
// internal mappings exist. If this is not equal to ars, it returns a non-nil
// error explaining why.
//
// Preconditions:
// - mm.activeMu must be locked for writing.
// - pmas must exist for all addresses in ar.
//
// Postconditions: getVecPMAInternalMappingsLocked does not invalidate iterators
// into mm.pmas.
func (mm *MemoryManager) getVecPMAInternalMappingsLocked(ars hostarch.AddrRangeSeq) (hostarch.AddrRangeSeq, error) {
for arsit := ars; !arsit.IsEmpty(); arsit = arsit.Tail() {
ar := arsit.Head()
if ar.Length() == 0 {
continue
}
if pend, err := mm.getPMAInternalMappingsLocked(mm.pmas.FindSegment(ar.Start), ar); err != nil {
return truncatedAddrRangeSeq(ars, arsit, pend.Start()), err
}
}
return ars, nil
}
// internalMappingsLocked returns internal mappings for addresses in ar.
//
// Preconditions:
// - mm.activeMu must be locked.
// - Internal mappings must have been previously established for all addresses
// in ar.
// - ar.Length() != 0.
// - pseg.Range().Contains(ar.Start).
func (mm *MemoryManager) internalMappingsLocked(pseg pmaIterator, ar hostarch.AddrRange) safemem.BlockSeq {
if checkInvariants {
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
if !pseg.Range().Contains(ar.Start) {
panic(fmt.Sprintf("initial pma %v does not cover start of ar %v", pseg.Range(), ar))
}
}
if ar.End <= pseg.End() {
// Since only one pma is involved, we can use pma.internalMappings
// directly, avoiding a slice allocation.
offset := uint64(ar.Start - pseg.Start())
return pseg.ValuePtr().internalMappings.DropFirst64(offset).TakeFirst64(uint64(ar.Length()))
}
var ims []safemem.Block
for {
pr := pseg.Range().Intersect(ar)
for pims := pseg.ValuePtr().internalMappings.DropFirst64(uint64(pr.Start - pseg.Start())).TakeFirst64(uint64(pr.Length())); !pims.IsEmpty(); pims = pims.Tail() {
ims = append(ims, pims.Head())
}
if ar.End <= pseg.End() {
break
}
pseg = pseg.NextSegment()
}
return safemem.BlockSeqFromSlice(ims)
}
// vecInternalMappingsLocked returns internal mappings for addresses in ars.
//
// Preconditions:
// - mm.activeMu must be locked.
// - Internal mappings must have been previously established for all addresses
// in ars.
func (mm *MemoryManager) vecInternalMappingsLocked(ars hostarch.AddrRangeSeq) safemem.BlockSeq {
var ims []safemem.Block
for ; !ars.IsEmpty(); ars = ars.Tail() {
ar := ars.Head()
if ar.Length() == 0 {
continue
}
for pims := mm.internalMappingsLocked(mm.pmas.FindSegment(ar.Start), ar); !pims.IsEmpty(); pims = pims.Tail() {
ims = append(ims, pims.Head())
}
}
return safemem.BlockSeqFromSlice(ims)
}
// incPrivateRef acquires a reference on private pages in fr.
func (mm *MemoryManager) incPrivateRef(fr memmap.FileRange) {
mm.privateRefs.mu.Lock()
defer mm.privateRefs.mu.Unlock()
refSet := &mm.privateRefs.refs
seg, gap := refSet.Find(fr.Start)
for {
switch {
case seg.Ok() && seg.Start() < fr.End:
seg = refSet.Isolate(seg, fr)
seg.SetValue(seg.Value() + 1)
seg, gap = seg.NextNonEmpty()
case gap.Ok() && gap.Start() < fr.End:
seg, gap = refSet.InsertWithoutMerging(gap, gap.Range().Intersect(fr), 1).NextNonEmpty()
default:
refSet.MergeAdjacent(fr)
return
}
}
}
// decPrivateRef releases a reference on private pages in fr.
func (mm *MemoryManager) decPrivateRef(fr memmap.FileRange) {
var freed []memmap.FileRange
mm.privateRefs.mu.Lock()
refSet := &mm.privateRefs.refs
seg := refSet.LowerBoundSegment(fr.Start)
for seg.Ok() && seg.Start() < fr.End {
seg = refSet.Isolate(seg, fr)
if old := seg.Value(); old == 1 {
freed = append(freed, seg.Range())
seg = refSet.Remove(seg).NextSegment()
} else {
seg.SetValue(old - 1)
seg = seg.NextSegment()
}
}
refSet.MergeAdjacent(fr)
mm.privateRefs.mu.Unlock()
mf := mm.mfp.MemoryFile()
for _, fr := range freed {
mf.DecRef(fr)
}
}
// addRSSLocked updates the current and maximum resident set size of a
// MemoryManager to reflect the insertion of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
func (mm *MemoryManager) addRSSLocked(ar hostarch.AddrRange) {
mm.curRSS += uint64(ar.Length())
if mm.curRSS > mm.maxRSS {
mm.maxRSS = mm.curRSS
}
}
// removeRSSLocked updates the current resident set size of a MemoryManager to
// reflect the removal of a pma at ar.
//
// Preconditions: mm.activeMu must be locked for writing.
func (mm *MemoryManager) removeRSSLocked(ar hostarch.AddrRange) {
mm.curRSS -= uint64(ar.Length())
}
// pmaSetFunctions implements segment.Functions for pmaSet.
type pmaSetFunctions struct{}
func (pmaSetFunctions) MinKey() hostarch.Addr {
return 0
}
func (pmaSetFunctions) MaxKey() hostarch.Addr {
return ^hostarch.Addr(0)
}
func (pmaSetFunctions) ClearValue(pma *pma) {
pma.file = nil
pma.internalMappings = safemem.BlockSeq{}
}
func (pmaSetFunctions) Merge(ar1 hostarch.AddrRange, pma1 pma, ar2 hostarch.AddrRange, pma2 pma) (pma, bool) {
if pma1.file != pma2.file ||
pma1.off+uint64(ar1.Length()) != pma2.off ||
pma1.translatePerms != pma2.translatePerms ||
pma1.effectivePerms != pma2.effectivePerms ||
pma1.maxPerms != pma2.maxPerms ||
pma1.needCOW != pma2.needCOW ||
pma1.private != pma2.private {
return pma{}, false
}
// Discard internal mappings instead of trying to merge them, since merging
// them requires an allocation and getting them again from the
// memmap.File might not.
pma1.internalMappings = safemem.BlockSeq{}
return pma1, true
}
func (pmaSetFunctions) Split(ar hostarch.AddrRange, p pma, split hostarch.Addr) (pma, pma) {
newlen1 := uint64(split - ar.Start)
p2 := p
p2.off += newlen1
if !p.internalMappings.IsEmpty() {
p.internalMappings = p.internalMappings.TakeFirst64(newlen1)
p2.internalMappings = p2.internalMappings.DropFirst64(newlen1)
}
return p, p2
}
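// Worked example (illustrative): splitting a two-page pma covering
// [0x1000, 0x3000) at file offset 0x8000 at address 0x2000 yields
// [0x1000, 0x2000) at offset 0x8000 and [0x2000, 0x3000) at offset 0x9000;
// any cached internal mappings are carved at the same boundary rather than
// being discarded.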
// findOrSeekPrevUpperBoundPMA returns mm.pmas.UpperBoundSegment(addr), but may do
// so by scanning linearly backward from pgap.
//
// Preconditions:
// - mm.activeMu must be locked.
// - addr <= pgap.Start().
func (mm *MemoryManager) findOrSeekPrevUpperBoundPMA(addr hostarch.Addr, pgap pmaGapIterator) pmaIterator {
if checkInvariants {
if !pgap.Ok() {
panic("terminal pma iterator")
}
if addr > pgap.Start() {
panic(fmt.Sprintf("can't seek backward to %#x from %#x", addr, pgap.Start()))
}
}
// Optimistically check if pgap.PrevSegment() is the PMA we're looking for,
// which is the case if findOrSeekPrevUpperBoundPMA is called to find the
// start of a range containing only a single PMA.
if pseg := pgap.PrevSegment(); pseg.Start() <= addr {
return pseg
}
return mm.pmas.UpperBoundSegment(addr)
}
// getInternalMappingsLocked ensures that pseg.ValuePtr().internalMappings is
// non-empty.
//
// Preconditions: mm.activeMu must be locked for writing.
func (pseg pmaIterator) getInternalMappingsLocked() error {
pma := pseg.ValuePtr()
if pma.internalMappings.IsEmpty() {
// This must use maxPerms (instead of perms) because some permission
// constraints are only visible to vmas; for example, mappings of
// read-only files have vma.maxPerms.Write unset, but this may not be
// visible to the memmap.Mappable.
perms := pma.maxPerms
// We will never execute application code through an internal mapping.
perms.Execute = false
ims, err := pma.file.MapInternal(pseg.fileRange(), perms)
if err != nil {
return err
}
pma.internalMappings = ims
}
return nil
}
func (pseg pmaIterator) fileRange() memmap.FileRange {
return pseg.fileRangeOf(pseg.Range())
}
// Preconditions:
// - pseg.Range().IsSupersetOf(ar).
// - ar.Length() != 0.
func (pseg pmaIterator) fileRangeOf(ar hostarch.AddrRange) memmap.FileRange {
if checkInvariants {
if !pseg.Ok() {
panic("terminal pma iterator")
}
if !ar.WellFormed() || ar.Length() == 0 {
panic(fmt.Sprintf("invalid ar: %v", ar))
}
if !pseg.Range().IsSupersetOf(ar) {
panic(fmt.Sprintf("ar %v out of bounds %v", ar, pseg.Range()))
}
}
pma := pseg.ValuePtr()
pstart := pseg.Start()
return memmap.FileRange{pma.off + uint64(ar.Start-pstart), pma.off + uint64(ar.End-pstart)}
}
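// Worked example (illustrative): for a pma spanning [0x1000, 0x3000) with
// pma.off = 0x8000, fileRangeOf([0x1800, 0x2000)) returns
// memmap.FileRange{0x8800, 0x9000}.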
|
package handlefunc
import (
"os"
"time"
"sync"
"strconv"
"net/http"
"net/url"
"fmt"
"html/template"
"strings"
"encoding/json"
. "DesertEagleSite/bean"
)
var iconHandler http.Handler = http.FileServer(http.Dir("html/image"))
var urlFuncMap map[string]func(w http.ResponseWriter, r *http.Request)
func init() {
urlFuncMap = make(map[string]func(w http.ResponseWriter, r *http.Request))
initSpider()
initFile()
initQiniu()
}
func parseKeyword(r *http.Request, keyname string) (string, bool) {
paramsMap, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
return "", false
}
keyword := paramsMap.Get(keyname)
if len(keyword) > 0 {
return keyword, true
} else {
return "", false
}
}
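// Illustrative only: a handler registered in urlFuncMap would typically pull
// its query parameter via parseKeyword and reply through writeResult. The
// "echo" endpoint and the "q" parameter name here are hypothetical.
func handleEcho(w http.ResponseWriter, r *http.Request) {
keyword, ok := parseKeyword(r, "q")
if !ok {
writeResult(w, r, "", fmt.Errorf("missing q parameter"))
return
}
writeResult(w, r, keyword, nil)
}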
var mux sync.Mutex
func writeLog(r *http.Request) {
mux.Lock()
defer mux.Unlock()
t := time.Now()
year, month, day := t.Date()
filename := strconv.Itoa(year) + "-" + strconv.Itoa(int(month)) + "-" + strconv.Itoa(day) + ".log"
file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
if err != nil {
return
}
defer file.Close()
file.Write([]byte(t.Format(time.UnixDate)+" "))
file.Write([]byte(r.Method + " " + r.RemoteAddr + " " + r.URL.Path + " " + r.URL.RawQuery + "\n"))
}
func writeResult(w http.ResponseWriter, r *http.Request, msg string, err error) {
var response BaseResponse
if err != nil {
response.Status = "500"
response.Message = err.Error()
} else {
response.Status = "200"
response.Message = msg
}
respBytes, _ := json.Marshal(response)
w.Write(respBytes)
}
func HandleMain(w http.ResponseWriter, r *http.Request) {
writeLog(r)
// serve the index page
if r.URL.Path == "/" {
t, err := template.ParseFiles("html/index.html")
if err != nil {
fmt.Println(err)
return
}
t.Execute(w, nil)
return
}
if r.URL.Path == "/favicon.ico" {
iconHandler.ServeHTTP(w, r);
}
if (r.URL.Path[0:5] == "/html") || (r.URL.Path[0:5] == "/data") {
handleFileServer(w, r);
return;
}
// dispatch to a registered json handler
if strings.LastIndex(r.URL.Path, "/") <= 0 {
return
}
url := r.URL.Path[1:]
if mhandleFunc, ok := urlFuncMap[url]; ok {
mhandleFunc(w, r)
return
}
}
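// Hypothetical registration sketch: init functions such as initSpider are
// expected to populate urlFuncMap so HandleMain can dispatch on the path,
// e.g.:
//
// urlFuncMap["echo"] = handleEcho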
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package api_test
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/gorilla/mux"
"github.com/mattermost/mattermost-cloud/internal/api"
"github.com/mattermost/mattermost-cloud/internal/store"
"github.com/mattermost/mattermost-cloud/internal/testlib"
"github.com/mattermost/mattermost-cloud/internal/testutil"
"github.com/mattermost/mattermost-cloud/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestClusters(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
t.Run("unknown cluster", func(t *testing.T) {
cluster, err := client.GetCluster(model.NewID())
require.NoError(t, err)
require.Nil(t, cluster)
})
t.Run("no clusters", func(t *testing.T) {
clusters, err := client.GetClusters(&model.GetClustersRequest{
Paging: model.AllPagesWithDeleted(),
})
require.NoError(t, err)
require.Empty(t, clusters)
})
t.Run("get clusters", func(t *testing.T) {
t.Run("invalid page", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/api/clusters?page=invalid&per_page=100", ts.URL))
require.NoError(t, err)
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
t.Run("invalid perPage", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/api/clusters?page=0&per_page=invalid", ts.URL))
require.NoError(t, err)
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
t.Run("no paging parameters", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/api/clusters", ts.URL))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
})
t.Run("missing page", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/api/clusters?per_page=100", ts.URL))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
})
t.Run("missing perPage", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/api/clusters?page=1", ts.URL))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
})
})
t.Run("clusters", func(t *testing.T) {
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my-annotation"},
})
require.NoError(t, err)
require.NotNil(t, cluster1)
require.Equal(t, model.ProviderAWS, cluster1.Provider)
require.Equal(t, 1, len(cluster1.Annotations))
assert.True(t, containsAnnotation("my-annotation", cluster1.Annotations))
// require.Equal(t, []string{"zone"}, cluster1.Zones)
actualCluster1, err := client.GetCluster(cluster1.ID)
require.NoError(t, err)
require.Equal(t, cluster1.ID, actualCluster1.ID)
require.Equal(t, model.ProviderAWS, actualCluster1.Provider)
// require.Equal(t, []string{"zone"}, actualCluster1.Zones)
require.Equal(t, model.ClusterStateCreationRequested, actualCluster1.State)
require.Equal(t, cluster1.Annotations, model.SortAnnotations(actualCluster1.Annotations))
time.Sleep(1 * time.Millisecond)
cluster2, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
})
require.NoError(t, err)
require.NotNil(t, cluster2)
require.Equal(t, model.ProviderAWS, cluster2.Provider)
require.Nil(t, cluster2.Annotations)
// require.Equal(t, []string{"zone"}, cluster2.Zones)
actualCluster2, err := client.GetCluster(cluster2.ID)
require.NoError(t, err)
require.Equal(t, cluster2.ID, actualCluster2.ID)
require.Equal(t, model.ProviderAWS, actualCluster2.Provider)
// require.Equal(t, []string{"zone"}, actualCluster2.Zones)
require.Equal(t, model.ClusterStateCreationRequested, actualCluster2.State)
require.Equal(t, cluster2.Annotations, actualCluster2.Annotations)
time.Sleep(1 * time.Millisecond)
cluster3, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
})
require.NoError(t, err)
require.NotNil(t, cluster3)
require.Equal(t, model.ProviderAWS, cluster3.Provider)
// require.Equal(t, []string{"zone"}, cluster3.Zones)
actualCluster3, err := client.GetCluster(cluster3.ID)
require.NoError(t, err)
require.Equal(t, cluster3.ID, actualCluster3.ID)
require.Equal(t, model.ProviderAWS, actualCluster3.Provider)
// require.Equal(t, []string{"zone"}, actualCluster3.Zones)
require.Equal(t, model.ClusterStateCreationRequested, actualCluster3.State)
t.Run("get clusters, page 0, perPage 2, exclude deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 0,
PerPage: 2,
IncludeDeleted: false,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster1, cluster2}, clusters)
})
t.Run("get clusters, page 1, perPage 2, exclude deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 1,
PerPage: 2,
IncludeDeleted: false,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster3.ToDTO(nil)}, clusters)
})
t.Run("delete cluster", func(t *testing.T) {
cluster2.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster2.Cluster)
require.NoError(t, errTest)
errTest = client.DeleteCluster(cluster2.ID)
require.NoError(t, errTest)
cluster4, errTest := client.GetCluster(cluster2.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateDeletionRequested, cluster4.State)
})
t.Run("get clusters after deletion request", func(t *testing.T) {
t.Run("page 0, perPage 2, exclude deleted", func(t *testing.T) {
cluster2Updated, errTest := client.GetCluster(cluster2.ID)
require.NoError(t, errTest)
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 0,
PerPage: 2,
IncludeDeleted: false,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster1, cluster2Updated}, clusters)
})
t.Run("page 1, perPage 2, exclude deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 1,
PerPage: 2,
IncludeDeleted: false,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster3}, clusters)
})
t.Run("page 0, perPage 2, include deleted", func(t *testing.T) {
cluster2Updated, errTest := client.GetCluster(cluster2.ID)
require.NoError(t, errTest)
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 0,
PerPage: 2,
IncludeDeleted: true,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster1, cluster2Updated}, clusters)
})
t.Run("page 1, perPage 2, include deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 1,
PerPage: 2,
IncludeDeleted: true,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster3}, clusters)
})
})
err = sqlStore.DeleteCluster(cluster2.ID)
require.NoError(t, err)
cluster2, err = client.GetCluster(cluster2.ID)
require.NoError(t, err)
require.NotEqual(t, 0, cluster2.DeleteAt)
t.Run("get clusters after actual deletion", func(t *testing.T) {
t.Run("page 0, perPage 2, exclude deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 0,
PerPage: 2,
IncludeDeleted: false,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster1, cluster3}, clusters)
})
t.Run("page 1, perPage 2, exclude deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 1,
PerPage: 2,
IncludeDeleted: false,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{}, clusters)
})
t.Run("page 0, perPage 2, include deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 0,
PerPage: 2,
IncludeDeleted: true,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster1, cluster2}, clusters)
})
t.Run("page 1, perPage 2, include deleted", func(t *testing.T) {
clusters, errTest := client.GetClusters(&model.GetClustersRequest{
Paging: model.Paging{
Page: 1,
PerPage: 2,
IncludeDeleted: true,
},
})
require.NoError(t, errTest)
require.Equal(t, []*model.ClusterDTO{cluster3}, clusters)
})
})
})
}
func TestCreateCluster(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
t.Run("invalid payload", func(t *testing.T) {
resp, errTest := http.Post(fmt.Sprintf("%s/api/clusters", ts.URL), "application/json", bytes.NewReader([]byte("invalid")))
require.NoError(t, errTest)
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
t.Run("empty payload", func(t *testing.T) {
resp, errTest := http.Post(fmt.Sprintf("%s/api/clusters", ts.URL), "application/json", bytes.NewReader([]byte("")))
require.NoError(t, errTest)
require.Equal(t, http.StatusAccepted, resp.StatusCode)
})
t.Run("invalid provider", func(t *testing.T) {
_, errTest := client.CreateCluster(&model.CreateClusterRequest{
Provider: "invalid",
Zones: []string{"zone"},
})
require.EqualError(t, errTest, "failed with status code 400")
})
t.Run("invalid annotation", func(t *testing.T) {
_, errTest := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my invalid annotation"},
})
require.EqualError(t, errTest, "failed with status code 400")
})
t.Run("valid", func(t *testing.T) {
cluster, errTest := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my-annotation"},
})
require.NoError(t, errTest)
require.Equal(t, model.ProviderAWS, cluster.Provider)
require.Equal(t, model.ClusterStateCreationRequested, cluster.State)
require.True(t, containsAnnotation("my-annotation", cluster.Annotations))
// TODO: more fields...
})
t.Run("handle annotations", func(t *testing.T) {
annotations := []*model.Annotation{
{ID: "", Name: "multi-tenant"},
{ID: "", Name: "super-awesome"},
}
for _, ann := range annotations {
errTest := sqlStore.CreateAnnotation(ann)
require.NoError(t, errTest)
}
for _, testCase := range []struct {
description string
annotations []string
expected []*model.Annotation
}{
{"nil annotations", nil, nil},
{"empty annotations", []string{}, nil},
{"with annotations", []string{"multi-tenant", "super-awesome"}, annotations},
} {
t.Run(testCase.description, func(t *testing.T) {
cluster, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: testCase.annotations,
})
require.NoError(t, err)
assert.Equal(t, testCase.expected, model.SortAnnotations(cluster.Annotations))
})
}
})
}
func TestRetryCreateCluster(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my-annotation"},
})
require.NoError(t, err)
t.Run("unknown cluster", func(t *testing.T) {
errTest := client.RetryCreateCluster(model.NewID())
require.EqualError(t, errTest, "failed with status code 404")
})
t.Run("while locked", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
err = sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, err)
lockerID := model.NewID()
locked, errTest := sqlStore.LockCluster(cluster1.ID, lockerID)
require.NoError(t, errTest)
require.True(t, locked)
defer func() {
unlocked, errDefer := sqlStore.UnlockCluster(cluster1.ID, lockerID, false)
require.NoError(t, errDefer)
require.True(t, unlocked)
}()
errTest = client.RetryCreateCluster(cluster1.ID)
require.EqualError(t, errTest, "failed with status code 409")
})
t.Run("while creating", func(t *testing.T) {
cluster1.State = model.ClusterStateCreationRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
errTest = client.RetryCreateCluster(cluster1.ID)
require.NoError(t, errTest)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateCreationRequested, cluster2.State)
assert.True(t, containsAnnotation("my-annotation", cluster2.Annotations))
})
t.Run("while stable", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
errTest = client.RetryCreateCluster(cluster1.ID)
require.EqualError(t, errTest, "failed with status code 400")
})
t.Run("while creation failed", func(t *testing.T) {
cluster1.State = model.ClusterStateCreationFailed
err = sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, err)
err = client.RetryCreateCluster(cluster1.ID)
require.NoError(t, err)
cluster1, err = client.GetCluster(cluster1.ID)
require.NoError(t, err)
require.Equal(t, model.ClusterStateCreationRequested, cluster1.State)
})
}
func TestProvisionCluster(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my-annotation"},
})
require.NoError(t, err)
t.Run("unknown cluster", func(t *testing.T) {
clusterResp, errTest := client.ProvisionCluster(model.NewID(), nil)
require.EqualError(t, errTest, "failed with status code 404")
assert.Nil(t, clusterResp)
})
t.Run("while locked", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
lockerID := model.NewID()
locked, errTest := sqlStore.LockCluster(cluster1.ID, lockerID)
require.NoError(t, errTest)
require.True(t, locked)
defer func() {
unlocked, errDefer := sqlStore.UnlockCluster(cluster1.ID, lockerID, false)
require.NoError(t, errDefer)
require.True(t, unlocked)
}()
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.EqualError(t, errTest, "failed with status code 409")
assert.Nil(t, clusterResp)
})
t.Run("while api-security-locked", func(t *testing.T) {
errTest := sqlStore.LockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.EqualError(t, errTest, "failed with status code 403")
assert.Nil(t, clusterResp)
errTest = sqlStore.UnlockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
})
t.Run("while provisioning", func(t *testing.T) {
cluster1.State = model.ClusterStateProvisioningRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateProvisioningRequested, cluster2.State)
assert.True(t, containsAnnotation("my-annotation", cluster2.Annotations))
})
t.Run("after provisioning failed", func(t *testing.T) {
cluster1.State = model.ClusterStateProvisioningFailed
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateProvisioningRequested, cluster2.State)
})
t.Run("while upgrading", func(t *testing.T) {
cluster1.State = model.ClusterStateUpgradeRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateUpgradeRequested, cluster2.State)
})
t.Run("after upgrade failed", func(t *testing.T) {
cluster1.State = model.ClusterStateUpgradeFailed
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateUpgradeFailed, cluster2.State)
})
t.Run("while stable", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateProvisioningRequested, cluster2.State)
})
t.Run("while deleting", func(t *testing.T) {
cluster1.State = model.ClusterStateDeletionRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ProvisionCluster(cluster1.ID, nil)
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
}
func TestUpgradeCluster(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my-annotation"},
})
require.NoError(t, err)
t.Run("unknown cluster", func(t *testing.T) {
clusterResp, errTest := client.UpgradeCluster(model.NewID(), &model.PatchUpgradeClusterRequest{Version: sToP("latest")})
require.EqualError(t, errTest, "failed with status code 404")
assert.Nil(t, clusterResp)
})
t.Run("while locked", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
lockerID := model.NewID()
locked, errTest := sqlStore.LockCluster(cluster1.ID, lockerID)
require.NoError(t, errTest)
require.True(t, locked)
defer func() {
unlocked, errDefer := sqlStore.UnlockCluster(cluster1.ID, lockerID, false)
require.NoError(t, errDefer)
require.True(t, unlocked)
}()
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: sToP("latest")})
require.EqualError(t, errTest, "failed with status code 409")
assert.Nil(t, clusterResp)
})
t.Run("while api-security-locked", func(t *testing.T) {
errTest := sqlStore.LockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
version := "latest"
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: &version})
require.EqualError(t, errTest, "failed with status code 403")
assert.Nil(t, clusterResp)
errTest = sqlStore.UnlockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
})
t.Run("while upgrading", func(t *testing.T) {
cluster1.State = model.ClusterStateUpgradeRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
version := "latest"
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: &version})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
assert.Equal(t, model.ClusterStateUpgradeRequested, cluster2.State)
assert.Equal(t, version, cluster2.ProvisionerMetadataKops.ChangeRequest.Version)
assert.Empty(t, cluster2.ProvisionerMetadataKops.AMI)
assert.True(t, containsAnnotation("my-annotation", cluster2.Annotations))
})
t.Run("after upgrade failed", func(t *testing.T) {
cluster1.State = model.ClusterStateUpgradeFailed
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
version := "latest"
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: &version})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
assert.Equal(t, model.ClusterStateUpgradeRequested, cluster2.State)
assert.Equal(t, version, cluster2.ProvisionerMetadataKops.ChangeRequest.Version)
assert.Empty(t, cluster2.ProvisionerMetadataKops.AMI)
})
t.Run("while stable, to latest", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
version := "latest"
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: &version})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
assert.Equal(t, model.ClusterStateUpgradeRequested, cluster2.State)
assert.Equal(t, version, cluster2.ProvisionerMetadataKops.ChangeRequest.Version)
assert.Empty(t, cluster2.ProvisionerMetadataKops.AMI)
})
t.Run("while stable, to valid version", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
version := "1.14.1"
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: &version})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
assert.Equal(t, model.ClusterStateUpgradeRequested, cluster2.State)
assert.Equal(t, version, cluster2.ProvisionerMetadataKops.ChangeRequest.Version)
assert.Empty(t, cluster2.ProvisionerMetadataKops.AMI)
})
t.Run("while stable, to invalid version", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: sToP("invalid")})
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
t.Run("while stable, to valid version and new AMI", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
version := "1.14.1"
ami := "mattermost-os"
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{
Version: &version,
AMI: &ami,
})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
assert.Equal(t, model.ClusterStateUpgradeRequested, cluster2.State)
assert.Equal(t, version, cluster2.ProvisionerMetadataKops.ChangeRequest.Version)
assert.Equal(t, ami, cluster2.ProvisionerMetadataKops.ChangeRequest.AMI)
})
t.Run("while deleting", func(t *testing.T) {
cluster1.State = model.ClusterStateDeletionRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.UpgradeCluster(cluster1.ID, &model.PatchUpgradeClusterRequest{Version: sToP("latest")})
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
}
func TestUpdateClusterConfiguration(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
AllowInstallations: true,
Annotations: []string{"my-annotation"},
})
require.NoError(t, err)
cluster1.ProvisionerMetadataKops.NodeMinCount = 5
err = sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, err)
t.Run("unknown cluster", func(t *testing.T) {
clusterResp, errTest := client.UpdateCluster(model.NewID(), &model.UpdateClusterRequest{})
require.EqualError(t, errTest, "failed with status code 404")
assert.Nil(t, clusterResp)
})
t.Run("while locked", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
lockerID := model.NewID()
locked, errTest := sqlStore.LockCluster(cluster1.ID, lockerID)
require.NoError(t, errTest)
require.True(t, locked)
defer func() {
unlocked, errDefer := sqlStore.UnlockCluster(cluster1.ID, lockerID, false)
require.NoError(t, errDefer)
require.True(t, unlocked)
}()
clusterResp, errTest := client.UpdateCluster(cluster1.ID, &model.UpdateClusterRequest{})
require.EqualError(t, errTest, "failed with status code 409")
assert.Nil(t, clusterResp)
})
t.Run("while api-security-locked", func(t *testing.T) {
errTest := sqlStore.LockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
clusterResp, errTest := client.UpdateCluster(cluster1.ID, &model.UpdateClusterRequest{})
require.EqualError(t, errTest, "failed with status code 403")
assert.Nil(t, clusterResp)
errTest = sqlStore.UnlockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
})
t.Run("while stable", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.UpdateCluster(cluster1.ID, &model.UpdateClusterRequest{AllowInstallations: false})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
assert.Equal(t, model.ClusterStateStable, cluster2.State)
assert.False(t, cluster2.AllowInstallations)
assert.True(t, containsAnnotation("my-annotation", cluster2.Annotations))
})
}
func TestResizeCluster(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
Annotations: []string{"my-annotation"},
})
require.NoError(t, err)
cluster1.ProvisionerMetadataKops.NodeMinCount = 5
err = sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, err)
t.Run("unknown cluster", func(t *testing.T) {
clusterResp, errTest := client.ResizeCluster(model.NewID(), &model.PatchClusterSizeRequest{})
require.EqualError(t, errTest, "failed with status code 404")
assert.Nil(t, clusterResp)
})
t.Run("while locked", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
lockerID := model.NewID()
locked, errTest := sqlStore.LockCluster(cluster1.ID, lockerID)
require.NoError(t, errTest)
require.True(t, locked)
defer func() {
unlocked, errDefer := sqlStore.UnlockCluster(cluster1.ID, lockerID, false)
require.NoError(t, errDefer)
require.True(t, unlocked)
}()
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{})
require.EqualError(t, errTest, "failed with status code 409")
assert.Nil(t, clusterResp)
})
t.Run("while api-security-locked", func(t *testing.T) {
errTest := sqlStore.LockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{NodeInstanceType: sToP("test1")})
require.EqualError(t, errTest, "failed with status code 403")
assert.Nil(t, clusterResp)
errTest = sqlStore.UnlockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
})
t.Run("while resizing", func(t *testing.T) {
cluster1.State = model.ClusterStateResizeRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{NodeInstanceType: sToP("test1")})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateResizeRequested, cluster2.State)
assert.Equal(t, "test1", cluster2.ProvisionerMetadataKops.ChangeRequest.NodeInstanceType)
assert.True(t, containsAnnotation("my-annotation", cluster2.Annotations))
})
t.Run("after resize failed", func(t *testing.T) {
cluster1.State = model.ClusterStateResizeFailed
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{NodeInstanceType: sToP("test2")})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateResizeRequested, cluster2.State)
assert.Equal(t, "test2", cluster2.ProvisionerMetadataKops.ChangeRequest.NodeInstanceType)
})
t.Run("while stable", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{NodeInstanceType: sToP("test3")})
require.NoError(t, errTest)
assert.NotNil(t, clusterResp)
cluster2, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateResizeRequested, cluster2.State)
assert.Equal(t, "test3", cluster2.ProvisionerMetadataKops.ChangeRequest.NodeInstanceType)
})
t.Run("while stable, to max node count lower than cluster min", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
max := int64(1)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{NodeMaxCount: &max})
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
t.Run("while stable, to invalid size", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
min := int64(10)
max := int64(5)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{NodeMinCount: &min, NodeMaxCount: &max})
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
t.Run("while upgrading", func(t *testing.T) {
cluster1.State = model.ClusterStateUpgradeRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{})
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
t.Run("while deleting", func(t *testing.T) {
cluster1.State = model.ClusterStateDeletionRequested
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
clusterResp, errTest := client.ResizeCluster(cluster1.ID, &model.PatchClusterSizeRequest{})
require.EqualError(t, errTest, "failed with status code 400")
assert.Nil(t, clusterResp)
})
}
func TestDeleteCluster(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster1, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
})
require.NoError(t, err)
// cluster2 will have a cluster installation running on it
cluster2, err := client.CreateCluster(&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
})
require.NoError(t, err)
err = sqlStore.CreateClusterInstallation(&model.ClusterInstallation{
ClusterID: cluster2.ID,
InstallationID: model.NewID(),
State: model.ClusterInstallationStateStable,
})
require.NoError(t, err)
t.Run("unknown cluster", func(t *testing.T) {
errTest := client.DeleteCluster(model.NewID())
require.EqualError(t, errTest, "failed with status code 404")
})
t.Run("while locked", func(t *testing.T) {
cluster1.State = model.ClusterStateStable
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
lockerID := model.NewID()
locked, errTest := sqlStore.LockCluster(cluster1.ID, lockerID)
require.NoError(t, errTest)
require.True(t, locked)
defer func() {
unlocked, errDefer := sqlStore.UnlockCluster(cluster1.ID, lockerID, false)
require.NoError(t, errDefer)
require.True(t, unlocked)
clusterCheck, errDefer := client.GetCluster(cluster1.ID)
require.NoError(t, errDefer)
require.Equal(t, int64(0), clusterCheck.LockAcquiredAt)
}()
errTest = client.DeleteCluster(cluster1.ID)
require.EqualError(t, errTest, "failed with status code 409")
})
t.Run("while api-security-locked", func(t *testing.T) {
errTest := sqlStore.LockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
errTest = client.DeleteCluster(cluster1.ID)
require.EqualError(t, errTest, "failed with status code 403")
errTest = sqlStore.UnlockClusterAPI(cluster1.ID)
require.NoError(t, errTest)
})
// valid unlocked states
states := []string{
model.ClusterStateStable,
model.ClusterStateCreationRequested,
model.ClusterStateCreationFailed,
model.ClusterStateProvisioningFailed,
model.ClusterStateUpgradeRequested,
model.ClusterStateUpgradeFailed,
model.ClusterStateDeletionRequested,
model.ClusterStateDeletionFailed,
}
t.Run("from a valid, unlocked state", func(t *testing.T) {
for _, state := range states {
t.Run(state, func(t *testing.T) {
cluster1.State = state
errTest := sqlStore.UpdateCluster(cluster1.Cluster)
require.NoError(t, errTest)
errTest = client.DeleteCluster(cluster1.ID)
require.NoError(t, errTest)
clusterCheck, errTest := client.GetCluster(cluster1.ID)
require.NoError(t, errTest)
require.Equal(t, model.ClusterStateDeletionRequested, clusterCheck.State)
})
}
})
t.Run("from a valid, unlocked state, but not empty of cluster installations", func(t *testing.T) {
for _, state := range states {
t.Run(state, func(t *testing.T) {
cluster2.State = state
errTest := sqlStore.UpdateCluster(cluster2.Cluster)
require.NoError(t, errTest)
errTest = client.DeleteCluster(cluster2.ID)
require.Error(t, errTest)
clusterCheck, errTest := client.GetCluster(cluster2.ID)
require.NoError(t, errTest)
require.Equal(t, state, clusterCheck.State)
})
}
})
}
func TestGetAllUtilityMetadata(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
c, err := client.CreateCluster(
&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
DesiredUtilityVersions: map[string]*model.HelmUtilityVersion{
"prometheus-operator": {Chart: "48.1.2"},
"nginx": {Chart: "stable"},
},
})
require.NoError(t, err)
utilityMetadata, err := client.GetClusterUtilities(c.ID)
require.NoError(t, err)
var nilVersion *model.HelmUtilityVersion = nil
assert.Equal(t, nilVersion, utilityMetadata.ActualVersions.Nginx)
assert.Equal(t, nilVersion, utilityMetadata.ActualVersions.Fluentbit)
assert.Equal(t, &model.HelmUtilityVersion{Chart: "stable"}, utilityMetadata.DesiredVersions.Nginx)
assert.Equal(t, &model.HelmUtilityVersion{Chart: "48.1.2", ValuesPath: ""}, utilityMetadata.DesiredVersions.PrometheusOperator)
assert.Equal(t, model.DefaultUtilityVersions[model.FluentbitCanonicalName], utilityMetadata.DesiredVersions.Fluentbit)
}
func TestClusterAnnotations(t *testing.T) {
logger := testlib.MakeLogger(t)
sqlStore := store.MakeTestSQLStore(t, logger)
defer store.CloseConnection(t, sqlStore)
router := mux.NewRouter()
api.Register(router, &api.Context{
Store: sqlStore,
Supervisor: &mockSupervisor{},
EventProducer: testutil.SetupTestEventsProducer(sqlStore, logger),
Metrics: &mockMetrics{},
Logger: logger,
Provisioner: &mockProvisioner{},
})
ts := httptest.NewServer(router)
defer ts.Close()
client := model.NewClient(ts.URL)
cluster, err := client.CreateCluster(
&model.CreateClusterRequest{
Provider: model.ProviderAWS,
Zones: []string{"zone"},
})
require.NoError(t, err)
annotationsRequest := &model.AddAnnotationsRequest{
Annotations: []string{"my-annotation", "super-awesome123"},
}
cluster, err = client.AddClusterAnnotations(cluster.ID, annotationsRequest)
require.NoError(t, err)
assert.Equal(t, 2, len(cluster.Annotations))
assert.True(t, containsAnnotation("my-annotation", cluster.Annotations))
assert.True(t, containsAnnotation("super-awesome123", cluster.Annotations))
annotationsRequest = &model.AddAnnotationsRequest{
Annotations: []string{"my-annotation2"},
}
cluster, err = client.AddClusterAnnotations(cluster.ID, annotationsRequest)
require.NoError(t, err)
assert.Equal(t, 3, len(cluster.Annotations))
cluster, err = client.GetCluster(cluster.ID)
require.NoError(t, err)
assert.Equal(t, 3, len(cluster.Annotations))
assert.True(t, containsAnnotation("my-annotation", cluster.Annotations))
assert.True(t, containsAnnotation("my-annotation2", cluster.Annotations))
assert.True(t, containsAnnotation("super-awesome123", cluster.Annotations))
t.Run("fail to add duplicated annotation", func(t *testing.T) {
annotationsRequest = &model.AddAnnotationsRequest{
Annotations: []string{"my-annotation"},
}
_, err = client.AddClusterAnnotations(cluster.ID, annotationsRequest)
require.Error(t, err)
})
t.Run("fail to add invalid annotation", func(t *testing.T) {
annotationsRequest = &model.AddAnnotationsRequest{
Annotations: []string{"_my-annotation"},
}
_, err = client.AddClusterAnnotations(cluster.ID, annotationsRequest)
require.Error(t, err)
})
t.Run("fail to add or delete while api-security-locked", func(t *testing.T) {
annotationsRequest = &model.AddAnnotationsRequest{
Annotations: []string{"is-locked"},
}
err = sqlStore.LockClusterAPI(cluster.ID)
require.NoError(t, err)
_, err = client.AddClusterAnnotations(cluster.ID, annotationsRequest)
require.Error(t, err)
err = client.DeleteClusterAnnotation(cluster.ID, "my-annotation2")
require.Error(t, err)
err = sqlStore.UnlockClusterAPI(cluster.ID)
require.NoError(t, err)
})
err = client.DeleteClusterAnnotation(cluster.ID, "my-annotation2")
require.NoError(t, err)
cluster, err = client.GetCluster(cluster.ID)
require.NoError(t, err)
assert.Equal(t, 2, len(cluster.Annotations))
t.Run("delete unknown annotation", func(t *testing.T) {
err = client.DeleteClusterAnnotation(cluster.ID, "unknown")
require.NoError(t, err)
cluster, err = client.GetCluster(cluster.ID)
require.NoError(t, err)
assert.Equal(t, 2, len(cluster.Annotations))
})
t.Run("fail with 403 if deleting annotation used by installation", func(t *testing.T) {
annotations := []*model.Annotation{
{Name: "my-annotation"},
}
installation := &model.Installation{}
err = sqlStore.CreateInstallation(installation, annotations, nil)
require.NoError(t, err)
clusterInstallation := &model.ClusterInstallation{
InstallationID: installation.ID,
ClusterID: cluster.ID,
}
err = sqlStore.CreateClusterInstallation(clusterInstallation)
require.NoError(t, err)
err = client.DeleteClusterAnnotation(cluster.ID, "my-annotation")
require.Error(t, err)
assert.Contains(t, err.Error(), "403")
})
}
func containsAnnotation(name string, annotations []*model.Annotation) bool {
for _, a := range annotations {
if a.Name == name {
return true
}
}
return false
}
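// sToP is used throughout these tests but defined elsewhere in the package; a
// minimal equivalent (shown for readability, not part of this file) would be:
//
// func sToP(s string) *string { return &s }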
|
package xsd
import (
"encoding/xml"
"strings"
"github.com/iancoleman/strcase"
)
// Enumeration defines a single XML Schema enumeration value
type Enumeration struct {
XMLName xml.Name `xml:"http://www.w3.org/2001/XMLSchema enumeration"`
Value string `xml:"value,attr"`
}
// GoName returns the exported (public) Go name of this enumeration value
func (e *Enumeration) GoName() string {
return strcase.ToCamel(strings.ToLower(e.Value))
}
func (e *Enumeration) Modifiers() string {
return "-"
}
func (e *Enumeration) XmlName() string {
return e.Value
}
// compile is a no-op: enumeration values need no further schema resolution.
func (e *Enumeration) compile(s *Schema) {
}
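// Worked example (illustrative): GoName lower-cases the value and then
// camel-cases it, so Enumeration{Value: "FOO_BAR"}.GoName() yields "FooBar".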
|
package main
import (
"encoding/json"
"io"
"strings"
"github.com/asim/go-micro/plugins/client/grpc/v3"
_ "github.com/asim/go-micro/plugins/registry/etcd/v3"
"github.com/asim/go-micro/plugins/server/http/v3"
"github.com/asim/go-micro/v3"
"github.com/asim/go-micro/v3/client"
"github.com/asim/go-micro/v3/logger"
"github.com/asim/go-micro/v3/registry"
"github.com/gin-gonic/gin"
)
func main() {
srv := micro.NewService(
micro.Server(http.NewServer()),
micro.Client(grpc.NewClient()),
micro.Name("gateway"),
micro.Address(":8080"),
)
srv.Init()
router := gin.New()
router.Use(gin.Recovery())
router.Use(gin.Logger())
router.POST("/:service/:endpoint", func(ctx *gin.Context) {
service, endpoint := ctx.Param("service"), ctx.Param("endpoint")
defer ctx.Request.Body.Close()
data, err := io.ReadAll(ctx.Request.Body)
if err != nil {
logger.Error(err)
ctx.AbortWithStatusJSON(500, err.Error())
return
}
var request json.RawMessage
if len(data) > 0 {
d := json.NewDecoder(strings.NewReader(string(data)))
d.UseNumber()
if err := d.Decode(&request); err != nil {
logger.Error(err)
ctx.AbortWithStatusJSON(500, err.Error())
return
}
}
c := srv.Client()
var response json.RawMessage
if err := c.Call(ctx, c.NewRequest(service, endpoint, request, client.WithContentType("application/json")), &response); err != nil {
logger.Error(err)
ctx.AbortWithStatusJSON(500, err.Error())
return
}
ctx.JSON(200, response)
})
router.GET("/:service/nodes", func(ctx *gin.Context) {
services, err := srv.Options().Registry.GetService(ctx.Param("service"))
if err != nil {
logger.Error(err)
ctx.AbortWithStatusJSON(500, err.Error())
return
}
if len(services) == 0 {
ctx.AbortWithStatusJSON(400, "service not found")
return
}
nodes := make([]*registry.Node, 0)
for _, s := range services {
nodes = append(nodes, s.Nodes...)
}
ctx.JSON(200, nodes)
})
if err := micro.RegisterHandler(srv.Server(), router); err != nil {
logger.Fatal(err)
}
if err := srv.Run(); err != nil {
logger.Fatal(err)
}
}
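// Example usage (hypothetical service and endpoint names): with a "greeter"
// service registered in etcd, the gateway forwards JSON bodies over gRPC:
//
// curl -X POST http://localhost:8080/greeter/Greeter.Hello -d '{"name":"john"}'
// curl http://localhost:8080/greeter/nodes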
|
package data
// Get currently echoes the search term back unchanged.
func Get(search string) string {
return search
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
)
const jotformKey = "jotformApiKey"
const jotformCache = "jotform.json"
// /form/{formID}/submissions?apiKey={apiKey}"
const jotformURL = "https://api.jotform.com"
// TODO get this programmatically via /user/forms/
const formID = "92310716200139"
// The last year that we want to purge, inclusive
const latestGradYear = 2020
func getSubmissions() map[string]interface{} {
data, err := ioutil.ReadFile(jotformCache)
if err != nil {
// Try API call
client := &http.Client{}
v := url.Values{}
v.Set("limit", "1000")
req, err := http.NewRequest("GET", fmt.Sprintf("%s?%s",
strings.Join([]string{jotformURL, "form", formID, "submissions"}, "/"),
v.Encode()), nil)
if err != nil {
epanic(err, "could not create request")
}
req.Header.Add("APIKEY", getToken(jotformKey))
resp, err := client.Do(req)
if err != nil {
epanic(err, "could not get")
}
defer resp.Body.Close()
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
epanic(err, "could not read response body")
}
if err := ioutil.WriteFile(jotformCache, data, 0644); err != nil {
epanic(err, "could not write cache file")
}
}
// Unmarshal
processedBody := make(map[string]interface{})
if err := json.Unmarshal(data, &processedBody); err != nil {
epanic(err, "could not unmarshal submissions")
}
return processedBody
}
func jotformGetInactiveCaptains() {
data := getSubmissions()
fmt.Println("code", data["responseCode"].(float64))
// Get people who have already graduated
submissions := data["content"].([]interface{})
fmt.Println("num responses", len(submissions))
graduatedPeopleAnswers := make([]map[string]interface{}, 0)
for _, x := range submissions {
// Get graduation year
resp := x.(map[string]interface{})
answers := resp["answers"].(map[string]interface{})
var email string
graduated := false
for _, a := range answers {
ac := a.(map[string]interface{})
			if strings.Contains(ac["name"].(string), "yourEmail") {
				email = ac["answer"].(string)
			} else if strings.Contains(ac["name"].(string), "graduationYear") {
				// found the graduation-year answer
				year := ac["answer"].(string)
				if y, err := strconv.Atoi(year); err == nil && y <= latestGradYear {
					// this person has already graduated
					graduated = true
				}
			}
}
		if graduated {
			graduatedPeopleAnswers = append(graduatedPeopleAnswers, answers)
			fmt.Println(email)
		}
}
fmt.Println("we have", len(graduatedPeopleAnswers), "graduated people")
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"github.com/compose-spec/compose-go/types"
"github.com/docker/cli/cli"
cmd "github.com/docker/cli/cli/command/container"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/utils"
"github.com/docker/docker/pkg/stringid"
)
func (s *composeService) RunOneOffContainer(ctx context.Context, project *types.Project, opts api.RunOptions) (int, error) {
containerID, err := s.prepareRun(ctx, project, opts)
if err != nil {
return 0, err
}
	// Remove the cancellable context's signal handler so we can forward signals to the container without Compose itself exiting.
signal.Reset()
sigc := make(chan os.Signal, 128)
signal.Notify(sigc)
go cmd.ForwardAllSignals(ctx, s.dockerCli, containerID, sigc)
defer signal.Stop(sigc)
err = cmd.RunStart(s.dockerCli, &cmd.StartOptions{
OpenStdin: !opts.Detach && opts.Interactive,
Attach: !opts.Detach,
Containers: []string{containerID},
})
var stErr cli.StatusError
if errors.As(err, &stErr) {
return stErr.StatusCode, nil
}
return 0, err
}
func (s *composeService) prepareRun(ctx context.Context, project *types.Project, opts api.RunOptions) (string, error) {
	if err := prepareVolumes(project); err != nil {
return "", err
}
service, err := project.GetService(opts.Service)
if err != nil {
return "", err
}
applyRunOptions(project, &service, opts)
if err := s.stdin().CheckTty(opts.Interactive, service.Tty); err != nil {
return "", err
}
slug := stringid.GenerateRandomID()
if service.ContainerName == "" {
service.ContainerName = fmt.Sprintf("%[1]s%[4]s%[2]s%[4]srun%[4]s%[3]s", project.Name, service.Name, stringid.TruncateID(slug), api.Separator)
}
service.Scale = 1
service.Restart = ""
if service.Deploy != nil {
service.Deploy.RestartPolicy = nil
}
service.CustomLabels = service.CustomLabels.
Add(api.SlugLabel, slug).
Add(api.OneoffLabel, "True")
if err := s.ensureImagesExists(ctx, project, opts.Build, opts.QuietPull); err != nil { // all dependencies already checked, but might miss service img
return "", err
}
observedState, err := s.getContainers(ctx, project.Name, oneOffInclude, true)
if err != nil {
return "", err
}
updateServices(&service, observedState)
if !opts.NoDeps {
if err := s.waitDependencies(ctx, project, service.DependsOn, observedState); err != nil {
return "", err
}
}
createOpts := createOptions{
AutoRemove: opts.AutoRemove,
AttachStdin: opts.Interactive,
UseNetworkAliases: opts.UseNetworkAliases,
Labels: mergeLabels(service.Labels, service.CustomLabels),
}
created, err := s.createContainer(ctx, project, service, service.ContainerName, 1, createOpts)
if err != nil {
return "", err
}
return created.ID, nil
}
func applyRunOptions(project *types.Project, service *types.ServiceConfig, opts api.RunOptions) {
service.Tty = opts.Tty
service.StdinOpen = opts.Interactive
service.ContainerName = opts.Name
if len(opts.Command) > 0 {
service.Command = opts.Command
}
if len(opts.User) > 0 {
service.User = opts.User
}
if len(opts.CapAdd) > 0 {
service.CapAdd = append(service.CapAdd, opts.CapAdd...)
service.CapDrop = utils.Remove(service.CapDrop, opts.CapAdd...)
}
if len(opts.CapDrop) > 0 {
service.CapDrop = append(service.CapDrop, opts.CapDrop...)
service.CapAdd = utils.Remove(service.CapAdd, opts.CapDrop...)
}
if len(opts.WorkingDir) > 0 {
service.WorkingDir = opts.WorkingDir
}
if opts.Entrypoint != nil {
service.Entrypoint = opts.Entrypoint
if len(opts.Command) == 0 {
service.Command = []string{}
}
}
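	// Resolve `--env` entries supplied without a value against the project environment,
	// drop any that remain empty, and let CLI-provided values override the service's own.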
if len(opts.Environment) > 0 {
cmdEnv := types.NewMappingWithEquals(opts.Environment)
serviceOverrideEnv := cmdEnv.Resolve(func(s string) (string, bool) {
v, ok := envResolver(project.Environment)(s)
return v, ok
}).RemoveEmpty()
service.Environment.OverrideBy(serviceOverrideEnv)
}
for k, v := range opts.Labels {
service.Labels = service.Labels.Add(k, v)
}
}
|
package toggl
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
)
// APIURL is the Toggl API base URL; this could be made configurable per environment.
const APIURL = "https://toggl.com/"
type TogglAPIError struct {
status int
message string
endpoint string
}
func (t TogglAPIError) Error() string {
	return fmt.Sprintf("endpoint %s returned a status %d: %s", t.endpoint, t.status, t.message)
}
var ErrorTimeEntryNotFound = TogglAPIError{404, "not found", APIURL}
var ErrorUnauthorized = TogglAPIError{401, "unauthorized", APIURL}
var ErrorForbidden = TogglAPIError{403, "forbidden", APIURL}
type Client struct {
apiURL url.URL
token string
}
type TimeEntry struct {
ID int `json:"id"`
WorkspaceID int64 `json:"workspace_id"`
ProjectID int64 `json:"project_id"`
Description string `json:"description"`
}
type Project struct {
ProjectID int64 `json:"id"`
Name string `json:"name"`
}
type TogglUser struct {
UserID int64 `json:"id"`
Name string `json:"fullname"`
Mail string `json:"email"`
}
func New(token string) *Client {
u, err := url.Parse(APIURL)
if err != nil {
panic(fmt.Errorf("provided URL %s can't be parsed: %s", APIURL, err.Error()))
}
return &Client{apiURL: *u, token: token}
}
func (c *Client) GetCurrentTimeEntry() (*TimeEntry, error) {
c.apiURL.Path = "api/v9/me/time_entries/current"
body, err := c.makeRequest()
if err != nil {
return nil, err
}
var currentTE TimeEntry
	err = json.Unmarshal(body, &currentTE)
if err != nil {
return nil, err
}
	return &currentTE, nil
}
func (c *Client) GetProject(workspaceID, projectID int64) (*Project, error) {
c.apiURL.Path = fmt.Sprintf("api/v9/workspaces/%d/projects/%d", workspaceID, projectID)
body, err := c.makeRequest()
if err != nil {
return nil, err
}
var project Project
err = json.Unmarshal(body, &project)
if err != nil {
return nil, err
}
return &project, nil
}
func (c *Client) GetUser() (*TogglUser, error) {
c.apiURL.Path = "api/v9/me"
body, err := c.makeRequest()
if err != nil {
return nil, err
}
var user TogglUser
err = json.Unmarshal(body, &user)
if err != nil {
return nil, err
}
return &user, nil
}
func (c *Client) makeRequest() ([]byte, error) {
auth := fmt.Sprintf("Basic %s", c.token)
req, err := http.NewRequest("GET", c.apiURL.String(), nil)
if err != nil {
return nil, err
}
req.Header.Add("authorization", auth)
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
	defer res.Body.Close()
	if res.StatusCode == http.StatusUnauthorized {
		return nil, ErrorUnauthorized
	}
	if res.StatusCode == http.StatusForbidden {
		return nil, ErrorForbidden
	}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
// Some Toggl endpoints return this string instead of a 404 status
if string(body) == "null" {
return nil, ErrorTimeEntryNotFound
}
return body, nil
}
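// Illustrative usage (not part of the original file; the token value is a placeholder):
//
//	client := toggl.New("your-api-token")
//	entry, err := client.GetCurrentTimeEntry()
//	if err != nil {
//		// ErrorTimeEntryNotFound is returned when the API reports no running entry.
//		fmt.Println(err)
//		return
//	}
//	fmt.Println("working on:", entry.Description)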
|
// Copyright 2020 cloudeng llc. All rights reserved.
// Use of this source code is governed by the Apache-2.0
// license that can be found in the LICENSE file.
package cloudpath_test
import (
"testing"
"cloudeng.io/path/cloudpath"
)
func TestGoogleCloudStorage(t *testing.T) {
data := []matcherTestSpec{
{
"gs://",
cloudpath.GoogleCloudStorage, "", "", "", '/', nil,
},
{
"gs://bucket",
cloudpath.GoogleCloudStorage, "", "bucket", "/bucket", '/', nil,
},
{
"gs://bucket/",
cloudpath.GoogleCloudStorage, "", "bucket", "/bucket/", '/', nil,
},
{
"gs://bucket/object",
cloudpath.GoogleCloudStorage, "", "bucket", "/bucket/object", '/', nil,
},
{
"gs://bucket/object/",
cloudpath.GoogleCloudStorage, "", "bucket", "/bucket/object/", '/', nil,
},
{
"https://storage.cloud.google.com/bucket/path",
cloudpath.GoogleCloudStorage, "storage.cloud.google.com", "bucket", "/bucket/path", '/', nil,
},
{
"https://storage.cloud.google.com/bucket/path?a=b&c=d",
cloudpath.GoogleCloudStorage, "storage.cloud.google.com", "bucket", "/bucket/path", '/', exampleParameters,
},
{
"https://storage.cloud.google.com",
cloudpath.GoogleCloudStorage, "storage.cloud.google.com", "", "", '/', nil,
},
{
"https://storage.cloud.google.com/",
cloudpath.GoogleCloudStorage, "storage.cloud.google.com", "", "/", '/', nil,
},
{
"https://storage.cloud.google.com/bucket",
cloudpath.GoogleCloudStorage, "storage.cloud.google.com", "bucket", "/bucket", '/', nil,
},
{
"https://storage.cloud.google.com/bucket/",
cloudpath.GoogleCloudStorage, "storage.cloud.google.com", "bucket", "/bucket/", '/', nil,
},
{
"https://storage.googleapis.com/storage/v1/b/bucket/path",
cloudpath.GoogleCloudStorage, "storage.googleapis.com", "bucket", "/bucket/path", '/', nil,
},
{
"https://storage.googleapis.com/upload/storage/v1/b/bucket/path",
cloudpath.GoogleCloudStorage, "storage.googleapis.com", "bucket", "/bucket/path", '/', nil,
},
{
"https://storage.googleapis.com/batch/storage/v1/b/bucket/path",
cloudpath.GoogleCloudStorage, "storage.googleapis.com", "bucket", "/bucket/path", '/', nil,
},
{
"https://storage.googleapis.com",
cloudpath.GoogleCloudStorage, "storage.googleapis.com", "", "", '/', nil,
},
{
"https://storage.googleapis.com/wrong/prefix",
cloudpath.GoogleCloudStorage, "storage.googleapis.com", "", "/wrong/prefix", '/', nil,
},
}
if err := testMatcher(cloudpath.GoogleCloudStorageMatcher, data); err != nil {
t.Errorf("%v", err)
}
if err := testNoMatch(cloudpath.GoogleCloudStorageMatcher, []string{
"",
string([]byte{0x0}), // invalid URL
"https://s.us-west-2.amazonaws.com",
"s3:/a/b",
"https://my.bucket.s3.us-west-2.amazonaws.com/kitten.png",
"/a/b/c",
`c:\`,
`\\?c:`,
`\\host\share\a`,
}); err != nil {
t.Errorf("%v", err)
}
}
|
package main
import "fmt"
func main() {
//var deck string = "Ace of card"
deck := "Ace of card"
fmt.Println(deck)
} |
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keyserver
import (
"context"
"testing"
"time"
"github.com/google/keytransparency/core/domain"
"github.com/google/keytransparency/core/fake"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
pb "github.com/google/keytransparency/core/api/v1/keytransparency_proto"
)
func TestLatestRevision(t *testing.T) {
ctx := context.Background()
mapID := int64(2)
fakeAdmin := fake.NewDomainStorage()
fakeMap := fake.NewTrillianMapClient()
fakeLog := fake.NewTrillianLogClient()
if err := fakeAdmin.Write(ctx, &domain.Domain{
DomainID: domainID,
MapID: mapID,
MinInterval: 1 * time.Second,
MaxInterval: 5 * time.Second,
}); err != nil {
t.Fatalf("admin.Write(): %v", err)
}
// Advance the Map's revision without touching the log.
fakeMap.SetLeaves(ctx, nil) // Revision 1
fakeMap.SetLeaves(ctx, nil) // Revision 2
fakeMap.SetLeaves(ctx, nil) // Revision 3
fakeMap.SetLeaves(ctx, nil) // Revision 4
srv := &Server{
domains: fakeAdmin,
tlog: fakeLog,
tmap: fakeMap,
indexFunc: func(context.Context, *domain.Domain, string, string) ([32]byte, []byte, error) {
return [32]byte{}, []byte(""), nil
},
}
for _, tc := range []struct {
desc string
treeSize int64
wantErr codes.Code
wantRev int64
}{
{desc: "not initialized", treeSize: 0, wantErr: codes.Internal},
{desc: "log controls revision", treeSize: 2, wantErr: codes.OK, wantRev: 1},
} {
t.Run(tc.desc+" GetEntry", func(t *testing.T) {
fakeLog.TreeSize = tc.treeSize
resp, err := srv.GetEntry(ctx, &pb.GetEntryRequest{
DomainId: domainID,
})
if got, want := status.Code(err), tc.wantErr; got != want {
t.Errorf("GetEntry(): %v, want %v", err, want)
}
if err != nil {
return
}
if got, want := resp.Smr.MapRevision, tc.wantRev; got != want {
t.Errorf("GetEntry().Rev: %v, want %v", got, want)
}
})
t.Run(tc.desc+" GetEntryHistory", func(t *testing.T) {
fakeLog.TreeSize = tc.treeSize
resp2, err := srv.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: domainID,
})
if got, want := status.Code(err), tc.wantErr; got != want {
t.Errorf("ListEntryHistory(): %v, want %v", err, tc.wantErr)
}
if err != nil {
return
}
i := len(resp2.Values) - 1 // Get last value.
if got, want := resp2.Values[i].Smr.MapRevision, tc.wantRev; got != want {
t.Errorf("ListEntryHistory().Rev: %v, want %v", got, want)
}
})
}
}
|
package base
import (
"gonum.org/v1/gonum/stat"
"math"
)
// FuncSimilarity computes the similarity between a pair of vectors.
type FuncSimilarity func(a, b *SparseVector) float64
// CosineSimilarity computes the cosine similarity between a pair of vectors.
func CosineSimilarity(a, b *SparseVector) float64 {
m, n, l := .0, .0, .0
a.ForIntersection(b, func(index int, a, b float64) {
m += a * a
n += b * b
l += a * b
})
return l / (math.Sqrt(m) * math.Sqrt(n))
}
// MSDSimilarity computes the Mean Squared Difference similarity between a pair of vectors.
func MSDSimilarity(a, b *SparseVector) float64 {
count, sum := 0.0, 0.0
a.ForIntersection(b, func(index int, a, b float64) {
sum += (a - b) * (a - b)
count += 1
})
return 1.0 / (sum/count + 1)
}
// PearsonSimilarity computes the absolute Pearson correlation coefficient between a pair of vectors.
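// Formally, with each mean taken over the vector's full set of values and the sums
// running over the indices the two vectors share:
//
//	pearson(a, b) = |Σ (a_i - ā)(b_i - b̄)| / (sqrt(Σ (a_i - ā)²) * sqrt(Σ (b_i - b̄)²))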
func PearsonSimilarity(a, b *SparseVector) float64 {
// Mean of a
meanA := stat.Mean(a.Values, nil)
// Mean of b
meanB := stat.Mean(b.Values, nil)
// Mean-centered cosine
m, n, l := .0, .0, .0
a.ForIntersection(b, func(index int, a, b float64) {
ratingA := a - meanA
ratingB := b - meanB
m += ratingA * ratingA
n += ratingB * ratingB
l += ratingA * ratingB
})
return math.Abs(l) / (math.Sqrt(m) * math.Sqrt(n))
}
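// Usage sketch (illustrative; assumes a and b are *SparseVector values built
// elsewhere in this package): each function above satisfies FuncSimilarity, so
// callers can swap metrics without changing their code.
//
//	var sim FuncSimilarity = CosineSimilarity
//	score := sim(a, b)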
|
package management
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
"github.com/caos/zitadel/internal/api/authz"
"github.com/caos/zitadel/pkg/grpc/management"
)
func (s *Server) SearchUserGrants(ctx context.Context, in *management.UserGrantSearchRequest) (*management.UserGrantSearchResponse, error) {
request := userGrantSearchRequestsToModel(in)
request.AppendMyOrgQuery(authz.GetCtxData(ctx).OrgID)
response, err := s.usergrant.SearchUserGrants(ctx, request)
if err != nil {
return nil, err
}
return userGrantSearchResponseFromModel(response), nil
}
func (s *Server) UserGrantByID(ctx context.Context, request *management.UserGrantID) (*management.UserGrantView, error) {
user, err := s.usergrant.UserGrantByID(ctx, request.Id)
if err != nil {
return nil, err
}
return userGrantViewFromModel(user), nil
}
func (s *Server) CreateUserGrant(ctx context.Context, in *management.UserGrantCreate) (*management.UserGrant, error) {
user, err := s.usergrant.AddUserGrant(ctx, userGrantCreateToModel(in))
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) UpdateUserGrant(ctx context.Context, in *management.UserGrantUpdate) (*management.UserGrant, error) {
user, err := s.usergrant.ChangeUserGrant(ctx, userGrantUpdateToModel(in))
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) DeactivateUserGrant(ctx context.Context, in *management.UserGrantID) (*management.UserGrant, error) {
user, err := s.usergrant.DeactivateUserGrant(ctx, in.Id)
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) ReactivateUserGrant(ctx context.Context, in *management.UserGrantID) (*management.UserGrant, error) {
user, err := s.usergrant.ReactivateUserGrant(ctx, in.Id)
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) RemoveUserGrant(ctx context.Context, in *management.UserGrantID) (*empty.Empty, error) {
err := s.usergrant.RemoveUserGrant(ctx, in.Id)
return &empty.Empty{}, err
}
func (s *Server) BulkRemoveUserGrant(ctx context.Context, in *management.UserGrantRemoveBulk) (*empty.Empty, error) {
err := s.usergrant.BulkRemoveUserGrant(ctx, userGrantRemoveBulkToModel(in)...)
return &empty.Empty{}, err
}
func (s *Server) SearchProjectUserGrants(ctx context.Context, in *management.ProjectUserGrantSearchRequest) (*management.UserGrantSearchResponse, error) {
request := projectUserGrantSearchRequestsToModel(in)
request.AppendMyOrgQuery(authz.GetCtxData(ctx).OrgID)
request.AppendProjectIDQuery(in.ProjectId)
response, err := s.usergrant.SearchUserGrants(ctx, request)
if err != nil {
return nil, err
}
return userGrantSearchResponseFromModel(response), nil
}
func (s *Server) ProjectUserGrantByID(ctx context.Context, request *management.ProjectUserGrantID) (*management.UserGrantView, error) {
user, err := s.usergrant.UserGrantByID(ctx, request.Id)
if err != nil {
return nil, err
}
return userGrantViewFromModel(user), nil
}
func (s *Server) CreateProjectUserGrant(ctx context.Context, in *management.UserGrantCreate) (*management.UserGrant, error) {
user, err := s.usergrant.AddUserGrant(ctx, userGrantCreateToModel(in))
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) UpdateProjectUserGrant(ctx context.Context, in *management.ProjectUserGrantUpdate) (*management.UserGrant, error) {
user, err := s.usergrant.ChangeUserGrant(ctx, projectUserGrantUpdateToModel(in))
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) DeactivateProjectUserGrant(ctx context.Context, in *management.ProjectUserGrantID) (*management.UserGrant, error) {
user, err := s.usergrant.DeactivateUserGrant(ctx, in.Id)
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) ReactivateProjectUserGrant(ctx context.Context, in *management.ProjectUserGrantID) (*management.UserGrant, error) {
user, err := s.usergrant.ReactivateUserGrant(ctx, in.Id)
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) SearchProjectGrantUserGrants(ctx context.Context, in *management.ProjectGrantUserGrantSearchRequest) (*management.UserGrantSearchResponse, error) {
grant, err := s.project.ProjectGrantByID(ctx, in.ProjectGrantId)
if err != nil {
return nil, err
}
request := projectGrantUserGrantSearchRequestsToModel(in)
request.AppendMyOrgQuery(authz.GetCtxData(ctx).OrgID)
request.AppendProjectIDQuery(grant.ProjectID)
response, err := s.usergrant.SearchUserGrants(ctx, request)
if err != nil {
return nil, err
}
return userGrantSearchResponseFromModel(response), nil
}
func (s *Server) ProjectGrantUserGrantByID(ctx context.Context, request *management.ProjectGrantUserGrantID) (*management.UserGrantView, error) {
user, err := s.usergrant.UserGrantByID(ctx, request.Id)
if err != nil {
return nil, err
}
return userGrantViewFromModel(user), nil
}
func (s *Server) CreateProjectGrantUserGrant(ctx context.Context, in *management.ProjectGrantUserGrantCreate) (*management.UserGrant, error) {
user, err := s.usergrant.AddUserGrant(ctx, projectGrantUserGrantCreateToModel(in))
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) UpdateProjectGrantUserGrant(ctx context.Context, in *management.ProjectGrantUserGrantUpdate) (*management.UserGrant, error) {
user, err := s.usergrant.ChangeUserGrant(ctx, projectGrantUserGrantUpdateToModel(in))
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) DeactivateProjectGrantUserGrant(ctx context.Context, in *management.ProjectGrantUserGrantID) (*management.UserGrant, error) {
user, err := s.usergrant.DeactivateUserGrant(ctx, in.Id)
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
func (s *Server) ReactivateProjectGrantUserGrant(ctx context.Context, in *management.ProjectGrantUserGrantID) (*management.UserGrant, error) {
user, err := s.usergrant.ReactivateUserGrant(ctx, in.Id)
if err != nil {
return nil, err
}
return usergrantFromModel(user), nil
}
|
package main
import (
"fmt"
"io"
"net/http"
"os"
"strings"
)
var buildOrder = []string{ // layers must be drawn in this order or the composite renders incorrectly
"Body",
"Eyes",
"Shirts",
"Hair",
"Glasses",
"Hats_and_Hair_Accessories",
"Extras",
}
const input = ""
var guard = make(chan struct{}, 5)
func main() {
words := strings.Split(input, " ")
files := []string{}
donechan := make(chan bool)
toBuild := unique(words)
for i, word := range toBuild {
guard <- struct{}{}
go createGopher(i, word, donechan)
files = append(files, fmt.Sprintf("%d.png", i))
fmt.Printf("Started %d/%d\n", i+1, len(toBuild))
}
for i := 0; i < len(toBuild); i++ {
<-donechan
}
fmt.Println("Making collage")
collage(files)
}
func createGopher(i int, word string, out chan bool) {
	defer func() { out <- true }()
	defer func() { <-guard }() // release the semaphore slot even when returning early
url, err := createForWord(word)
if err != nil {
return
}
f, err := os.Create(fmt.Sprintf("%d.png", i))
if err != nil {
return
}
defer f.Close()
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	io.Copy(f, resp.Body)
}
func unique(slice []string) []string {
keys := make(map[string]bool)
list := []string{}
for _, entry := range slice {
		if _, seen := keys[entry]; !seen {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
|
//Copyright 2019 Chris Wojno
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package vsql_engine
import (
"github.com/wojnosystems/vsql"
"github.com/wojnosystems/vsql_engine/engine_ware"
)
// SingleTXer is the base engine with non-nested transactions
type SingleTXer interface {
SQLQueryer
	// This engine conforms to the vsql.SQLer interface
vsql.SQLer
// Enables the transactions to be used, but not nested transactions
engine_ware.BeginWare
// Group creates a copy of the middleware and context created thus far so you can have customized middleware for parts of your application
Group() SingleTXer
}
// MultiTXer is the version of the engine with nested transactions
type MultiTXer interface {
SQLQueryer
	// This engine conforms to the vsql.SQLNester interface
vsql.SQLNester
// Enables nested transactions
engine_ware.BeginNestedWare
// Group creates a copy of the middleware and context created thus far so you can have customized middleware for parts of your application
Group() MultiTXer
}
// SQLQueryer is the part of the engine that supports only the operations common to both the nested and non-nested engines;
// this includes regular queries (SELECT) and other operations on the database (DELETE/UPDATE/INSERT, etc.)
type SQLQueryer interface {
// Enables transactions to be committed
engine_ware.CommitWare
// Enables transactions to be rolled back
engine_ware.RollbackWare
// Enables (result-returning) queries to be run
engine_ware.QueryWare
// Enables INSERT INTO to be performed
engine_ware.InsertQueryWare
// Enables Exec to be called: DELETE, UPDATE, INSERT INTO
engine_ware.ExecWare
// Enables the ability to create statements
engine_ware.StatementPrepareWare
// Enables the ability to close statements
engine_ware.StatementCloseWare
// Enables Statement-based Queries
engine_ware.StatementQueryWare
// Enables Statement-based Inserts
engine_ware.StatementInsertQueryWare
// Enables Statement-based Execs
engine_ware.StatementExecQueryWare
// Enables result Rows records to be closed
engine_ware.RowsCloseWare
// Enables the next row to be fetched
engine_ware.RowsNextWare
// Enables the ability to ping the database server to check for connectivity
engine_ware.PingWare
// Enables the connection to be closed
engine_ware.ConnCloseWare
}
|
package stack
import (
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"golang.org/x/net/context"
)
type ServicesOptions struct {
quiet bool
format string
filter opts.FilterOpt
namespace string
}
func ListServices(ctx context.Context, client client.APIClient, options types.ServiceListOptions) ([]swarm.Service, error) {
return client.ServiceList(ctx, options)
}
|
package main
import (
"fmt"
"sync"
)
// worker_pool concept: the code below implements the worker-pool pattern.
// Goal: 1 goroutine produces 100 numbers into ch01; 2 goroutines consume ch01,
// square each value, and write the result into ch02, which is drained at the end.
var wg sync.WaitGroup
var ch01, ch02 chan int // note: shadowed by the buffered channels created in main
var once sync.Once      // ensures the wrapped function runs only once across goroutines
func f01(ch01 chan int) {
defer wg.Done()
defer close(ch01)
for i := 1; i < 101; i++ {
ch01 <- i
}
}
func f02(ch01, ch02 chan int) {
	defer wg.Done()
	for {
		x, ok := <-ch01
		if !ok {
			break
		}
		x *= x
		ch02 <- x
		// fmt.Println("squared value:", x)
	}
	once.Do(func() { // runs at most once, even though both consumers reach this point
		close(ch02)
	})
}
func main() {
	ch01 := make(chan int, 100)
	ch02 := make(chan int, 100)
wg.Add(3)
go f01(ch01)
go f02(ch01,ch02)
go f02(ch01,ch02)
wg.Wait()
// close(ch02)
	var i int = 0
	for { // a for-range over ch02 would also work and stop once the channel is closed; here we receive explicitly and check ok
		x, ok := <-ch02
		if !ok {
			break
		}
		i++
		fmt.Println("result:", x, i)
	}
	fmt.Println("main is still alive")
} |
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
)
/*
Lesson 6: the ioutil package.
Extra task: try passing the file path as an argument when running the program
(hint: this can be done with the flag package).
go run task12.go -filename=task12.txt -path=./task11
*/
func main() {
var fileNames string
fileName := flag.String("filename", "", "file name")
path := flag.String("path", "", "file path")
flag.Parse()
if *path != "" {
if _, err := os.Stat(*path); os.IsNotExist(err) {
fmt.Println("err", err)
fmt.Println("Вы не ввели путь к файлу")
return
}
}
if *fileName == "" {
fmt.Println("Вы не ввели имя файла")
return
}
	if *path == "" {
		fileNames = `./` + *fileName
	} else if (*path)[0] != '/' && (*path)[0] != '.' {
		fileNames = `./` + *path + `/` + *fileName
	} else {
		fileNames = *path + `/` + *fileName
	}
fmt.Println("Программа Работа с файлами")
Write(fileNames)
Read(fileNames)
}
func Write(fileName string) {
var result string
var b bytes.Buffer
i := 1
for {
fmt.Scan(&result)
		if strings.ToLower(result) == "выход" { // "выход" is Russian for "exit"
break
}
		dt := time.Now()
		b.WriteString(fmt.Sprintf("%d. %s %s.\n", i, dt.Format("2006-01-02 15:04:05"), result)) // Go reference layout: yyyy-mm-dd
i++
}
if err := ioutil.WriteFile(fileName, b.Bytes(), 0666); err != nil {
fmt.Println(err)
return
}
}
func Read(fileName string) {
	f, err := os.Open(fileName)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	result, err := ioutil.ReadAll(f)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("%s", result)
}
|
/*
* Convert a PDF to grayscale in a vectorized fashion, including images and all content.
*
 * This advanced example demonstrates some of UniPDF's more advanced capabilities,
 * showing how to process and transform PDF objects and content streams.
*
* Run as: go run pdf_grayscale_transform.go color.pdf output.pdf
*/
package main
import (
"errors"
"fmt"
"os"
unicommon "github.com/unidoc/unipdf/v3/common"
"github.com/unidoc/unipdf/v3/common/license"
pdfcontent "github.com/unidoc/unipdf/v3/contentstream"
pdfcore "github.com/unidoc/unipdf/v3/core"
pdf "github.com/unidoc/unipdf/v3/model"
"github.com/unidoc/unipdf/v3/ps"
)
const licenseKey = `
-----BEGIN UNIDOC LICENSE KEY-----
Free trial license keys are available at: https://unidoc.io/
-----END UNIDOC LICENSE KEY-----
`
func init() {
// Enable debug-level logging.
// unicommon.SetLogger(unicommon.NewConsoleLogger(unicommon.LogLevelDebug))
err := license.SetLicenseKey(licenseKey, `Company Name`)
if err != nil {
panic(err)
}
}
func main() {
if len(os.Args) < 3 {
fmt.Printf("Syntax: go run pdf_grayscale_transform.go input.pdf output.pdf\n")
os.Exit(1)
}
inputPath := os.Args[1]
outputPath := os.Args[2]
err := convertPdfToGrayscale(inputPath, outputPath)
if err != nil {
fmt.Printf("Failed: %v\n", err)
os.Exit(1)
}
fmt.Printf("Completed, see output %s\n", outputPath)
}
func convertPdfToGrayscale(inputPath, outputPath string) error {
pdfWriter := pdf.NewPdfWriter()
f, err := os.Open(inputPath)
if err != nil {
return err
}
defer f.Close()
pdfReader, err := pdf.NewPdfReader(f)
if err != nil {
return err
}
isEncrypted, err := pdfReader.IsEncrypted()
if err != nil {
return err
}
	// Try decrypting with an empty password.
if isEncrypted {
auth, err := pdfReader.Decrypt([]byte(""))
if err != nil {
// Encrypted and we cannot do anything about it.
return err
}
if !auth {
return errors.New("Need to decrypt with password")
}
}
numPages, err := pdfReader.GetNumPages()
if err != nil {
return err
}
fmt.Printf("PDF Num Pages: %d\n", numPages)
for i := 0; i < numPages; i++ {
fmt.Printf("Processing page %d/%d\n", i+1, numPages)
page, err := pdfReader.GetPage(i + 1)
if err != nil {
return err
}
err = convertPageToGrayscale(page)
if err != nil {
return err
}
err = pdfWriter.AddPage(page)
if err != nil {
return err
}
}
fWrite, err := os.Create(outputPath)
if err != nil {
return err
}
defer fWrite.Close()
err = pdfWriter.Write(fWrite)
if err != nil {
return err
}
return nil
}
// Replaces color objects on the page with grayscale ones. Also references XObject Images and Forms
// to convert those to grayscale.
func convertPageToGrayscale(page *pdf.PdfPage) error {
// For each page, we go through the resources and look for the images.
contents, err := page.GetAllContentStreams()
if err != nil {
return err
}
grayContent, err := transformContentStreamToGrayscale(contents, page.Resources)
if err != nil {
return err
}
page.SetContentStreams([]string{string(grayContent)}, pdfcore.NewFlateEncoder())
//fmt.Printf("Processed contents: %s\n", grayContent)
return nil
}
// Check if colorspace represents a Pattern colorspace.
func isPatternCS(cs pdf.PdfColorspace) bool {
_, isPattern := cs.(*pdf.PdfColorspaceSpecialPattern)
return isPattern
}
func transformContentStreamToGrayscale(contents string, resources *pdf.PdfPageResources) ([]byte, error) {
cstreamParser := pdfcontent.NewContentStreamParser(contents)
operations, err := cstreamParser.Parse()
if err != nil {
return nil, err
}
processedOperations := &pdfcontent.ContentStreamOperations{}
transformedPatterns := map[pdfcore.PdfObjectName]bool{} // List of already transformed patterns. Avoid multiple conversions.
transformedShadings := map[pdfcore.PdfObjectName]bool{} // List of already transformed shadings. Avoid multiple conversions.
// The content stream processor keeps track of the graphics state and we can make our own handlers to process certain commands,
// using the AddHandler method. In this case, we hook up to color related operands, and for image and form handling.
processor := pdfcontent.NewContentStreamProcessor(*operations)
// Add handlers for colorspace related functionality.
processor.AddHandler(pdfcontent.HandlerConditionEnumAllOperands, "",
func(op *pdfcontent.ContentStreamOperation, gs pdfcontent.GraphicsState, resources *pdf.PdfPageResources) error {
operand := op.Operand
switch operand {
case "CS": // Set colorspace operands (stroking).
if isPatternCS(gs.ColorspaceStroking) {
// If referring to a pattern colorspace with an external definition, need to update the definition.
// If has an underlying colorspace, then go and change it to DeviceGray.
// Needs to be specified externally in the colorspace resources.
csname := op.Params[0].(*pdfcore.PdfObjectName)
if *csname != "Pattern" {
// Update if referring to an external colorspace in resources.
cs, ok := resources.GetColorspaceByName(*csname)
if !ok {
unicommon.Log.Debug("Undefined colorspace for pattern (%s)", csname)
return errors.New("Colorspace not defined")
}
patternCS, ok := cs.(*pdf.PdfColorspaceSpecialPattern)
if !ok {
return errors.New("Type error")
}
if patternCS.UnderlyingCS != nil {
// Swap out for a gray colorspace.
patternCS.UnderlyingCS = pdf.NewPdfColorspaceDeviceGray()
}
err = resources.SetColorspaceByName(*csname, patternCS)
if err != nil {
return err
}
}
*processedOperations = append(*processedOperations, op)
return nil
}
op := pdfcontent.ContentStreamOperation{}
op.Operand = operand
op.Params = []pdfcore.PdfObject{pdfcore.MakeName("DeviceGray")}
*processedOperations = append(*processedOperations, &op)
return nil
case "cs": // Set colorspace operands (non-stroking).
if isPatternCS(gs.ColorspaceNonStroking) {
// If referring to a pattern colorspace with an external definition, need to update the definition.
// If has an underlying colorspace, then go and change it to DeviceGray.
// Needs to be specified externally in the colorspace resources.
csname := op.Params[0].(*pdfcore.PdfObjectName)
if *csname != "Pattern" {
// Update if referring to an external colorspace in resources.
cs, ok := resources.GetColorspaceByName(*csname)
if !ok {
unicommon.Log.Debug("Undefined colorspace for pattern (%s)", csname)
return errors.New("Colorspace not defined")
}
patternCS, ok := cs.(*pdf.PdfColorspaceSpecialPattern)
if !ok {
return errors.New("Type error")
}
if patternCS.UnderlyingCS != nil {
// Swap out for a gray colorspace.
patternCS.UnderlyingCS = pdf.NewPdfColorspaceDeviceGray()
}
resources.SetColorspaceByName(*csname, patternCS)
}
*processedOperations = append(*processedOperations, op)
return nil
}
op := pdfcontent.ContentStreamOperation{}
op.Operand = operand
op.Params = []pdfcore.PdfObject{pdfcore.MakeName("DeviceGray")}
*processedOperations = append(*processedOperations, &op)
return nil
case "SC", "SCN": // Set stroking color. Includes pattern colors.
if isPatternCS(gs.ColorspaceStroking) {
op := pdfcontent.ContentStreamOperation{}
op.Operand = operand
op.Params = []pdfcore.PdfObject{}
patternColor, ok := gs.ColorStroking.(*pdf.PdfColorPattern)
if !ok {
return errors.New("Invalid stroking color type")
}
if patternColor.Color != nil {
color, err := gs.ColorspaceStroking.ColorToRGB(patternColor.Color)
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
rgbColor := color.(*pdf.PdfColorDeviceRGB)
grayColor := rgbColor.ToGray()
op.Params = append(op.Params, pdfcore.MakeFloat(grayColor.Val()))
}
if _, has := transformedPatterns[patternColor.PatternName]; has {
// Already processed, need not change anything, except underlying color if used.
op.Params = append(op.Params, pdfcore.MakeName(string(patternColor.PatternName)))
*processedOperations = append(*processedOperations, &op)
return nil
}
transformedPatterns[patternColor.PatternName] = true
// Look up the pattern name and convert it.
pattern, found := resources.GetPatternByName(patternColor.PatternName)
if !found {
return errors.New("Undefined pattern name")
}
grayPattern, err := convertPatternToGray(pattern)
if err != nil {
unicommon.Log.Debug("Unable to convert pattern to grayscale: %v", err)
return err
}
resources.SetPatternByName(patternColor.PatternName, grayPattern.ToPdfObject())
op.Params = append(op.Params, pdfcore.MakeName(string(patternColor.PatternName)))
*processedOperations = append(*processedOperations, &op)
} else {
color, err := gs.ColorspaceStroking.ColorToRGB(gs.ColorStroking)
if err != nil {
fmt.Printf("Error with ColorToRGB: %v\n", err)
return err
}
rgbColor := color.(*pdf.PdfColorDeviceRGB)
grayColor := rgbColor.ToGray()
op := pdfcontent.ContentStreamOperation{}
op.Operand = operand
op.Params = []pdfcore.PdfObject{pdfcore.MakeFloat(grayColor.Val())}
*processedOperations = append(*processedOperations, &op)
}
return nil
case "sc", "scn": // Set nonstroking color.
if isPatternCS(gs.ColorspaceNonStroking) {
op := pdfcontent.ContentStreamOperation{}
op.Operand = operand
op.Params = []pdfcore.PdfObject{}
patternColor, ok := gs.ColorNonStroking.(*pdf.PdfColorPattern)
if !ok {
return errors.New("Invalid stroking color type")
}
if patternColor.Color != nil {
color, err := gs.ColorspaceNonStroking.ColorToRGB(patternColor.Color)
if err != nil {
fmt.Printf("Error : %v\n", err)
return err
}
rgbColor := color.(*pdf.PdfColorDeviceRGB)
grayColor := rgbColor.ToGray()
op.Params = append(op.Params, pdfcore.MakeFloat(grayColor.Val()))
}
if _, has := transformedPatterns[patternColor.PatternName]; has {
// Already processed, need not change anything, except underlying color if used.
op.Params = append(op.Params, pdfcore.MakeName(string(patternColor.PatternName)))
*processedOperations = append(*processedOperations, &op)
return nil
}
transformedPatterns[patternColor.PatternName] = true
// Look up the pattern name and convert it.
pattern, found := resources.GetPatternByName(patternColor.PatternName)
if !found {
return errors.New("Undefined pattern name")
}
grayPattern, err := convertPatternToGray(pattern)
if err != nil {
unicommon.Log.Debug("Unable to convert pattern to grayscale: %v", err)
return err
}
resources.SetPatternByName(patternColor.PatternName, grayPattern.ToPdfObject())
op.Params = append(op.Params, pdfcore.MakeName(string(patternColor.PatternName)))
*processedOperations = append(*processedOperations, &op)
} else {
color, err := gs.ColorspaceNonStroking.ColorToRGB(gs.ColorNonStroking)
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
rgbColor := color.(*pdf.PdfColorDeviceRGB)
grayColor := rgbColor.ToGray()
op := pdfcontent.ContentStreamOperation{}
op.Operand = operand
op.Params = []pdfcore.PdfObject{pdfcore.MakeFloat(grayColor.Val())}
*processedOperations = append(*processedOperations, &op)
}
return nil
case "RG", "K": // Set RGB or CMYK stroking color.
color, err := gs.ColorspaceStroking.ColorToRGB(gs.ColorStroking)
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
rgbColor := color.(*pdf.PdfColorDeviceRGB)
grayColor := rgbColor.ToGray()
op := pdfcontent.ContentStreamOperation{}
op.Operand = "G"
op.Params = []pdfcore.PdfObject{pdfcore.MakeFloat(grayColor.Val())}
*processedOperations = append(*processedOperations, &op)
return nil
case "rg", "k": // Set RGB or CMYK as nonstroking color.
color, err := gs.ColorspaceNonStroking.ColorToRGB(gs.ColorNonStroking)
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
rgbColor := color.(*pdf.PdfColorDeviceRGB)
grayColor := rgbColor.ToGray()
op := pdfcontent.ContentStreamOperation{}
op.Operand = "g"
op.Params = []pdfcore.PdfObject{pdfcore.MakeFloat(grayColor.Val())}
*processedOperations = append(*processedOperations, &op)
return nil
case "sh": // Paints the shape and color defined by shading dict.
if len(op.Params) != 1 {
return errors.New("Params to sh operator should be 1")
}
shname, ok := op.Params[0].(*pdfcore.PdfObjectName)
if !ok {
return errors.New("sh parameter should be a name")
}
if _, has := transformedShadings[*shname]; has {
// Already processed, no need to do anything.
*processedOperations = append(*processedOperations, op)
return nil
}
transformedShadings[*shname] = true
shading, found := resources.GetShadingByName(*shname)
if !found {
return errors.New("Shading not defined in resources")
}
grayShading, err := convertShadingToGray(shading)
if err != nil {
return err
}
resources.SetShadingByName(*shname, grayShading.GetContext().ToPdfObject())
}
*processedOperations = append(*processedOperations, op)
return nil
})
// Add handler for image related handling. Note that inline images are completely stored with a ContentStreamInlineImage
// object as the parameter for BI.
processor.AddHandler(pdfcontent.HandlerConditionEnumOperand, "BI",
func(op *pdfcontent.ContentStreamOperation, gs pdfcontent.GraphicsState, resources *pdf.PdfPageResources) error {
if len(op.Params) != 1 {
fmt.Printf("BI Error invalid number of params\n")
return errors.New("invalid number of parameters")
}
// Inline image.
iimg, ok := op.Params[0].(*pdfcontent.ContentStreamInlineImage)
if !ok {
fmt.Printf("Error: Invalid handling for inline image\n")
return errors.New("Invalid inline image parameter")
}
img, err := iimg.ToImage(resources)
if err != nil {
fmt.Printf("Error converting inline image to image: %v\n", err)
return err
}
cs, err := iimg.GetColorSpace(resources)
if err != nil {
fmt.Printf("Error getting color space for inline image: %v\n", err)
return err
}
rgbImg, err := cs.ImageToRGB(*img)
if err != nil {
fmt.Printf("Error converting image to rgb: %v\n", err)
return err
}
rgbColorSpace := pdf.NewPdfColorspaceDeviceRGB()
grayImage, err := rgbColorSpace.ImageToGray(rgbImg)
if err != nil {
fmt.Printf("Error converting img to gray: %v\n", err)
return err
}
// Update the XObject image.
// Use same encoder as input data. Make sure for DCT filter it is updated to 1 color component.
encoder, err := iimg.GetEncoder()
if err != nil {
fmt.Printf("Error getting encoder for inline image: %v\n", err)
return err
}
if dctEncoder, is := encoder.(*pdfcore.DCTEncoder); is {
dctEncoder.ColorComponents = 1
}
grayInlineImg, err := pdfcontent.NewInlineImageFromImage(grayImage, encoder)
if err != nil {
if err == pdfcore.ErrUnsupportedEncodingParameters {
// Unsupported encoding parameters, revert to a basic flate encoder without predictor.
encoder = pdfcore.NewFlateEncoder()
}
// Try again, fail on error.
grayInlineImg, err = pdfcontent.NewInlineImageFromImage(grayImage, encoder)
if err != nil {
fmt.Printf("Error making a new inline image object: %v\n", err)
return err
}
}
// Replace inline image data with the gray image.
pOp := pdfcontent.ContentStreamOperation{}
pOp.Operand = "BI"
pOp.Params = []pdfcore.PdfObject{grayInlineImg}
*processedOperations = append(*processedOperations, &pOp)
return nil
})
// Handler for XObject Image and Forms.
processedXObjects := map[string]bool{} // Keep track of processed XObjects to avoid repetition.
processor.AddHandler(pdfcontent.HandlerConditionEnumOperand, "Do",
func(op *pdfcontent.ContentStreamOperation, gs pdfcontent.GraphicsState, resources *pdf.PdfPageResources) error {
if len(op.Params) < 1 {
fmt.Printf("ERROR: Invalid number of params for Do object.\n")
return errors.New("Range check")
}
// XObject.
name := op.Params[0].(*pdfcore.PdfObjectName)
// Only process each one once.
_, has := processedXObjects[string(*name)]
if has {
return nil
}
processedXObjects[string(*name)] = true
_, xtype := resources.GetXObjectByName(*name)
if xtype == pdf.XObjectTypeImage {
//fmt.Printf(" XObject Image: %s\n", *name)
ximg, err := resources.GetXObjectImageByName(*name)
if err != nil {
fmt.Printf("Error w/GetXObjectImageByName : %v\n", err)
return err
}
img, err := ximg.ToImage()
if err != nil {
fmt.Printf("Error w/ToImage: %v\n", err)
return err
}
rgbImg, err := ximg.ColorSpace.ImageToRGB(*img)
if err != nil {
fmt.Printf("Error ImageToRGB: %v\n", err)
return err
}
rgbColorSpace := pdf.NewPdfColorspaceDeviceRGB()
grayImage, err := rgbColorSpace.ImageToGray(rgbImg)
if err != nil {
fmt.Printf("Error ImageToGray: %v\n", err)
return err
}
// Update the XObject image.
// Use same encoder as input data. Make sure for DCT filter it is updated to 1 color component.
encoder := ximg.Filter
if dctEncoder, is := encoder.(*pdfcore.DCTEncoder); is {
dctEncoder.ColorComponents = 1
}
ximgGray, err := pdf.NewXObjectImageFromImage(&grayImage, nil, encoder)
if err != nil {
if err == pdfcore.ErrUnsupportedEncodingParameters {
// Unsupported encoding parameters, revert to a basic flate encoder without predictor.
encoder = pdfcore.NewFlateEncoder()
}
// Try again, fail if error.
ximgGray, err = pdf.NewXObjectImageFromImage(&grayImage, nil, encoder)
if err != nil {
fmt.Printf("Error creating image: %v\n", err)
return err
}
}
// Update the entry.
err = resources.SetXObjectImageByName(*name, ximgGray)
if err != nil {
fmt.Printf("Failed setting x object: %v (%s)\n", err, string(*name))
return err
}
} else if xtype == pdf.XObjectTypeForm {
//fmt.Printf(" XObject Form: %s\n", *name)
// Go through the XObject Form content stream.
xform, err := resources.GetXObjectFormByName(*name)
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
formContent, err := xform.GetContentStream()
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
			// Process the content stream in the Form object too.
			// TODO/Consider: prefer the form's own resources and fall back to the page
			// resources when a lookup fails; we have not yet hit a case that needs it.
			formResources := xform.Resources
			if formResources == nil {
				formResources = resources
			}
			grayContent, err := transformContentStreamToGrayscale(string(formContent), formResources)
if err != nil {
fmt.Printf("Error: %v\n", err)
return err
}
xform.SetContentStream(grayContent, nil)
// Update the resource entry.
resources.SetXObjectFormByName(*name, xform)
}
return nil
})
err = processor.Process(resources)
if err != nil {
fmt.Printf("Error processing: %v\n", err)
return nil, err
}
// For debug purposes: (high level logging).
//
//fmt.Printf("=== Unprocessed - Full list\n")
//for idx, op := range operations {
// fmt.Printf("U. Operation %d: %s - Params: %v\n", idx+1, op.Operand, op.Params)
//}
//fmt.Printf("=== Processed - Full list\n")
//for idx, op := range *processedOperations {
// fmt.Printf("P. Operation %d: %s - Params: %v\n", idx+1, op.Operand, op.Params)
//}
return processedOperations.Bytes(), nil
}
// Convert a pattern to grayscale (tiling or shading pattern).
func convertPatternToGray(pattern *pdf.PdfPattern) (*pdf.PdfPattern, error) {
// Case 1: Colored tiling patterns. Need to process the content stream and replace.
if pattern.IsTiling() {
tilingPattern := pattern.GetAsTilingPattern()
if tilingPattern.IsColored() {
// A colored tiling pattern can use color operators in its stream, need to process the stream.
content, err := tilingPattern.GetContentStream()
if err != nil {
return nil, err
}
grayContents, err := transformContentStreamToGrayscale(string(content), tilingPattern.Resources)
if err != nil {
return nil, err
}
tilingPattern.SetContentStream(grayContents, nil)
// Update in-memory pdf objects.
_ = tilingPattern.ToPdfObject()
}
} else if pattern.IsShading() {
// Case 2: Shading patterns. Need to create a new colorspace that can map from N=3,4 colorspaces to grayscale.
shadingPattern := pattern.GetAsShadingPattern()
grayShading, err := convertShadingToGray(shadingPattern.Shading)
if err != nil {
return nil, err
}
shadingPattern.Shading = grayShading
// Update in-memory pdf objects.
_ = shadingPattern.ToPdfObject()
}
return pattern, nil
}
// Convert shading to grayscale.
// This one is slightly involved as a shading defines a color as function of position, i.e. color(x,y) = F(x,y).
// Since the function can be challenging to change, we define new DeviceN colorspace with a color conversion
// function.
func convertShadingToGray(shading *pdf.PdfShading) (*pdf.PdfShading, error) {
cs := shading.ColorSpace
if cs.GetNumComponents() == 1 {
// Already grayscale, should be fine. No action taken.
return shading, nil
} else if cs.GetNumComponents() == 3 {
// Create a new DeviceN colorspace that converts R,G,B -> Grayscale
// Use: gray := 0.3*R + 0.59G + 0.11B
// PS program: { 0.11 mul exch 0.59 mul add exch 0.3 mul add }.
transformFunc := &pdf.PdfFunctionType4{}
transformFunc.Domain = []float64{0, 1, 0, 1, 0, 1}
transformFunc.Range = []float64{0, 1}
rgbToGrayPsProgram := ps.NewPSProgram()
rgbToGrayPsProgram.Append(ps.MakeReal(0.11))
rgbToGrayPsProgram.Append(ps.MakeOperand("mul"))
rgbToGrayPsProgram.Append(ps.MakeOperand("exch"))
rgbToGrayPsProgram.Append(ps.MakeReal(0.59))
rgbToGrayPsProgram.Append(ps.MakeOperand("mul"))
rgbToGrayPsProgram.Append(ps.MakeOperand("add"))
rgbToGrayPsProgram.Append(ps.MakeOperand("exch"))
rgbToGrayPsProgram.Append(ps.MakeReal(0.3))
rgbToGrayPsProgram.Append(ps.MakeOperand("mul"))
rgbToGrayPsProgram.Append(ps.MakeOperand("add"))
transformFunc.Program = rgbToGrayPsProgram
// Define the DeviceN colorspace that performs the R,G,B -> Gray conversion for us.
transformcs := pdf.NewPdfColorspaceDeviceN()
transformcs.AlternateSpace = pdf.NewPdfColorspaceDeviceGray()
transformcs.ColorantNames = pdfcore.MakeArray(pdfcore.MakeName("R"), pdfcore.MakeName("G"), pdfcore.MakeName("B"))
transformcs.TintTransform = transformFunc
// Replace the old colorspace with the new.
shading.ColorSpace = transformcs
return shading, nil
} else if cs.GetNumComponents() == 4 {
// Create a new DeviceN colorspace that converts C,M,Y,K -> Grayscale.
// Use: gray = 1.0 - min(1.0, 0.3*C + 0.59*M + 0.11*Y + K) ; where BG(k) = k simply.
// PS program: {exch 0.11 mul add exch 0.59 mul add exch 0.3 mul add dup 1.0 ge { pop 1.0 } if}
transformFunc := &pdf.PdfFunctionType4{}
transformFunc.Domain = []float64{0, 1, 0, 1, 0, 1, 0, 1}
transformFunc.Range = []float64{0, 1}
cmykToGrayPsProgram := ps.NewPSProgram()
cmykToGrayPsProgram.Append(ps.MakeOperand("exch"))
cmykToGrayPsProgram.Append(ps.MakeReal(0.11))
cmykToGrayPsProgram.Append(ps.MakeOperand("mul"))
cmykToGrayPsProgram.Append(ps.MakeOperand("add"))
cmykToGrayPsProgram.Append(ps.MakeOperand("exch"))
cmykToGrayPsProgram.Append(ps.MakeReal(0.59))
cmykToGrayPsProgram.Append(ps.MakeOperand("mul"))
cmykToGrayPsProgram.Append(ps.MakeOperand("add"))
cmykToGrayPsProgram.Append(ps.MakeOperand("exch"))
cmykToGrayPsProgram.Append(ps.MakeReal(0.30))
cmykToGrayPsProgram.Append(ps.MakeOperand("mul"))
cmykToGrayPsProgram.Append(ps.MakeOperand("add"))
cmykToGrayPsProgram.Append(ps.MakeOperand("dup"))
cmykToGrayPsProgram.Append(ps.MakeReal(1.0))
cmykToGrayPsProgram.Append(ps.MakeOperand("ge"))
// Add sub procedure.
subProc := ps.NewPSProgram()
subProc.Append(ps.MakeOperand("pop"))
subProc.Append(ps.MakeReal(1.0))
cmykToGrayPsProgram.Append(subProc)
cmykToGrayPsProgram.Append(ps.MakeOperand("if"))
transformFunc.Program = cmykToGrayPsProgram
// Define the DeviceN colorspace that performs the R,G,B -> Gray conversion for us.
transformcs := pdf.NewPdfColorspaceDeviceN()
transformcs.AlternateSpace = pdf.NewPdfColorspaceDeviceGray()
transformcs.ColorantNames = pdfcore.MakeArray(pdfcore.MakeName("C"), pdfcore.MakeName("M"), pdfcore.MakeName("Y"), pdfcore.MakeName("K"))
transformcs.TintTransform = transformFunc
// Replace the old colorspace with the new.
shading.ColorSpace = transformcs
return shading, nil
} else {
unicommon.Log.Debug("Cannot convert to shading pattern grayscale, color space N = %d", cs.GetNumComponents())
return nil, errors.New("Unsupported pattern colorspace for grayscale conversion")
}
}
|
package main
import (
fiber "github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/pagongamedev/go-dd/example/ex003_repo_switcher/notexample"
"github.com/pagongamedev/go-dd/example/ex003_repo_switcher/repo01"
"github.com/pagongamedev/go-dd/example/ex003_repo_switcher/repo02"
"github.com/pagongamedev/go-dd/example/ex003_repo_switcher/service"
godd "github.com/pagongamedev/go-dd"
goddPortal "github.com/pagongamedev/go-dd/portal"
)
// You can test the API at http://localhost:8081/hello/v1/hello
// Example ex003_repo_switcher
func main() {
portal := goddPortal.New()
portal.AppendApp(appMain(), ":8081")
portal.StartServer()
}
func appMain() *fiber.App {
app := fiber.New()
// Use App Middleware
app.Use(cors.New())
app.Use(logger.New())
// Repository Pattern Switcher
env := "localhost"
funcRepository := godd.EnvironmentSwitcher(env, 0, 0, 1, 1, 1,
repo01.NewRepository, repo02.NewRepository)
repoHello, err := funcRepository.(func() (service.Repository, error))()
godd.MustError(err)
// Manage Service
serviceHello, err := service.NewService(repoHello)
godd.MustError(err)
notexample.Router(app, "/hello/v1", serviceHello)
return app
}
|
package middle
type User struct {
User string `form:"user" json:"user" binding:"required"`
Pwd string `form:"pwd" json:"pwd" binding:"required"`
}
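// Usage sketch (assuming Gin, which these form/json/binding tags target; the
// handler below is hypothetical and not part of this package):
//
//	func login(ctx *gin.Context) {
//		var u User
//		if err := ctx.ShouldBind(&u); err != nil { // fails when user or pwd is missing
//			ctx.AbortWithStatusJSON(400, err.Error())
//			return
//		}
//		// authenticate u.User / u.Pwd ...
//	}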
|
package geoip
import (
"errors"
"fmt"
"log"
"net"
"net/http"
"strconv"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/mmcloughlin/geohash"
"github.com/oschwald/maxminddb-golang"
"go.uber.org/zap"
)
// Geoip represents a middleware instance
type Geoip struct {
BlockList struct {
Country []string
IP []net.IP
}
AllowList struct {
AllowOnly bool
Country []string
IP []net.IP
}
RequestIP net.IP
RequestCountry struct {
Name string
Code string
}
DatabasePath string
DBHandler *maxminddb.Reader
logger *zap.Logger
}
// geoIPRecord struct
type geoIPRecord struct {
Country struct {
ISOCode string `maxminddb:"iso_code"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
Names map[string]string `maxminddb:"names"`
GeoNameID uint64 `maxminddb:"geoname_id"`
} `maxminddb:"country"`
City struct {
Names map[string]string `maxminddb:"names"`
GeoNameID uint64 `maxminddb:"geoname_id"`
} `maxminddb:"city"`
Location struct {
Latitude float64 `maxminddb:"latitude"`
Longitude float64 `maxminddb:"longitude"`
TimeZone string `maxminddb:"time_zone"`
} `maxminddb:"location"`
}
func init() {
caddy.RegisterModule(Geoip{})
httpcaddyfile.RegisterHandlerDirective("geoip", parseCaddyfile)
}
// CaddyModule returns the Caddy module information.
func (Geoip) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.geoip",
New: func() caddy.Module { return new(Geoip) },
}
}
// Provision implements caddy.Provisioner.
func (g *Geoip) Provision(ctx caddy.Context) error {
dbhandler, err := maxminddb.Open(g.DatabasePath)
if err != nil {
return fmt.Errorf("geoip: Can't open database: " + g.DatabasePath)
}
g.DBHandler = dbhandler
g.logger = ctx.Logger(g) // g.logger is a *zap.Logger
return nil
}
// ServeHTTP implements caddyhttp.GeoipHandler.
func (g Geoip) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
g.lookupLocation(w, r)
if g.AllowList.AllowOnly {
		// Only requests matching the allow list may reach the site.
goto CheckAllowList
} else {
for _, country := range g.BlockList.Country {
if country == g.RequestCountry.Code {
goto CheckAllowList
}
}
for _, ip := range g.BlockList.IP {
if ip.String() == g.RequestIP.String() {
goto CheckAllowList
}
}
return next.ServeHTTP(w, r)
}
CheckAllowList:
for _, country := range g.AllowList.Country {
if country == g.RequestCountry.Code {
return next.ServeHTTP(w, r)
}
}
for _, ip := range g.AllowList.IP {
if ip.String() == g.RequestIP.String() {
return next.ServeHTTP(w, r)
}
}
return caddyhttp.Error(http.StatusForbidden, fmt.Errorf("forbidden"))
}
func (g *Geoip) lookupLocation(w http.ResponseWriter, r *http.Request) {
record := g.fetchGeoipData(r)
g.RequestCountry.Code = record.Country.ISOCode
g.RequestCountry.Name = record.Country.Names["en"]
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
repl.Set("geoip_country_code", record.Country.ISOCode)
repl.Set("geoip_country_name", record.Country.Names["en"])
repl.Set("geoip_country_eu", strconv.FormatBool(record.Country.IsInEuropeanUnion))
repl.Set("geoip_country_geoname_id", strconv.FormatUint(record.Country.GeoNameID, 10))
repl.Set("geoip_city_name", record.City.Names["en"])
repl.Set("geoip_city_geoname_id", strconv.FormatUint(record.City.GeoNameID, 10))
repl.Set("geoip_latitude", strconv.FormatFloat(record.Location.Latitude, 'f', 6, 64))
repl.Set("geoip_longitude", strconv.FormatFloat(record.Location.Longitude, 'f', 6, 64))
repl.Set("geoip_geohash", geohash.Encode(record.Location.Latitude, record.Location.Longitude))
repl.Set("geoip_time_zone", record.Location.TimeZone)
}
func (g *Geoip) fetchGeoipData(r *http.Request) geoIPRecord {
clientIP, _ := getClientIP(r)
g.RequestIP = clientIP
var record = geoIPRecord{}
err := g.DBHandler.Lookup(clientIP, &record)
if err != nil {
g.logger.Warn("Lookup IP error: err", zap.String("err", err.Error()))
}
if record.Country.ISOCode == "" {
record.Country.Names = make(map[string]string)
record.City.Names = make(map[string]string)
if clientIP.IsLoopback() {
record.Country.ISOCode = "**"
record.Country.Names["en"] = "Loopback"
record.City.Names["en"] = "Loopback"
} else {
record.Country.ISOCode = "!!"
record.Country.Names["en"] = "No Country"
record.City.Names["en"] = "No City"
}
}
return record
}
func getClientIP(r *http.Request) (net.IP, error) {
var ip string
// Use the client ip from the 'X-Forwarded-For' header, if available.
if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" {
ips := strings.Split(fwdFor, ", ")
ip = ips[0]
} else {
// Otherwise, get the client ip from the request remote address.
var err error
ip, _, err = net.SplitHostPort(r.RemoteAddr)
if err != nil {
if serr, ok := err.(*net.AddrError); ok && serr.Err == "missing port in address" {
// Not critical: the remote address may simply lack a port, so try to parse it as-is.
ip = r.RemoteAddr
} else {
// Use err here; serr is nil when the type assertion fails.
log.Printf("Error when SplitHostPort: %v", err)
return nil, err
}
}
}
// Parse the ip address string into a net IP.
parsedIP := net.ParseIP(ip)
if parsedIP == nil {
return nil, errors.New("unable to parse address")
}
return parsedIP, nil
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (g *Geoip) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
args := d.RemainingArgs()
if len(args) != 1 {
return d.ArgErr()
}
g.DatabasePath = args[0]
for d.NextBlock(0) {
switch d.Val() {
case "block_list":
for d.NextBlock(1) {
switch d.Val() {
case "country":
args := d.RemainingArgs()
g.BlockList.Country = append(g.BlockList.Country, args...)
case "ip":
args := d.RemainingArgs()
for _, ip := range args {
g.BlockList.IP = append(g.BlockList.IP, net.ParseIP(ip))
}
default:
return d.Errf("unrecognized subdirective in geoip block_list %s", d.Val())
}
}
case "allow_list":
for d.NextBlock(1) {
switch d.Val() {
case "allow_only":
args := d.RemainingArgs()
allowOnly, err := strconv.ParseBool(args[0])
if len(args) != 1 || err != nil {
return d.ArgErr()
}
g.AllowList.AllowOnly = allowOnly
case "country":
args := d.RemainingArgs()
g.AllowList.Country = append(g.AllowList.Country, args...)
case "ip":
args := d.RemainingArgs()
for _, ip := range args {
g.AllowList.IP = append(g.AllowList.IP, net.ParseIP(ip))
}
default:
return d.Errf("unrecognized subdirective in geoip allow_list %s", d.Val())
}
}
default:
return d.Errf("unrecognized subdirective in geoip plugin %s", d.Val())
}
}
}
return nil
}
// parseCaddyfile unmarshals tokens from h into a new Geoip.
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
var m Geoip
err := m.UnmarshalCaddyfile(h.Dispenser)
return m, err
}
// Interface guards
var (
_ caddy.Provisioner = (*Geoip)(nil)
_ caddyhttp.MiddlewareHandler = (*Geoip)(nil)
_ caddyfile.Unmarshaler = (*Geoip)(nil)
)
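// Example Caddyfile usage (a sketch that matches the UnmarshalCaddyfile
// parser above; the database path and the country/IP values are hypothetical):
//
//	geoip /etc/caddy/GeoLite2-City.mmdb {
//		block_list {
//			country CN RU
//			ip 203.0.113.7
//		}
//		allow_list {
//			allow_only true
//			country US
//		}
//	}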
|
package log
import "time"
// Line is log data
type Line struct {
Level Level
Fields Fields
Source Source
Time time.Time
Args []interface{}
}
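// NewLine builds a Line from its parts; it performs no validation.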
func NewLine(lvl Level, fields Fields, src Source, time time.Time, args []interface{}) Line {
return Line{
Level: lvl,
Fields: fields,
Source: src,
Time: time,
Args: args,
}
}
|
package LeetCode
import (
"fmt"
)
func Code526() {
fmt.Println(countArrangement(4))
}
/**
Suppose we have N integers from 1 to N. An array built from these N numbers is
called a beautiful arrangement if, for every position i (1 <= i <= N), at
least one of the following holds:
  - the number at position i is divisible by i
  - i is divisible by the number at position i
Given an integer N, how many beautiful arrangements can be constructed?

Example 1:
Input: 2
Output: 2
Explanation:
The first beautiful arrangement is [1, 2]:
  position 1 (i=1) holds 1, and 1 is divisible by i=1
  position 2 (i=2) holds 2, and 2 is divisible by i=2
The second beautiful arrangement is [2, 1]:
  position 1 (i=1) holds 2, and 2 is divisible by i=1
  position 2 (i=2) holds 1, and i=2 is divisible by 1

Note: N is a positive integer and will not exceed 15.

Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/beautiful-arrangement
All rights reserved by LeetCode; contact LeetCode for commercial reprints and
credit the source for non-commercial reprints.
*/
func countArrangement(N int) int {
visited := make(map[int]bool, N)
var res int
dfs_526(N, 1, visited, &res)
return res
}
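// dfs_526 backtracks over positions 1..N: it places each unused number i at
// the current position whenever i and the position divide one another, and
// counts every complete placement.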
func dfs_526(N int, position int, visited map[int]bool, res *int) {
if position > N {
*res++
return
}
for i := 1; i <= N; i++ {
if !visited[i] && (position%i == 0 || i%position == 0) {
visited[i] = true
dfs_526(N, position+1, visited, res)
visited[i] = false
}
}
}
|
// Package mgostore provides MongoDB-backed storage for the osin OAuth2 package.
package mgostore
import (
"encoding/json"
"fmt"
"github.com/idobn/osin"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
var _ = fmt.Printf
// collection names
const (
CLIENT_COL = "clients"
AUTHORIZE_COL = "authorizations"
ACCESS_COL = "accesses"
)
const REFRESHTOKEN = "refreshToken"
// OAuthStorage holds the database name and the mgo session.
type OAuthStorage struct {
dbName string
Session *mgo.Session
}
// New initializes a storage instance; pass in the global mgo session.
func New(session *mgo.Session, dbName string) *OAuthStorage {
storage := &OAuthStorage{dbName, session}
index := mgo.Index{
Key: []string{REFRESHTOKEN},
Unique: false, // refreshtoken is sometimes empty
DropDups: false,
Background: true,
Sparse: true,
}
accesses := storage.Session.DB(dbName).C(ACCESS_COL)
err := accesses.EnsureIndex(index)
if err != nil {
panic(err)
}
return storage
}
// Clone returns a new storage backed by a cloned mgo session.
func (s *OAuthStorage) Clone() osin.Storage {
//clone mgo session and return the storage
clonedSession := s.Session.Clone()
newStorage := &OAuthStorage{s.dbName, clonedSession}
return newStorage
}
//close the session
func (s *OAuthStorage) Close() {
if s.Session != nil {
s.Session.Close()
}
}
func (s *OAuthStorage) GetClient(id string) (osin.Client, error) {
clients := s.Session.DB(s.dbName).C(CLIENT_COL)
client := &osin.DefaultClient{}
err := clients.Find(bson.M{"_id": id}).One(client)
return client, err
}
func (s *OAuthStorage) GetClients(query bson.M, pageSize, pageNum int) ([]osin.DefaultClient, error) {
// err = sess.DB("test").C("foo").Find(bson.M{}).Skip(pagesize * (n - 1)).Limit(10)
clients := s.Session.DB(s.dbName).C(CLIENT_COL)
clientArr := []osin.DefaultClient{}
err := clients.Find(query).Skip(pageSize * (pageNum - 1)).Limit(pageSize).All(&clientArr)
return clientArr, err
}
func (s *OAuthStorage) SetClient(id string, client osin.Client) error {
clients := s.Session.DB(s.dbName).C(CLIENT_COL)
_, err := clients.UpsertId(id, client)
return err
}
// RemoveClient todo
func (s *OAuthStorage) RemoveClient(id string) error {
clients := s.Session.DB(s.dbName).C(CLIENT_COL)
err := clients.RemoveId(id)
return err
}
func (s *OAuthStorage) SaveAuthorize(data *osin.AuthorizeData) error {
authorizations := s.Session.DB(s.dbName).C(AUTHORIZE_COL)
_, err := authorizations.UpsertId(data.Code, data)
return err
}
func (s *OAuthStorage) LoadAuthorize(code string) (*osin.AuthorizeData, error) {
authorizations := s.Session.DB(s.dbName).C(AUTHORIZE_COL)
authData := osin.AuthorizeData{Client: &osin.DefaultClient{}}
genericAuthorizeData := make(map[string]interface{})
if err := authorizations.FindId(code).One(&genericAuthorizeData); err != nil {
return &authData, err
}
jsonData, err := json.Marshal(&genericAuthorizeData)
if err != nil {
return &authData, err
}
//then unmarshal again
if err := json.Unmarshal(jsonData, &authData); err != nil {
return &authData, err
}
// everything is fine; return the authorize data
return &authData, nil
}
func (s *OAuthStorage) RemoveAuthorize(code string) error {
authorizations := s.Session.DB(s.dbName).C(AUTHORIZE_COL)
return authorizations.RemoveId(code)
}
func (s *OAuthStorage) SaveAccess(data *osin.AccessData) error {
accesses := s.Session.DB(s.dbName).C(ACCESS_COL)
// avoid nesting more than one level of previous records
if data.AccessData != nil {
data.AccessData.AccessData = nil
}
_, err := accesses.UpsertId(data.AccessToken, data)
return err
}
func (s *OAuthStorage) LoadAccess(token string) (*osin.AccessData, error) {
accesses := s.Session.DB(s.dbName).C(ACCESS_COL)
newClient := osin.DefaultClient{}
authorizeData := osin.AuthorizeData{
Client: &newClient,
}
prevNewClient := osin.DefaultClient{}
// TODO: check here to avoid infinite recursion -- Client is an interface
accData := osin.AccessData{
Client: &newClient,
AuthorizeData: &authorizeData,
AccessData: &osin.AccessData{
Client: &prevNewClient,
AuthorizeData: &osin.AuthorizeData{
Client: &prevNewClient,
},
},
}
genericAccessData := make(map[string]interface{})
if err := accesses.FindId(token).One(&genericAccessData); err != nil {
return &accData, err
}
jsonData, err := json.Marshal(&genericAccessData)
if err != nil {
return &accData, err
}
//then unmarshal again
if err := json.Unmarshal(jsonData, &accData); err != nil {
return &accData, err
}
// everything is fine; return the access data
return &accData, err
}
func (s *OAuthStorage) RemoveAccess(token string) error {
accesses := s.Session.DB(s.dbName).C(ACCESS_COL)
return accesses.RemoveId(token)
}
//loading access data based on refresh token instead
func (s *OAuthStorage) LoadRefresh(token string) (*osin.AccessData, error) {
accesses := s.Session.DB(s.dbName).C(ACCESS_COL)
newClient := osin.DefaultClient{}
authorizeData := osin.AuthorizeData{
Client: &newClient,
}
prevNewClient := osin.DefaultClient{}
// TODO: check here to avoid infinite recursion -- Client is an interface
accData := osin.AccessData{
Client: &newClient,
AuthorizeData: &authorizeData,
AccessData: &osin.AccessData{
Client: &prevNewClient,
AuthorizeData: &osin.AuthorizeData{
Client: &prevNewClient,
},
},
}
genericAccessData := make(map[string]interface{})
if err := accesses.Find(bson.M{REFRESHTOKEN: token}).One(&genericAccessData); err != nil {
return &accData, err
}
jsonData, err := json.Marshal(&genericAccessData)
if err != nil {
return &accData, err
}
//then unmarshal again
if err := json.Unmarshal(jsonData, &accData); err != nil {
return &accData, err
}
// everything is fine; return the access data
return &accData, err
}
func (s *OAuthStorage) RemoveRefresh(token string) error {
accesses := s.Session.DB(s.dbName).C(ACCESS_COL)
return accesses.Update(bson.M{REFRESHTOKEN: token}, bson.M{
"$unset": bson.M{
REFRESHTOKEN: 1,
}})
}
|
/*
Copyright 2023 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package web
import (
"net/http"
"github.com/gravitational/trace"
"github.com/julienschmidt/httprouter"
"github.com/gravitational/teleport/lib/reversetunnel"
)
func (h *Handler) createAssistantConversation(_ http.ResponseWriter, r *http.Request,
_ httprouter.Params, sctx *SessionContext,
) (any, error) {
return nil, trace.NotImplemented("not implemented")
}
func (h *Handler) getAssistantConversationByID(_ http.ResponseWriter, r *http.Request,
p httprouter.Params, sctx *SessionContext,
) (any, error) {
return nil, trace.NotImplemented("not implemented")
}
func (h *Handler) getAssistantConversations(_ http.ResponseWriter, r *http.Request,
_ httprouter.Params, sctx *SessionContext,
) (any, error) {
return nil, trace.NotImplemented("not implemented")
}
func (h *Handler) setAssistantTitle(_ http.ResponseWriter, r *http.Request,
p httprouter.Params, sctx *SessionContext,
) (any, error) {
return nil, trace.NotImplemented("not implemented")
}
func (h *Handler) generateAssistantTitle(_ http.ResponseWriter, r *http.Request,
p httprouter.Params, sctx *SessionContext,
) (any, error) {
return nil, trace.NotImplemented("not implemented")
}
func (h *Handler) assistant(w http.ResponseWriter, r *http.Request, _ httprouter.Params,
sctx *SessionContext, site reversetunnel.RemoteSite,
) (any, error) {
return nil, trace.NotImplemented("not implemented")
}
|
package tx
import (
"encoding/json"
"github.com/transmutate-io/cryptocore/types"
)
var (
_ Tx = (*TxBCH)(nil)
_ TxUTXO = (*TxBCH)(nil)
)
type TxBCH struct{ txBTC }
func (tx *TxBCH) ID() types.Bytes { return tx.txBTC.ID }
func (tx *TxBCH) Hash() types.Bytes { return tx.txBTC.Hash }
func (tx *TxBCH) BlockHash() types.Bytes { return tx.txBTC.BlockHash }
func (tx *TxBCH) Confirmations() int { return tx.txBTC.Confirmations }
func (tx *TxBCH) BlockTime() types.UnixTime { return tx.txBTC.BlockTime }
func (tx *TxBCH) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &tx.txBTC) }
func (tx *TxBCH) UTXO() (TxUTXO, bool) { return tx, true }
func (tx *TxBCH) LockTime() types.UnixTime { return tx.txBTC.LockTime }
func (tx *TxBCH) Inputs() []Input {
r := make([]Input, 0, len(tx.txBTC.Inputs))
for _, i := range tx.txBTC.Inputs {
r = append(r, &wrapInput{i})
}
return r
}
func (tx *TxBCH) Outputs() []Output {
r := make([]Output, 0, len(tx.txBTC.Outputs))
for _, i := range tx.txBTC.Outputs {
r = append(r, &wrapOutput{i})
}
return r
}
|
package gg
import (
"testing"
)
func TestBasic(t *testing.T) {
a := Quadtree{B, nil, nil, nil, nil}
b := Quadtree{W, nil, nil, nil, nil}
if a.Color != B || b.Color != W {
t.Fail()
}
c := NewQuadtreeLeaf(B, B, W, W)
if c.NW.Color != B || c.NE.Color != B || c.SE.Color != W || c.SW.Color != W {
t.Fail()
}
d := NewQuadtreeLeaf(W, W, B, W)
if d.NW.Color != W || d.NE.Color != W || d.SE.Color != B || d.SW.Color != W {
t.Fail()
}
e := NewQuadtree(
NewQuadtreeLeaf(B, B, B, W),
NewQuadtreeLeaf(W, W, B, W),
NewQuadtreeWhite(),
NewQuadtreeBlack(),
)
e_nw := e.NW
e_ne := e.NE
if e.Color != G || e.NW.Color != G || e.NE.Color != G || e.SE.Color != W || e.SW.Color != B {
t.Fail()
}
if e_nw.NW.Color != B || e_nw.NE.Color != B || e_nw.SE.Color != B || e_nw.SW.Color != W {
t.Fail()
}
if e_ne.NW.Color != W || e_ne.NE.Color != W || e_ne.SE.Color != B || e_ne.SW.Color != W {
t.Fail()
}
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
)
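// Each input line describes a present as "LxWxH" (e.g. 2x3x4). The wrapping
// paper total is the surface area plus the area of the smallest side; the
// ribbon total is the volume plus the smallest face perimeter.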
func main() {
if len(os.Args) != 2 {
os.Exit(1)
}
file, err := os.Open(os.Args[1])
check(err)
var wrap int
var ribbon int
scanner := bufio.NewScanner(file)
for scanner.Scan() {
var l, w, h int
n, err := fmt.Sscanf(scanner.Text(), "%dx%dx%d", &l, &w, &h)
if n < 3 {
panic(err)
}
wrap += SurfaceArea(l, w, h) + SmallestSideArea(l, w, h)
ribbon += Volume(l, w, h) + SmallestPerimeter(l, w, h)
}
fmt.Printf("Total square feet: %d\n", wrap)
fmt.Printf("Total ribbon length: %d\n", ribbon)
}
func check(e error) {
if e != nil {
panic(e)
}
}
func SurfaceArea(l int, w int, h int) int {
return 2*l*w + 2*w*h + 2*h*l
}
func SmallestSideArea(l int, w int, h int) int {
sorted := []int{l, w, h}
sort.Ints(sorted)
return sorted[0] * sorted[1]
}
func Volume(l int, w int, h int) int {
return l * w * h
}
func SmallestPerimeter(l int, w int, h int) int {
sorted := []int{l, w, h}
sort.Ints(sorted)
return 2*sorted[0] + 2*sorted[1]
}
|
package main
import (
"encoding/xml"
"mime/multipart"
"reflect"
"strings"
"golang.org/x/net/context"
"github.com/PuerkitoBio/goquery"
"github.com/golang/glog"
)
type EldoradoFeedParser struct {
FeedReader
}
func (e EldoradoFeedParser) ParseFeed(ctx context.Context, feedFile multipart.File) {
e.waitGroup.Add(1)
e.parserState.SetStat("reading-uri", 1)
go func() {
defer func() {
glog.Infoln("Uri reader finished")
e.waitGroup.Done()
e.parserState.SetStat("reading-uri", -1)
feedFile.Close()
}()
glog.Infoln("Uri reader started")
XMLParse(ctx, feedFile, e.productExtractorChan, e.startTokensChan,
YML_NAMES_MAP, reflect.TypeOf(EldoradoOffer{}))
}()
}
type EldoradoOffer struct {
XMLName xml.Name `xml:"offer"`
Id string `xml:"id,attr"`
Available string `xml:"available,attr"`
Type string `xml:"type,attr"`
Uri string `xml:"url"`
Price string `xml:"price"`
CurrencyId string `xml:"currencyId"`
CategoryId string `xml:"categoryId"`
Picture string `xml:"picture"`
Vendor string `xml:"vendor"`
Model string `xml:"model"`
Description string `xml:"description"`
Cpa string `xml:"cpa"`
Name string `xml:"name"`
Attributes []Attribute
}
func (o *EldoradoOffer) GetProductInfo() (interface{}, error) {
uri := strings.Split(o.Uri, "?")[0]
body, err := GetBody(uri)
if err != nil {
return nil, err
}
bodyReader := strings.NewReader(body)
doc, err := goquery.NewDocumentFromReader(bodyReader)
if err != nil {
return nil, err
}
name := doc.Find(".pp-description .text-b-o-c span").First().Text()
description := doc.Find(".pp-description-text").First().Text()
o.Description = description
o.Name = name
o.Uri = uri
attrHandler := func(i int, s *goquery.Selection) {
name := strings.TrimSuffix(s.Find("th div div").First().Text(), ":")
if name == "" {
return
}
value := s.Find("td").First().Text()
attr := Attribute{Name: name, Value: value}
o.Attributes = append(o.Attributes, attr)
}
doc.Find(".pp-characteristics-table").Find("tr").Each(attrHandler)
return o, nil
}
|
/*
* User Services Platform - Controller API
*
* This is the REST API for the User Services Platform (USP) Controller. This is how an external entity can interact with a USP Controller to retrieve information from or configure a USP Agent.
*
* API version: 1.0.0
* Contact: info@broadband-forum.org
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package swagger
import (
"net/http"
)
func CreateAgent(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func CreateAgentServiceElement(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func DeleteAgent(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func DeleteAgentServiceElement(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func ExecuteAgentServiceElementAction(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func GetAgentDetails(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func GetAgentServiceElement(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func GetAgentServiceElementActionResults(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func GetAgentStatus(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func SearchAgents(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func UpdateAgent(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
func UpdateAgentServiceElements(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusOK)
}
|
package main
import "fmt"
import "golang-book/ques3/sleep"
func main() {
c1 := make(chan string)
c2 := make(chan string)
go func() {
for {
c1 <- "from1"
sleep.Sleep(2)
}
}()
go func() {
for {
c2 <- "from2"
sleep.Sleep(3)
}
}()
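// A single consumer goroutine multiplexes both channels: select blocks until
// a message is ready on c1 or c2, then prints it.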
go func() {
for {
select {
case msg1 := <-c1:
fmt.Println(msg1)
case msg2 := <-c2:
fmt.Println(msg2)
}
}
}()
var input string
fmt.Scanln(&input)
}
|
package plugin
import (
"fmt"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
"github.com/golang/protobuf/protoc-gen-go/generator"
valid "github.com/lanceryou/go-validator"
"strings"
)
func init() {
generator.RegisterPlugin(new(validator))
}
type validator struct {
gen *generator.Generator
}
func (v *validator) Name() string {
return "go-validator"
}
func (v *validator) Init(g *generator.Generator) {
v.gen = g
}
func (v *validator) Generate(file *generator.FileDescriptor) {
// only generate validator code
v.gen.Reset()
validatorMessages := getValidatorMessage(file.MessageType)
for _, message := range file.MessageType {
if file.GetSyntax() == "proto3" {
v.generateProto3Validator(file, message, "", validatorMessages)
}
}
}
func (v *validator) GenerateImports(file *generator.FileDescriptor) {}
// P forwards to v.gen.P.
func (v *validator) P(args ...interface{}) { v.gen.P(args...) }
func (v *validator) generateProto3Validator(file *generator.FileDescriptor, desc *descriptor.DescriptorProto, prefix string, msgs []string) {
if !hasValidatorField(desc) {
return
}
ccTypeName := prefix + generator.CamelCase(desc.GetName())
v.P(`func (this *`, ccTypeName, `) Validate() error {`)
v.gen.In()
// support nested message
for _, field := range desc.Field {
v.generateField(field, desc, msgs)
}
v.P(`return nil`)
v.gen.Out()
v.P(`}`)
v.P()
for _, nested := range desc.NestedType {
v.generateProto3Validator(file, nested, ccTypeName+"_", msgs)
}
}
// number/string fields: compare by value according to their type
// message fields: emit a nested Validate() call
// repeated fields: validate the length
func (v *validator) generateField(field *descriptor.FieldDescriptorProto, desc *descriptor.DescriptorProto, msgs []string) {
fieldValidator := getValidatorField(field)
if fieldValidator == nil {
return
}
if field.Type == nil {
return
}
variableName := "this." + generator.CamelCase(*field.Name)
if isMessage(field) && !isRepeated(field) {
// validate nil and validate message if not nil
v.generateMessageValidator(variableName, fieldValidator, contains(desc.GetName(), msgs))
} else if isRepeated(field) || isString(field) {
// validate length
v.generateArrayValidator(variableName, fieldValidator)
} else {
v.generateFieldValidator(variableName, fieldValidator)
}
}
func (v *validator) generateMessageValidator(variableName string, fv *valid.FieldValidator, isValidatorMessage bool) {
if fv.Neq == "nil" {
v.P(`if !(`, variableName, `!=`, fv.Neq, `) {`)
v.gen.In()
v.P(`return fmt.Errorf("validation error: `, variableName, ` must not be nil")`)
v.gen.Out()
v.P(`}`)
v.P()
}
// if err := variableName.Validate(); err != nil{
// return err
// }
if !isValidatorMessage {
return
}
v.P(`if err := `, variableName, `.Validate(); err != nil{`)
v.gen.In()
v.P(`return err`)
v.gen.Out()
v.P(`}`)
v.P()
}
func (v *validator) generateArrayValidator(variableName string, fv *valid.FieldValidator) {
type Field struct {
Opt string
Value string
Err string
}
fields := []Field{
{
Opt: " < ",
Value: fv.Lt,
Err: fmt.Sprintf("len(%s) must be less than %s", variableName, fv.Lt),
},
{
Opt: " > ",
Value: fv.Gt,
Err: fmt.Sprintf("len(%s) must be greater than %s", variableName, fv.Gt),
},
{
Opt: " == ",
Value: fv.Eq,
Err: fmt.Sprintf("len(%s) must equal %s", variableName, fv.Eq),
},
{
Opt: " != ",
Value: fv.Neq,
Err: fmt.Sprintf("len(%s) must not equal %s", variableName, fv.Neq),
},
}
for _, field := range fields {
if field.Value != "" {
v.P(`if !(len( `, variableName, `)`, field.Opt, field.Value, `) {`)
v.gen.In()
v.P(`return fmt.Errorf("validation error: `, field.Err, `")`)
v.gen.Out()
v.P(`}`)
v.P()
}
}
}
func (v *validator) generateFieldValidator(variableName string, fv *valid.FieldValidator) {
type Field struct {
Opt string
Value string
Err string
}
fields := []Field{
{
Opt: variableName + " < " + fv.Lt,
Value: fv.Lt,
Err: fmt.Sprintf("%s must be less than %s", variableName, fv.Lt),
},
{
Opt: variableName + " > " + fv.Gt,
Value: fv.Gt,
Err: fmt.Sprintf("%s must be greater than %s", variableName, fv.Gt),
},
{
Opt: or(variableName, " == ", fv.Eq),
Value: fv.Eq,
Err: fmt.Sprintf("%s must equal %s", variableName, fv.Eq),
},
{
Opt: variableName + " != " + fv.Neq,
Value: fv.Neq,
Err: fmt.Sprintf("%s must not equal %s", variableName, fv.Neq),
},
}
for _, field := range fields {
if field.Value != "" {
v.P(`if !(`, field.Opt, `) {`)
v.gen.In()
v.P(`return fmt.Errorf("validation error: `, field.Err, `")`)
v.gen.Out()
v.P(`}`)
v.P()
}
}
}
func getValidatorField(field *descriptor.FieldDescriptorProto) *valid.FieldValidator {
if field.Options == nil {
return nil
}
v, err := proto.GetExtension(field.Options, valid.E_Field)
if err == nil && v.(*valid.FieldValidator) != nil {
return v.(*valid.FieldValidator)
}
return nil
}
func hasValidatorField(desc *descriptor.DescriptorProto) (has bool) {
for _, field := range desc.Field {
if field.Options != nil {
return true
}
}
return false
}
func getValidatorMessage(descs []*descriptor.DescriptorProto) (msgs []string) {
for _, msg := range descs {
if hasValidatorField(msg) {
msgs = append(msgs, *msg.Name)
}
msgs = append(msgs, getValidatorMessage(msg.NestedType)...)
}
return
}
func or(variableName string, opt string, str string) (value string) {
all := strings.Split(strings.Replace(str, " ", "", -1), ",")
suffix := "||\n"
for i, s := range all {
if i == len(all)-1 {
suffix = ""
}
value += variableName + opt + s + suffix
}
return value
}
func contains(dst string, src []string) bool {
for _, str := range src {
if str == dst {
return true
}
}
return false
}
func isRepeated(field *descriptor.FieldDescriptorProto) bool {
return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
}
func isMessage(field *descriptor.FieldDescriptorProto) bool {
return *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE
}
func isString(field *descriptor.FieldDescriptorProto) bool {
return *field.Type == descriptor.FieldDescriptorProto_TYPE_STRING
}
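// Example of the generated output (a sketch; the message name and the
// validator option syntax are hypothetical):
//
//	// given a proto3 string field such as:
//	//   string name = 1 [(validator.field) = {gt: "2"}];
//	// the plugin emits:
//	func (this *Ping) Validate() error {
//		if !(len( this.Name) > 2) {
//			return fmt.Errorf("validation error: len(this.Name) must be greater than 2")
//		}
//		return nil
//	}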
|
package main
import (
"runtime"
)
func main() {
a1 := []int{4, 3, 4}
b1 := len(a1)
print(runtime.GOOS, b1)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package filemanager
import (
"context"
"strings"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/drivefs"
"chromiumos/tast/testing"
)
func init() {
testing.AddTest(&testing.Test{
Func: DrivefsBlobDownload,
LacrosStatus: testing.LacrosVariantUnneeded,
Desc: "Verify that a file created in Drive Web can be downloaded",
Contacts: []string{
"travislane@google.com",
"chromeos-files-syd@google.com",
},
SoftwareDeps: []string{
"chrome",
"chrome_internal",
"drivefs",
},
Attr: []string{
"group:mainline",
"group:drivefs-cq",
"informational",
},
Data: []string{
"test_1KB.txt",
},
Timeout: 5 * time.Minute,
Params: []testing.Param{{
Fixture: "driveFsStarted",
}, {
Name: "chrome_networking",
Fixture: "driveFsStartedWithChromeNetworking",
}},
})
}
func DrivefsBlobDownload(ctx context.Context, s *testing.State) {
const (
retryAttempts = 20
retryInterval = 5 * time.Second
)
fixt := s.FixtValue().(*drivefs.FixtureData)
apiClient := fixt.APIClient
driveFsClient := fixt.DriveFs
// Give the Drive API enough time to remove the file.
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
defer cancel()
defer driveFsClient.SaveLogsOnError(cleanupCtx, s.HasError)
// Create the test file with the Drive API
testFileName := drivefs.GenerateTestFileName(s.TestName()) + ".txt"
driveFile, err := apiClient.CreateFileFromLocalFile(ctx,
testFileName, "root", s.DataPath("test_1KB.txt"))
if err != nil {
s.Fatal("Could not create test file: ", err)
}
s.Logf("Created %s with ID: %s", testFileName, driveFile.Id)
// Cleanup: Remove the file on the cloud
defer apiClient.RemoveFileByID(cleanupCtx, driveFile.Id)
// Wait for file to be available locally
testFilePath := driveFsClient.MyDrivePath(testFileName)
testFile, err := driveFsClient.NewFile(testFilePath)
if err != nil {
s.Fatal("Could not build DriveFS file: ", err)
}
err = action.RetrySilently(retryAttempts, testFile.ExistsAction(), retryInterval)(ctx)
if err != nil {
s.Fatal("File not available locally: ", err)
}
// Now compare the uploaded data with what we have locally
md5Sum, err := drivefs.MD5SumFile(testFilePath)
if err != nil {
s.Fatal("Failed to checksum file: ", err)
}
if !strings.EqualFold(md5Sum, driveFile.Md5Checksum) {
s.Errorf("Checksum mismatch! Got: %v Expected: %v", md5Sum, driveFile.Md5Checksum)
}
}
|
package go_implement_c
import "fmt"
//void SayHello(char *name);
import "C"
//export SayHello
func SayHello(s *C.char) {
fmt.Println(C.GoString(s))
}
func BasicApp() {
SayHello(C.CString("Implement C"))
}
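// A sketch of how the export is typically consumed: //export makes SayHello
// callable from C when a main package that imports this one is built with
// -buildmode=c-shared or -buildmode=c-archive, e.g.
//
//	go build -buildmode=c-shared -o libhello.so
//
// Note that C.CString allocates on the C heap; a longer-lived caller should
// free the pointer with C.free, which requires <stdlib.h> in the preamble.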
|
package logx_test
import (
"bytes"
"fmt"
"io"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/socialpoint-labs/bsk/logx"
)
func TestDefaultAndLogstashLogging(t *testing.T) {
t.Parallel()
a := assert.New(t)
rec := make(recorder, 1)
defaultLogger := logx.New(logx.WriterOpt(rec), logx.WithoutTimeOpt())
defaultLoggerWithoutFileInfo := logx.New(logx.WriterOpt(rec), logx.WithoutTimeOpt(), logx.WithoutFileInfo())
logstashLogger := logx.NewLogstash("mychan", "myprod", "myapp", logx.WriterOpt(rec), logx.WithoutTimeOpt())
logstashLoggerWithOriginalValues := logx.New(logx.MarshalerOpt(logx.NewLogstashMarshaler("mychan", "myprod", "myapp", logx.WithOriginalValueTypes())), logx.WriterOpt(rec), logx.WithoutTimeOpt())
logstashLoggerWithoutFileInfo := logx.NewLogstash("mychan", "myprod", "myapp", logx.WriterOpt(rec), logx.WithoutTimeOpt(), logx.WithoutFileInfo())
logstashLoggerWithEnvironment := logx.New(logx.MarshalerOpt(logx.NewLogstashMarshaler("mychan", "myprod", "myapp", logx.WithEnvironment("prod"))), logx.WriterOpt(rec), logx.WithoutTimeOpt())
hostname, _ := os.Hostname()
for _, tc := range []struct {
logger logx.Logger
message string
fields []logx.Field
output string
}{
{logger: defaultLogger, output: "INFO File: logx_test.go:58\n"},
{logger: defaultLogger, message: "Test", output: "INFO Test File: logx_test.go:58\n"},
{logger: defaultLoggerWithoutFileInfo, message: "Test 2", fields: []logx.Field{logx.F("foo", "some stuff")}, output: "INFO Test 2 FIELDS foo=some stuff\n"},
// "type" is a logstash reserved keyword but just changes in logstash log
{logger: defaultLogger, message: "Test 3", fields: []logx.Field{logx.F("type", "val")}, output: "INFO Test 3 FIELDS type=val File: logx_test.go:56\n"},
{logger: defaultLogger, message: "Test 4", fields: []logx.Field{logx.F("number", 111)}, output: "INFO Test 4 FIELDS number=111 File: logx_test.go:56\n"},
{logger: defaultLoggerWithoutFileInfo, message: "Test 5", fields: []logx.Field{logx.F("type", "val"), logx.F("myint", 111), logx.F("myfloat", 3.1416)}, output: "INFO Test 5 FIELDS type=val myint=111 myfloat=3.1416\n"},
{logger: logstashLogger, output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"file\":\"logx_test.go:58\",\"message\":\"\",\"product\":\"myprod\",\"severity\":\"INFO\"}\n", hostname)},
{logger: logstashLogger, message: "Test", output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"file\":\"logx_test.go:58\",\"message\":\"Test\",\"product\":\"myprod\",\"severity\":\"INFO\"}\n", hostname)},
{logger: logstashLoggerWithoutFileInfo, message: "Test 2", fields: []logx.Field{logx.F("foo", "some stuff")}, output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"foo\":\"some stuff\",\"message\":\"Test 2\",\"product\":\"myprod\",\"severity\":\"INFO\"}\n", hostname)},
// "type" is a logstash reserved keyword but just changes in logstash log
{logger: logstashLogger, message: "Test 3", fields: []logx.Field{logx.F("type", "val")}, output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"file\":\"logx_test.go:56\",\"message\":\"Test 3\",\"product\":\"myprod\",\"severity\":\"INFO\",\"typex\":\"val\"}\n", hostname)},
{logger: logstashLogger, message: "Test 4", fields: []logx.Field{logx.F("number", 111)}, output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"file\":\"logx_test.go:56\",\"message\":\"Test 4\",\"number\":\"111\",\"product\":\"myprod\",\"severity\":\"INFO\"}\n", hostname)},
{logger: logstashLoggerWithoutFileInfo, message: "Test 5", fields: []logx.Field{logx.F("type", "val"), logx.F("number", 111)}, output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"message\":\"Test 5\",\"number\":\"111\",\"product\":\"myprod\",\"severity\":\"INFO\",\"typex\":\"val\"}\n", hostname)},
{logger: logstashLoggerWithOriginalValues, message: "Test With Original Values But No Fields", output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"file\":\"logx_test.go:58\",\"message\":\"Test With Original Values But No Fields\",\"product\":\"myprod\",\"severity\":\"INFO\"}\n", hostname)},
{logger: logstashLoggerWithOriginalValues, message: "Test With Original Values", fields: []logx.Field{logx.F("string", "hi there"), logx.F("number", 123), logx.F("array", []int{1, 2, 3}), logx.F("map", map[string]int{"foo": 123, "bar": 456})}, output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"array\":[1,2,3],\"channel\":\"mychan\",\"file\":\"logx_test.go:56\",\"map\":{\"bar\":456,\"foo\":123},\"message\":\"Test With Original Values\",\"number\":123,\"product\":\"myprod\",\"severity\":\"INFO\",\"string\":\"hi there\"}\n", hostname)},
{logger: logstashLoggerWithEnvironment, message: "Test", output: fmt.Sprintf("{\"@version\":1,\"app_server_name\":\"%s\",\"application\":\"myapp\",\"channel\":\"mychan\",\"environment\":\"prod\",\"file\":\"logx_test.go:58\",\"message\":\"Test\",\"product\":\"myprod\",\"severity\":\"INFO\"}\n", hostname)},
} {
if tc.fields != nil {
tc.logger.Info(tc.message, tc.fields...)
} else {
tc.logger.Info(tc.message)
}
a.Equal(tc.output, <-rec)
}
}
func TestLoggingWithCustomSkipLevel(t *testing.T) {
t.Parallel()
a := assert.New(t)
rec := make(recorder, 1)
defaultLogger := logx.New(logx.WriterOpt(rec), logx.WithoutTimeOpt(), logx.AdditionalFileSkipLevel(1))
log(defaultLogger, "Test")
a.Equal("INFO Test File: logx_test.go:70\n", <-rec)
}
func log(logger logx.Logger, message string) {
logger.Info(message)
}
func TestLogLevel(t *testing.T) {
t.Parallel()
a := assert.New(t)
var buf bytes.Buffer
logger := logx.New(logx.WriterOpt(&buf))
logger.Info("test")
content, err := io.ReadAll(&buf)
a.NoError(err)
a.True(len(content) > 0)
logger.Error("test2")
content, err = io.ReadAll(&buf)
a.NoError(err)
a.True(len(content) > 0)
logger = logx.New(logx.WriterOpt(&buf), logx.LevelOpt(logx.ErrorLevel))
// the minimum level is now Error, so an Info message won't be logged
logger.Info("test")
content, err = io.ReadAll(&buf)
a.NoError(err)
a.Len(content, 0)
logger.Error("test2")
content, err = io.ReadAll(&buf)
a.NoError(err)
a.True(len(content) > 0)
}
func TestDummy(t *testing.T) {
t.Parallel()
a := assert.New(t)
var buf bytes.Buffer
logger := logx.NewDummy(logx.WriterOpt(&buf))
logger.Info("test")
content, err := io.ReadAll(&buf)
a.NoError(err)
a.Len(content, 0)
logger.Error("test2")
content, err = io.ReadAll(&buf)
a.NoError(err)
a.Len(content, 0)
}
type recorder chan string
func (r recorder) Write(b []byte) (n int, err error) {
r <- string(b)
return len(b), nil
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"math"
"os"
"strconv"
)
/**
* Wishing you Godspeed
* https://www.hackerearth.com/practice/data-structures/arrays/1-d/practice-problems/algorithm/speed-7/
*/
func main12() {
fmt.Print(SolveSpeed(bufio.NewScanner(os.Stdin)))
}
func SolveSpeed(scanner *bufio.Scanner) string {
scanner.Split(bufio.ScanWords)
var buffer bytes.Buffer
scanner.Scan()
tests, _ := strconv.Atoi(scanner.Text())
for i := 0; i < tests; i++ {
scanner.Scan()
cars, _ := strconv.Atoi(scanner.Text())
// a car keeps its speed only if it is strictly slower than every car ahead,
// so count the cars that set a new minimum speed
minSoFar := math.MaxInt32
count := 0
for i := 0; i < cars; i++ {
scanner.Scan()
cur, _ := strconv.Atoi(scanner.Text())
if cur < minSoFar {
minSoFar = cur
count++
}
}
}
buffer.WriteString(fmt.Sprintf("%d\n", count))
}
return buffer.String()
}
|
package configcommands
import (
"errors"
"fmt"
"github.com/vmwarepivotallabs/cf-mgmt/config"
)
type GlobalConfigurationCommand struct {
ConfigManager config.Manager
BaseConfigCommand
EnableDeleteIsolationSegments string `long:"enable-delete-isolation-segments" description:"Enable removing isolation segments" choice:"true" choice:"false"`
EnableDeleteSharedDomains string `long:"enable-delete-shared-domains" description:"Enable removing shared domains" choice:"true" choice:"false"`
EnableServiceAccess string `long:"enable-service-access" description:"Enable managing service access" choice:"true" choice:"false"`
EnableUnassignSecurityGroups string `long:"enable-unassign-security-groups" description:"Enable unassigning security groups" choice:"true" choice:"false"`
MetadataPrefix string `long:"metadata-prefix" description:"Prefix for org/space metadata"`
StagingSecurityGroups []string `long:"staging-security-group" description:"Staging Security Group to add"`
RemoveStagingSecurityGroups []string `long:"remove-staging-security-group" description:"Staging Security Group to remove"`
RunningSecurityGroups []string `long:"running-security-group" description:"Running Security Group to add"`
RemoveRunningSecurityGroups []string `long:"remove-running-security-group" description:"Running Security Group to remove"`
SharedDomains []string `long:"shared-domain" description:"Shared Domain to add"`
RouterGroupSharedDomains []string `long:"router-group-shared-domain" description:"Router Group Shared Domain to add"`
RouterGroupSharedDomainsGroups []string `long:"router-group-shared-domain-group" description:"Router Group Shared Domain group"`
InternalSharedDomains []string `long:"internal-shared-domain" description:"Internal Shared Domain to add"`
RemoveSharedDomains []string `long:"remove-shared-domain" description:"Shared Domain to remove"`
ServiceAccess GlobalServiceAccess `group:"service-access"`
}
// Execute - updates the global configuration
func (c *GlobalConfigurationCommand) Execute([]string) error {
c.initConfig()
globalConfig, err := c.ConfigManager.GetGlobalConfig()
if err != nil {
return err
}
errorString := ""
if globalConfig.SharedDomains == nil {
globalConfig.SharedDomains = map[string]config.SharedDomain{}
}
convertToBool("enable-delete-isolation-segments", &globalConfig.EnableDeleteIsolationSegments, c.EnableDeleteIsolationSegments, &errorString)
convertToBool("enable-delete-shared-domains", &globalConfig.EnableDeleteSharedDomains, c.EnableDeleteSharedDomains, &errorString)
convertToBool("enable-service-access", &globalConfig.EnableServiceAccess, c.EnableServiceAccess, &errorString)
convertToBool("enable-unassign-security-groups", &globalConfig.EnableUnassignSecurityGroups, c.EnableUnassignSecurityGroups, &errorString)
if c.MetadataPrefix != "" {
globalConfig.MetadataPrefix = c.MetadataPrefix
}
globalConfig.StagingSecurityGroups = c.updateSecGroups(globalConfig.StagingSecurityGroups, c.StagingSecurityGroups, c.RemoveStagingSecurityGroups)
globalConfig.RunningSecurityGroups = c.updateSecGroups(globalConfig.RunningSecurityGroups, c.RunningSecurityGroups, c.RemoveRunningSecurityGroups)
for _, domain := range c.SharedDomains {
globalConfig.SharedDomains[domain] = config.SharedDomain{Internal: false}
}
for _, domain := range c.InternalSharedDomains {
globalConfig.SharedDomains[domain] = config.SharedDomain{Internal: true}
}
if len(c.RouterGroupSharedDomains) > 0 {
if len(c.RouterGroupSharedDomains) != len(c.RouterGroupSharedDomainsGroups) {
return fmt.Errorf("Must specify same number of router-group-shared-domain args as router-group-shared-domain-group args")
}
for index, domain := range c.RouterGroupSharedDomains {
globalConfig.SharedDomains[domain] = config.SharedDomain{
Internal: false,
RouterGroup: c.RouterGroupSharedDomainsGroups[index],
}
}
}
for _, domain := range c.RemoveSharedDomains {
delete(globalConfig.SharedDomains, domain)
}
errorList := c.UpdateServiceAccess(globalConfig)
for _, err := range errorList {
errorString += "\n--" + err.Error()
}
if errorString != "" {
return errors.New(errorString)
}
err = c.ConfigManager.SaveGlobalConfig(globalConfig)
if err != nil {
return err
}
fmt.Println("The cf-mgmt.yml has been updated")
return nil
}
func (c *GlobalConfigurationCommand) updateSecGroups(current, additions, removals []string) []string {
secGroupMap := make(map[string]string)
for _, secGroup := range current {
secGroupMap[secGroup] = secGroup
}
for _, secGroup := range additions {
secGroupMap[secGroup] = secGroup
}
for _, secGroup := range removals {
delete(secGroupMap, secGroup)
}
var result []string
for _, secGroup := range secGroupMap {
result = append(result, secGroup)
}
return result
}
func (c *GlobalConfigurationCommand) initConfig() {
if c.ConfigManager == nil {
c.ConfigManager = config.NewManager(c.ConfigDirectory)
}
}
func (c *GlobalConfigurationCommand) UpdateServiceAccess(globalConfig *config.GlobalConfig) []error {
var errorList []error
if len(c.ServiceAccess.AllAccessPlan) > 0 || len(c.ServiceAccess.LimitedAccessPlan) > 0 || len(c.ServiceAccess.NoAccessPlan) > 0 {
if len(c.ServiceAccess.Broker) == 0 {
errorList = append(errorList, fmt.Errorf("must specify --broker arg"))
}
if len(c.ServiceAccess.Service) == 0 {
errorList = append(errorList, fmt.Errorf("must specify --service arg"))
}
if len(errorList) > 0 {
return errorList
}
broker := globalConfig.GetBroker(c.ServiceAccess.Broker)
service := broker.GetService(c.ServiceAccess.Service)
if len(c.ServiceAccess.AllAccessPlan) > 0 {
service.AddAllAccessPlan(c.ServiceAccess.AllAccessPlan)
}
if len(c.ServiceAccess.NoAccessPlan) > 0 {
service.AddNoAccessPlan(c.ServiceAccess.NoAccessPlan)
}
if len(c.ServiceAccess.LimitedAccessPlan) > 0 {
service.AddLimitedAccessPlan(c.ServiceAccess.LimitedAccessPlan, c.ServiceAccess.OrgsToAdd, c.ServiceAccess.OrgsToRemove)
}
}
return nil
}
type GlobalServiceAccess struct {
Broker string `long:"broker" description:"Name of Broker"`
Service string `long:"service" description:"Name of Service"`
AllAccessPlan string `long:"all-access-plan" description:"Plan to give access to all orgs"`
LimitedAccessPlan string `long:"limited-access-plan" description:"Plan to give limited access to, must also provide org list"`
OrgsToAdd []string `long:"org" description:"Orgs to add to limited plan"`
OrgsToRemove []string `long:"remove-org" description:"Orgs to remove from limited plan"`
NoAccessPlan string `long:"no-access-plan" description:"Plan to remove access for all orgs"`
}
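// Example invocation (a sketch; the binary/command names and the values are
// assumptions):
//
//	cf-mgmt-config global \
//		--enable-service-access true \
//		--broker mybroker --service myservice \
//		--all-access-plan small-plan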
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package wire
import (
"encoding/json"
"strings"
"unicode/utf8"
)
// FIBeneficiaryFI is the financial-institution-to-financial-institution information for the beneficiary FI
type FIBeneficiaryFI struct {
// tag
tag string
// Financial Institution
FIToFI FIToFI `json:"fiToFI,omitempty"`
// validator is composed for data validation
validator
// converters is composed for WIRE to GoLang Converters
converters
}
// NewFIBeneficiaryFI returns a new FIBeneficiaryFI
func NewFIBeneficiaryFI() *FIBeneficiaryFI {
fibfi := &FIBeneficiaryFI{
tag: TagFIBeneficiaryFI,
}
return fibfi
}
// Parse takes the input string and parses the FIBeneficiaryFI values
//
// Parse provides no guarantee about all fields being filled in. Callers should make a Validate() call to confirm
// successful parsing and data validity.
func (fibfi *FIBeneficiaryFI) Parse(record string) error {
if utf8.RuneCountInString(record) < 6 {
return NewTagMinLengthErr(6, len(record))
}
fibfi.tag = record[:6]
length := 6
value, read, err := fibfi.parseVariableStringField(record[length:], 30)
if err != nil {
return fieldError("LineOne", err)
}
fibfi.FIToFI.LineOne = value
length += read
value, read, err = fibfi.parseVariableStringField(record[length:], 33)
if err != nil {
return fieldError("LineTwo", err)
}
fibfi.FIToFI.LineTwo = value
length += read
value, read, err = fibfi.parseVariableStringField(record[length:], 33)
if err != nil {
return fieldError("LineThree", err)
}
fibfi.FIToFI.LineThree = value
length += read
value, read, err = fibfi.parseVariableStringField(record[length:], 33)
if err != nil {
return fieldError("LineFour", err)
}
fibfi.FIToFI.LineFour = value
length += read
value, read, err = fibfi.parseVariableStringField(record[length:], 33)
if err != nil {
return fieldError("LineFive", err)
}
fibfi.FIToFI.LineFive = value
length += read
value, read, err = fibfi.parseVariableStringField(record[length:], 33)
if err != nil {
return fieldError("LineSix", err)
}
fibfi.FIToFI.LineSix = value
length += read
if err := fibfi.verifyDataWithReadLength(record, length); err != nil {
return NewTagMaxLengthErr(err)
}
return nil
}
func (fibfi *FIBeneficiaryFI) UnmarshalJSON(data []byte) error {
type Alias FIBeneficiaryFI
aux := struct {
*Alias
}{
(*Alias)(fibfi),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
fibfi.tag = TagFIBeneficiaryFI
return nil
}
// String returns a fixed-width FIBeneficiaryFI record
func (fibfi *FIBeneficiaryFI) String() string {
return fibfi.Format(FormatOptions{
VariableLengthFields: false,
})
}
// Format returns a FIBeneficiaryFI record formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) Format(options FormatOptions) string {
var buf strings.Builder
buf.Grow(201)
buf.WriteString(fibfi.tag)
buf.WriteString(fibfi.FormatLineOne(options))
buf.WriteString(fibfi.FormatLineTwo(options))
buf.WriteString(fibfi.FormatLineThree(options))
buf.WriteString(fibfi.FormatLineFour(options))
buf.WriteString(fibfi.FormatLineFive(options))
buf.WriteString(fibfi.FormatLineSix(options))
if options.VariableLengthFields {
return fibfi.stripDelimiters(buf.String())
} else {
return buf.String()
}
}
// Validate performs WIRE format rule checks on FIBeneficiaryFI and returns an error if not Validated
// The first error encountered is returned and stops the parsing.
func (fibfi *FIBeneficiaryFI) Validate() error {
if fibfi.tag != TagFIBeneficiaryFI {
return fieldError("tag", ErrValidTagForType, fibfi.tag)
}
if err := fibfi.isAlphanumeric(fibfi.FIToFI.LineOne); err != nil {
return fieldError("LineOne", err, fibfi.FIToFI.LineOne)
}
if err := fibfi.isAlphanumeric(fibfi.FIToFI.LineTwo); err != nil {
return fieldError("LineTwo", err, fibfi.FIToFI.LineTwo)
}
if err := fibfi.isAlphanumeric(fibfi.FIToFI.LineThree); err != nil {
return fieldError("LineThree", err, fibfi.FIToFI.LineThree)
}
if err := fibfi.isAlphanumeric(fibfi.FIToFI.LineFour); err != nil {
return fieldError("LineFour", err, fibfi.FIToFI.LineFour)
}
if err := fibfi.isAlphanumeric(fibfi.FIToFI.LineFive); err != nil {
return fieldError("LineFive", err, fibfi.FIToFI.LineFive)
}
if err := fibfi.isAlphanumeric(fibfi.FIToFI.LineSix); err != nil {
return fieldError("LineSix", err, fibfi.FIToFI.LineSix)
}
return nil
}
// LineOneField gets a string of the LineOne field
func (fibfi *FIBeneficiaryFI) LineOneField() string {
return fibfi.alphaField(fibfi.FIToFI.LineOne, 30)
}
// LineTwoField gets a string of the LineTwo field
func (fibfi *FIBeneficiaryFI) LineTwoField() string {
return fibfi.alphaField(fibfi.FIToFI.LineTwo, 33)
}
// LineThreeField gets a string of the LineThree field
func (fibfi *FIBeneficiaryFI) LineThreeField() string {
return fibfi.alphaField(fibfi.FIToFI.LineThree, 33)
}
// LineFourField gets a string of the LineFour field
func (fibfi *FIBeneficiaryFI) LineFourField() string {
return fibfi.alphaField(fibfi.FIToFI.LineFour, 33)
}
// LineFiveField gets a string of the LineFive field
func (fibfi *FIBeneficiaryFI) LineFiveField() string {
return fibfi.alphaField(fibfi.FIToFI.LineFive, 33)
}
// LineSixField gets a string of the LineSix field
func (fibfi *FIBeneficiaryFI) LineSixField() string {
return fibfi.alphaField(fibfi.FIToFI.LineSix, 33)
}
// FormatLineOne returns FIToFI.LineOne formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) FormatLineOne(options FormatOptions) string {
return fibfi.formatAlphaField(fibfi.FIToFI.LineOne, 30, options)
}
// FormatLineTwo returns FIToFI.LineTwo formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) FormatLineTwo(options FormatOptions) string {
return fibfi.formatAlphaField(fibfi.FIToFI.LineTwo, 33, options)
}
// FormatLineThree returns FIToFI.LineThree formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) FormatLineThree(options FormatOptions) string {
return fibfi.formatAlphaField(fibfi.FIToFI.LineThree, 33, options)
}
// FormatLineFour returns FIToFI.LineFour formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) FormatLineFour(options FormatOptions) string {
return fibfi.formatAlphaField(fibfi.FIToFI.LineFour, 33, options)
}
// FormatLineFive returns FIToFI.LineFive formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) FormatLineFive(options FormatOptions) string {
return fibfi.formatAlphaField(fibfi.FIToFI.LineFive, 33, options)
}
// FormatLineSix returns FIToFI.LineSix formatted according to the FormatOptions
func (fibfi *FIBeneficiaryFI) FormatLineSix(options FormatOptions) string {
return fibfi.formatAlphaField(fibfi.FIToFI.LineSix, 33, options)
}
|
package ravendb
import (
"net/http"
"reflect"
)
var (
_ IOperation = &GetCompareExchangeValueOperation{}
)
type GetCompareExchangeValueOperation struct {
Command *GetCompareExchangeValueCommand
_key string
_clazz reflect.Type
}
func NewGetCompareExchangeValueOperation(clazz reflect.Type, key string) (*GetCompareExchangeValueOperation, error) {
if stringIsEmpty(key) {
return nil, newIllegalArgumentError("The key argument must have value")
}
return &GetCompareExchangeValueOperation{
_clazz: clazz,
_key: key,
}, nil
}
func (o *GetCompareExchangeValueOperation) GetCommand(store *DocumentStore, conventions *DocumentConventions, cache *httpCache) (RavenCommand, error) {
var err error
o.Command, err = NewGetCompareExchangeValueCommand(o._clazz, o._key, conventions)
return o.Command, err
}
var _ RavenCommand = &GetCompareExchangeValueCommand{}
type GetCompareExchangeValueCommand struct {
RavenCommandBase
_key string
_clazz reflect.Type
_conventions *DocumentConventions
Result *CompareExchangeValue
}
func NewGetCompareExchangeValueCommand(clazz reflect.Type, key string, conventions *DocumentConventions) (*GetCompareExchangeValueCommand, error) {
if stringIsEmpty(key) {
return nil, newIllegalArgumentError("The key argument must have value")
}
cmd := &GetCompareExchangeValueCommand{
RavenCommandBase: NewRavenCommandBase(),
_clazz: clazz,
_key: key,
_conventions: conventions,
}
cmd.IsReadRequest = true
return cmd, nil
}
func (c *GetCompareExchangeValueCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
url := node.URL + "/databases/" + node.Database + "/cmpxchg?key=" + urlEncode(c._key)
return newHttpGet(url)
}
func (c *GetCompareExchangeValueCommand) SetResponse(response []byte, fromCache bool) error {
res, err := compareExchangeValueResultParserGetValue(c._clazz, response, c._conventions)
if err != nil {
return err
}
c.Result = res
return nil
}
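// Example usage (a sketch; it assumes a configured *DocumentStore whose
// operation executor exposes Send, and a hypothetical key):
//
//	op, err := NewGetCompareExchangeValueOperation(reflect.TypeOf(""), "users/ayende")
//	if err != nil {
//		// handle err
//	}
//	if err = store.Operations().Send(op); err != nil {
//		// handle err
//	}
//	value := op.Command.Result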
|
package main
import (
"./undity"
"fmt"
"os"
)
import (
"github.com/lxn/walk"
. "github.com/lxn/walk/declarative"
)
const (
AppName = "unkarApp"
Version = "1.0.0.8"
TitleBase = AppName + " " + Version
)
/**
* Application icon
*/
func GetApplicationIcon() *walk.Icon {
// icon
//icon, iconErr := walk.Resources.Icon("unkarApp.ico")
icon, iconErr := walk.Resources.Icon("3")
if iconErr != nil {
panic(iconErr)
}
return icon
}
////////////////////////////////////////////////////////////
// MainWin
////////////////////////////////////////////////////////////
/**
* Page
*/
type Page interface {
walk.Container
Parent() walk.Container
SetParent(parent walk.Container) error
Title() string
}
/**
* Main window
*/
type MainWin struct {
*walk.MainWindow
navToolBar *walk.ToolBar
pageActions []*walk.Action
pageComposite *walk.Composite
action2Page map[*walk.Action]Page
currPage Page
currAction *walk.Action
topPage *TopPage
boardPage *BoardPage
threadPage *ThreadPage
}
/**
* Constructor.
* @param none
* @return (1) the main window
* (2) an error
*/
func NewMainWin() (*MainWin, error) {
// tmp directory
tmpDir := unkarstub.GetTmpHtmlDir()
// first remove the directory left over from the previous run
if err := os.RemoveAll(tmpDir); err != nil {
fmt.Println(err)
}
// create the directory
if err := os.Mkdir(tmpDir, 0777); err != nil {
fmt.Println(err)
}
// initialize Unkar
unkarstub.InitUnkar()
// create the main window
mainWin := &MainWin{
action2Page: make(map[*walk.Action]Page),
}
// icon
icon := GetApplicationIcon()
// build the main window UI
err := MainWindow{
AssignTo: &mainWin.MainWindow,
Title: TitleBase,
Icon: icon,
MinSize: Size{950, 600},
Layout: HBox{MarginsZero: true, SpacingZero: true},
Font: Font{Family: "MS Shell Dlg 2", PointSize: 12},
Children: []Widget{
ScrollView{
HorizontalFixed: true,
Layout: VBox{MarginsZero: true},
Children: []Widget{
Composite{
Layout: VBox{MarginsZero: true},
Children: []Widget{
ToolBar{
AssignTo: &mainWin.navToolBar,
Orientation: Vertical,
ButtonStyle: ToolBarButtonImageAboveText,
MaxTextRows: 2,
},
},
},
},
},
Composite{
AssignTo: &mainWin.pageComposite,
Name: "pageComposite",
Layout: HBox{MarginsZero: true, SpacingZero: true},
},
},
}.Create()
if err != nil {
return nil, err
}
mainWin.topPage, err = newTopPage(mainWin.pageComposite, mainWin)
if err != nil {
return nil, err
}
mainWin.boardPage, err = newBoardPage(mainWin.pageComposite, mainWin)
if err != nil {
return nil, err
}
mainWin.threadPage, err = newThreadPage(mainWin.pageComposite, mainWin)
if err != nil {
return nil, err
}
action, err := mainWin.newPageAction("板一覧", "./undity/public_html/img/whiteboard.png")
if err != nil {
return nil, err
}
mainWin.action2Page[action] = mainWin.topPage
mainWin.pageActions = append(mainWin.pageActions, action)
action, err = mainWin.newPageAction("板", "./undity/public_html/img/folder.png")
if err != nil {
return nil, err
}
mainWin.action2Page[action] = mainWin.boardPage
mainWin.pageActions = append(mainWin.pageActions, action)
action, err = mainWin.newPageAction("スレッド", "./undity/public_html/img/memo.png")
if err != nil {
return nil, err
}
mainWin.action2Page[action] = mainWin.threadPage
mainWin.pageActions = append(mainWin.pageActions, action)
mainWin.updateNavigationToolBar()
if len(mainWin.pageActions) > 0 {
if err := mainWin.setCurrentAction(mainWin.pageActions[0]); err != nil {
return nil, err
}
}
return mainWin, err
}
func (mainWin *MainWin) UpdateTitle(page Page) {
s := page.Title()
if s != "" {
s += " - "
}
s += TitleBase
mainWin.MainWindow.SetTitle(s)
}
func (mainWin *MainWin) newPageAction(title, image string) (*walk.Action, error) {
img, err := walk.Resources.Bitmap(image)
if err != nil {
return nil, err
}
action := walk.NewAction()
action.SetCheckable(true)
action.SetExclusive(true)
action.SetImage(img)
action.SetText(title)
action.Triggered().Attach(func() {
mainWin.setCurrentAction(action)
})
return action, nil
}
func (mainWin *MainWin) setCurrentAction(action *walk.Action) error {
defer func() {
if !mainWin.pageComposite.IsDisposed() {
mainWin.pageComposite.RestoreState()
mainWin.pageComposite.Layout().Update(false)
}
}()
mainWin.SetFocus()
if prevPage := mainWin.currPage; prevPage != nil {
mainWin.pageComposite.SaveState()
prevPage.SetVisible(false)
prevPage.SetParent(nil)
}
page := mainWin.action2Page[action]
page.SetParent(mainWin.pageComposite)
page.SetVisible(true)
action.SetChecked(true)
mainWin.currPage = page
mainWin.currAction = action
mainWin.UpdateTitle(page)
return nil
}
func (mainWin *MainWin) updateNavigationToolBar() error {
mainWin.navToolBar.SetSuspended(true)
defer mainWin.navToolBar.SetSuspended(false)
actions := mainWin.navToolBar.Actions()
if err := actions.Clear(); err != nil {
return err
}
for _, action := range mainWin.pageActions {
if err := actions.Add(action); err != nil {
return err
}
}
if mainWin.currAction != nil {
if !actions.Contains(mainWin.currAction) {
for _, action := range mainWin.pageActions {
if action != mainWin.currAction {
if err := mainWin.setCurrentAction(action); err != nil {
return err
}
break
}
}
}
}
return nil
}
func (mainWin *MainWin) topPageAction() *walk.Action {
var tgtAction *walk.Action = nil
tgtPage := mainWin.topPage
for workAction, workPage := range mainWin.action2Page {
if workPage == tgtPage {
tgtAction = workAction
break
}
}
return tgtAction
}
func (mainWin *MainWin) boardPageAction() *walk.Action {
var tgtAction *walk.Action = nil
tgtPage := mainWin.boardPage
for workAction, workPage := range mainWin.action2Page {
if workPage == tgtPage {
tgtAction = workAction
break
}
}
return tgtAction
}
func (mainWin *MainWin) threadPageAction() *walk.Action {
var tgtAction *walk.Action = nil
tgtPage := mainWin.threadPage
for workAction, workPage := range mainWin.action2Page {
if workPage == tgtPage {
tgtAction = workAction
break
}
}
return tgtAction
}
func (mainWin *MainWin) NavigateToTopPage() {
action := mainWin.topPageAction()
mainWin.topPage.UpdateContents()
mainWin.changePage(action)
}
func (mainWin *MainWin) NavigateToBoardPage(boardName string, boardKey string, sortAttrStr string) {
action := mainWin.boardPageAction()
mainWin.boardPage.UpdateContents(boardName, boardKey, sortAttrStr)
mainWin.changePage(action)
}
func (mainWin *MainWin) NavigateToThreadPage(boardName string, boardKey string, threadNo int64) {
action := mainWin.threadPageAction()
mainWin.threadPage.UpdateContents(boardName, boardKey, threadNo)
mainWin.changePage(action)
}
func (mainWin *MainWin) changePage(action *walk.Action) {
mainWin.clearToolBarChecked()
mainWin.setCurrentAction(action)
}
func (mainWin *MainWin) clearToolBarChecked() {
for _, workAction := range mainWin.pageActions {
workAction.SetChecked(false)
}
mainWin.currAction = nil
}
|
package main
import "fmt"
func update(p *int) {
b := 2
*p = b
}
func main() {
var (
a = 1
p = &a
)
fmt.Println(*p)
update(p)
fmt.Println(*p)
}
|
package conf
import (
"fmt"
"io/ioutil"
"strings"
"github.com/teamhephy/builder/pkg/sys"
)
const (
storageCredLocation = "/var/run/secrets/deis/objectstore/creds/"
minioHostEnvVar = "DEIS_MINIO_SERVICE_HOST"
minioPortEnvVar = "DEIS_MINIO_SERVICE_PORT"
gcsKey = "key.json"
)
// BuilderKeyLocation holds the path of the builder key secret.
var BuilderKeyLocation = "/var/run/secrets/api/auth/builder-key"
// Parameters is a map that holds the storage parameters
type Parameters map[string]interface{}
// GetBuilderKey returns the key used as a token when talking to the hephy-controller
func GetBuilderKey() (string, error) {
builderKeyBytes, err := ioutil.ReadFile(BuilderKeyLocation)
if err != nil {
return "", fmt.Errorf("couldn't get builder key from %s (%s)", BuilderKeyLocation, err)
}
builderKey := strings.Trim(string(builderKeyBytes), "\n")
return builderKey, nil
}
// GetStorageParams returns the credentials required for connecting to object storage
func GetStorageParams(env sys.Env) (Parameters, error) {
params := make(map[string]interface{})
files, err := ioutil.ReadDir(storageCredLocation)
if err != nil {
return nil, err
}
for _, file := range files {
if file.IsDir() || file.Name() == "..data" {
continue
}
data, err := ioutil.ReadFile(storageCredLocation + file.Name())
if err != nil {
return nil, err
}
		// GCS expects the location of the service-account credential JSON file
if file.Name() == gcsKey {
params["keyfile"] = storageCredLocation + file.Name()
} else {
params[file.Name()] = string(data)
}
}
params["bucket"] = params["builder-bucket"]
params["container"] = params["builder-container"]
if env.Get("BUILDER_STORAGE") == "minio" {
mHost := env.Get(minioHostEnvVar)
mPort := env.Get(minioPortEnvVar)
params["regionendpoint"] = fmt.Sprintf("http://%s:%s", mHost, mPort)
params["secure"] = false
params["region"] = "us-east-1"
params["bucket"] = "git"
}
return params, nil
}
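// A minimal usage sketch (hypothetical wiring; the concrete sys.Env value
// depends on the caller's environment implementation):
//
//	params, err := GetStorageParams(env)
//	if err != nil {
//		return err
//	}
//	bucket, _ := params["bucket"].(string)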
|
package bitwire
import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/stretchr/testify/assert"
"io/ioutil"
"testing"
"time"
)
func TestClient(t *testing.T) {
client, err := New("test")
assert.Nil(t, client)
assert.NotNil(t, err)
assert.Equal(t, err.Error(), "Invalid mode")
}
func TestAllRates(t *testing.T) {
client, _ := New(SANDBOX)
rates, err := client.GetAllRates()
assert.Nil(t, err)
assert.NotEmpty(t, rates)
assert.NotEmpty(t, rates.BTC)
assert.Contains(t, rates.BTC, "BTCKRW")
assert.NotEmpty(t, rates.FX)
}
func TestBtcRates(t *testing.T) {
client, _ := New(SANDBOX)
rates, err := client.GetBtcRates()
assert.Nil(t, err)
assert.NotEmpty(t, rates)
assert.Contains(t, rates, "BTCKRW")
}
func TestFxRates(t *testing.T) {
client, _ := New(SANDBOX)
rates, err := client.GetFxRates()
assert.Nil(t, err)
assert.NotEmpty(t, rates)
}
func TestBanks(t *testing.T) {
client, _ := New(SANDBOX)
banks, err := client.GetBanks()
assert.Nil(t, err)
assert.NotEmpty(t, banks)
}
func TestAuthenticate(t *testing.T) {
client, _ := New(SANDBOX)
creds := readCredentials()
ok, err := client.Authenticate(creds)
assert.Nil(t, err)
assert.NotNil(t, ok)
}
func TestTransfers(t *testing.T) {
client, _ := New(SANDBOX)
creds := readCredentials()
client.Authenticate(creds)
transfers, err := client.GetTransfers()
assert.Nil(t, err)
assert.NotEmpty(t, transfers)
}
func TestLimits(t *testing.T) {
client, _ := New(SANDBOX)
creds := readCredentials()
client.Authenticate(creds)
limits, err := client.GetLimits()
assert.Nil(t, err)
assert.NotEmpty(t, limits)
}
func TestLimitsAuthFailed(t *testing.T) {
token := Token{"Bearer",
"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjo5MSwibGV2ZWwiOjEsImVtYWlsIjoiZHd1eml1QGJ6aXVtLm5ldCIsImp0aSI6IjBQR1kyOEdtaEE3cjBUR1RYb3AwRzBjb3RmemU2aTd0IiwiaWF0IjoxNDY0Njc5ODIzLCJleHAiOjE0NjQ2ODM0MjMsImlzcyI6Imh0dHBzOi8vd3d3LmJpdHdpcmUuY28vYXBpL3YxL29hdXRoIn0.NE9gjpcaQimsTjyaWQncmJ67c6rdzlvFlaR12lskgWw",
"xxx",
3600,
time.Now().Unix() + 3600,
}
client, _ := NewWithToken(SANDBOX, token)
_, err := client.GetLimits()
assert.NotNil(t, err)
assert.Equal(t, err.Error(), "Unauthorized: Invalid token.")
}
func TestRecipients(t *testing.T) {
client, _ := New(SANDBOX)
creds := readCredentials()
client.Authenticate(creds)
recipients, err := client.GetRecipients()
assert.Nil(t, err)
assert.NotEmpty(t, recipients)
}
func TestRefreshToken(t *testing.T) {
client, _ := New(SANDBOX)
creds := readCredentials()
	token, err := client.Authenticate(creds)
	assert.Nil(t, err)
	newToken, err := client.RefreshToken()
assert.Nil(t, err)
assert.NotNil(t, newToken)
assert.NotNil(t, newToken.AccessToken)
assert.NotEqual(t, token.AccessToken, newToken.AccessToken)
}
func TestRefreshTokenNoAuth(t *testing.T) {
client, _ := New(SANDBOX)
newToken, err := client.RefreshToken()
assert.NotNil(t, err)
fmt.Println(err)
assert.Equal(t, newToken, (Token{}))
}
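// readCredentials loads credentials from ./test_sandbox.conf, which is
// assumed (the file itself is not shown here) to be JSON unmarshalable into
// LoginCredentials with a base64-encoded password field.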
func readCredentials() LoginCredentials {
	data, err := ioutil.ReadFile("./test_sandbox.conf")
	if err != nil {
		panic(err)
	}
	config := LoginCredentials{}
	if err := json.Unmarshal(data, &config); err != nil {
		panic(err)
	}
	pass, err := base64.StdEncoding.DecodeString(config.Password)
	if err != nil {
		panic(err)
	}
	config.Password = string(pass)
	return config
}
|
package compute_test
import (
"errors"
"github.com/genevieve/leftovers/gcp/compute"
"github.com/genevieve/leftovers/gcp/compute/fakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Network", func() {
var (
client *fakes.NetworksClient
name string
network compute.Network
)
BeforeEach(func() {
client = &fakes.NetworksClient{}
name = "banana"
network = compute.NewNetwork(client, name)
})
Describe("Delete", func() {
It("deletes the network", func() {
err := network.Delete()
Expect(err).NotTo(HaveOccurred())
Expect(client.DeleteNetworkCall.CallCount).To(Equal(1))
Expect(client.DeleteNetworkCall.Receives.Network).To(Equal(name))
})
Context("when the client fails to delete", func() {
BeforeEach(func() {
client.DeleteNetworkCall.Returns.Error = errors.New("the-error")
})
It("returns the error", func() {
err := network.Delete()
Expect(err).To(MatchError("Delete: the-error"))
})
})
})
Describe("Name", func() {
It("returns the name", func() {
Expect(network.Name()).To(Equal(name))
})
})
Describe("Type", func() {
It("returns the type", func() {
Expect(network.Type()).To(Equal("Network"))
})
})
})
|
package stringutil
var Name = "GoatMan of Alcatraz" |
package files
import (
"fmt"
"io"
"os"
)
// Move moves the file from src to dst: the contents are copied and the
// source is removed afterwards. Within one filesystem os.Rename is cheaper,
// but copy+remove also works across filesystem boundaries.
func Move(src, dst string) (int64, error) {
	srcFile, err := os.Open(src)
	if err != nil {
		return 0, err
	}
	defer srcFile.Close()
	srcFileStat, err := srcFile.Stat()
	if err != nil {
		return 0, err
	}
	if !srcFileStat.Mode().IsRegular() {
		return 0, fmt.Errorf("%s is not a regular file", src)
	}
	dstFile, err := os.Create(dst)
	if err != nil {
		return 0, err
	}
	defer dstFile.Close()
	n, err := io.Copy(dstFile, srcFile)
	if err != nil {
		return n, err
	}
	// Close src before removing it so the remove also succeeds on Windows;
	// the deferred Close then degrades to a harmless error.
	srcFile.Close()
	return n, os.Remove(src)
}
|
package rollback
import "github.com/usvc/go-config"
const (
FlagMigrationsTableName = "table-name"
FlagSteps = "steps"
)
var (
conf = &config.Map{
FlagMigrationsTableName: &config.String{
Default: "migrations",
Shorthand: "n",
Usage: "defines the name of the table used to store migration steps",
},
FlagSteps: &config.Uint{
Default: 1,
Shorthand: "s",
Usage: "defines the number of steps to roll back",
},
}
)
|
package lumps
import (
"bytes"
"encoding/binary"
"log"
)
// Lump 12: Edge (each edge is a [2]uint16 pair of vertex indices, 4 bytes).
type Edge struct {
LumpGeneric
data [][2]uint16 // MAX_MAP_EDGES = 256000
}
func (lump *Edge) FromBytes(raw []byte, length int32) {
lump.data = make([][2]uint16, length/4)
err := binary.Read(bytes.NewBuffer(raw), binary.LittleEndian, &lump.data)
if err != nil {
log.Fatal(err)
}
lump.LumpInfo.SetLength(length)
}
func (lump *Edge) GetData() [][2]uint16 {
return lump.data
}
func (lump *Edge) ToBytes() []byte {
var buf bytes.Buffer
binary.Write(&buf, binary.LittleEndian, lump.data)
return buf.Bytes()
}
|
package banners
import (
"context"
"errors"
"fmt"
"io/ioutil"
"mime/multipart"
"sync"
)
// Service manages banners.
type Service struct {
	mu    sync.RWMutex
	items []*Banner
}
// NewService creates a new banner service.
func NewService() *Service {
	return &Service{items: make([]*Banner, 0)}
}
// Banner describes a single banner.
type Banner struct {
	ID      int64
	Title   string
	Content string
	Button  string
	Link    string
	Image   string
}
// sID is the starting ID; it is incremented for every banner created.
var sID int64 = 0
// All returns every banner; an empty slice means none exist.
func (s *Service) All(ctx context.Context) ([]*Banner, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.items, nil
}
// ByID returns the banner with the given ID, or an error if none is found.
func (s *Service) ByID(ctx context.Context, id int64) (*Banner, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, v := range s.items {
		if v.ID == id {
			return v, nil
		}
	}
	return nil, errors.New("item not found")
}
// Save creates the banner when item.ID is 0, otherwise it replaces the
// existing banner with the same ID. When a file is supplied, item.Image is
// expected to hold just the extension (e.g. "jpg"); it is rewritten to
// "<ID>.<ext>" and the file is stored under ./web/banners/.
func (s *Service) Save(ctx context.Context, item *Banner, file multipart.File) (*Banner, error) {
	// A write lock is required here: sID and s.items are mutated below.
	s.mu.Lock()
	defer s.mu.Unlock()
	// An ID of 0 means a new banner.
	if item.ID == 0 {
		// Advance the ID counter and assign the new ID.
		sID++
		item.ID = sID
		if item.Image != "" {
			// Build the file name from the ID and the extension, e.g. "2.jpg".
			item.Image = fmt.Sprint(item.ID) + "." + item.Image
			// Store the uploaded file at ./web/banners/<name>.
			err := uploadFile(file, "./web/banners/"+item.Image)
			if err != nil {
				return nil, err
			}
		}
		// Append the new banner to the slice.
		s.items = append(s.items, item)
		// item is a pointer, so every change made above is already visible to the caller.
		return item, nil
	}
	// A non-zero ID means an update: look for the existing banner.
	for k, v := range s.items {
		if v.ID == item.ID {
			if item.Image != "" {
				// Same naming scheme as on create.
				item.Image = fmt.Sprint(item.ID) + "." + item.Image
				err := uploadFile(file, "./web/banners/"+item.Image)
				if err != nil {
					return nil, err
				}
			} else {
				// No new file: keep the previous Image value.
				item.Image = s.items[k].Image
			}
			// Replace the stored banner with the updated one.
			s.items[k] = item
			return item, nil
		}
	}
	return nil, errors.New("item not found")
}
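// A usage sketch (hypothetical handler code; svc, ctx and file are assumed to
// be in scope): create with a zero ID, then update by passing the assigned ID
// back in. A nil file is only safe on update because Image is left empty:
//
//	created, err := svc.Save(ctx, &Banner{Title: "Sale", Image: "jpg"}, file)
//	updated, err := svc.Save(ctx, &Banner{ID: created.ID, Title: "Big Sale"}, nil)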
// RemoveByID deletes the banner with the given ID and returns it.
func (s *Service) RemoveByID(ctx context.Context, id int64) (*Banner, error) {
	// A write lock is required here: s.items is mutated below.
	s.mu.Lock()
	defer s.mu.Unlock()
	for k, v := range s.items {
		if v.ID == id {
			// Splice the found element out of the slice.
			s.items = append(s.items[:k], s.items[k+1:]...)
			return v, nil
		}
	}
	return nil, errors.New("item not found")
}
// uploadFile writes the uploaded file to the given path; it returns nil on
// success and an error otherwise.
func uploadFile(file multipart.File, path string) error {
	// Read the whole file into a byte slice.
	var data, err = ioutil.ReadAll(file)
	if err != nil {
		return errors.New("could not read file data")
	}
	// Write the file to the target path with permissive (0666) mode bits.
	err = ioutil.WriteFile(path, data, 0666)
	if err != nil {
		return errors.New("could not save file")
	}
	return nil
} |
// Package middleware provides HMAC request verification via a Signature header.
package middleware
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
log "github.com/Sirupsen/logrus"
"github.com/spf13/viper"
"github.com/zenazn/goji/web"
)
// HMACVerification ensures only correctly signed requests reach the service.
func HMACVerification(c *web.C, h http.Handler) http.Handler {
	handler := func(w http.ResponseWriter, r *http.Request) {
		// Read the request body
		requestData, err := ioutil.ReadAll(r.Body)
		if err != nil {
			http.Error(w, http.StatusText(400), 400)
			return
		}
		// Put the data back on the request object
		r.Body = ioutil.NopCloser(bytes.NewBuffer(requestData))
		// Get the request Signature header, expected as "client:signature"
		creds := bytes.SplitN([]byte(r.Header.Get("Signature")), []byte(":"), 2)
		// Ensure we have creds
		if len(creds) != 2 {
			log.Warn("Malformed Request Signature Sent")
			http.Error(w, http.StatusText(400), 400)
			return
		}
		// Look up the secret key for the client and keep the provided signature
		secret := viper.GetString(fmt.Sprintf("clients.%s", creds[0]))
		sig := string(creds[1])
		// Encode the request body with HMAC-SHA256 under the client's secret
		mac := hmac.New(sha256.New, []byte(secret))
		mac.Write(requestData)
		expectedSig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
		// Ensure signatures match, using a constant-time comparison
		if !hmac.Equal([]byte(sig), []byte(expectedSig)) {
			log.Warn("Invalid HMAC Signature")
			http.Error(w, http.StatusText(400), 400)
			return
		}
		// Call the next handler on success
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(handler)
}
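// A client-side signing sketch (hypothetical client id "svc1"; body, secret,
// and req are assumed in scope; the "client:base64(HMAC-SHA256(body))" header
// format mirrors the check above):
//
//	mac := hmac.New(sha256.New, []byte(secret))
//	mac.Write(body)
//	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
//	req.Header.Set("Signature", "svc1:"+sig)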
|
package lastfm
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"xiamiToLastfm/musicbrainz"
"xiamiToLastfm/xiami"
)
// QuitChan is an empty-struct channel; closing it signals the main loop to stop.
var (
domain, apiUrl, sharedSecret, apiKey string
QuitChan chan struct{}
)
// https://www.last.fm/api/show/track.scrobble
type ScrobbleParams struct {
Artist []string `json:"artist,omitempty"`
Track []string `json:"track,omitempty"`
Timestamp []string `json:"timestamp,omitempty"`
Album []string `json:"album,omitempty"`
TrackNumber []string `json:"trackNumber,omitempty"`
Mbid []string `json:"mbid,omitempty"` //The MusicBrainz Track ID
AlbumArtist []string `json:"albumArtist,omitempty"`
DurationInSeconds []string `json:"duration,omitempty"`
ApiKey string `json:"api_key"`
ApiSig string `json:"api_sig"`
Sk string `json:"sk"`
Format string `json:"format"`
Method string `json:"method"`
}
// Scrobble sends scrobble info to the last.fm server.
// https://www.last.fm/api/show/track.scrobble
func Scrobble(xm xiami.Track) error {
log.Println("lastfm.Scrobble playedChan track: ", xm)
v := url.Values{}
v.Set("artist[0]", xm.Artist)
v.Set("album[0]", xm.Album)
v.Set("track[0]", xm.Title)
if mbid, ok := musicbrainz.MbID(xm.Title, xm.Artist, xm.Album); ok {
v.Set("mbid[0]", string(mbid))
}
v.Set("timestamp[0]", fmt.Sprint(xm.Timestamp))
v.Set("method", "track.scrobble")
v.Set("sk", sk)
v.Set("api_key", apiKey)
sig := signature(&v)
v.Set("api_sig", sig)
v.Set("format", "json")
respData, err := postRequest(v.Encode())
if err != nil {
		// on failure, the caller may re-queue the track for a retry
return err
}
accepted, ignored := scrobbleResponse(respData)
log.Printf("last.fm: Scrobble succese: accepted - %d, ignored - %d\n", accepted, ignored)
fmt.Printf("scrobbled:\t %s - %s \n", xm.Title, xm.Artist)
return nil
}
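// A usage sketch (field names taken from the calls above; Timestamp is left
// at its zero value here):
//
//	track := xiami.Track{Title: "Song", Artist: "Artist", Album: "Album"}
//	if err := Scrobble(track); err != nil {
//		// re-queue the track for a later retry
//	}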
func scrobbleResponse(data []byte) (accepted, ignored int) {
type response struct {
Data struct {
Msg struct {
Accepted int `json:"accepted"`
Ignored int `json:"ignored"`
} `json:"@attr"`
} `json:"scrobbles"`
}
var resp response
json.Unmarshal(data, &resp)
return resp.Data.Msg.Accepted, resp.Data.Msg.Ignored
}
// UpdateNowPlaying updates the "now playing" status.
// https://www.last.fm/api/show/track.updateNowPlaying
func UpdateNowPlaying(xm xiami.Track) error {
log.Println("last.fm: nowPlayingChan track: ", xm)
v := url.Values{}
v.Set("method", "track.updateNowPlaying")
v.Set("sk", sk)
v.Set("api_key", apiKey)
v.Set("artist", xm.Artist)
v.Set("album", xm.Album)
v.Set("track", xm.Title)
sig := signature(&v)
v.Set("api_sig", sig)
v.Set("format", "json")
if _, err := postRequest(v.Encode()); err != nil {
		// on failure, the update is simply discarded
return err
}
fmt.Printf("updated:\t %s - %s \n", xm.Title, xm.Artist)
return nil
}
func getRequest(url string) ([]byte, error) {
log.Println("last.fm: getRequest url: ", url)
res, err := http.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
resData, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != 200 {
return nil, fmt.Errorf("status code error: '%s' on %s body: %s", res.Status, url, string(resData))
}
log.Println("last.fm: getRequest response: ", string(resData))
return resData, nil
}
func postRequest(query string) ([]byte, error) {
r := bytes.NewReader([]byte(query))
contentType := "application/x-www-form-urlencoded"
log.Println("last.fm: postRequest params: ", query)
res, err := http.Post(apiUrl, contentType, r)
if err != nil {
return nil, fmt.Errorf("postRequest has error: %s", err)
}
resData, _ := ioutil.ReadAll(res.Body)
defer res.Body.Close()
if res.StatusCode != 200 {
errCode, errMsg := handleError(resData)
if errCode == 9 {
fmt.Println(errMsg)
resetAuth()
fmt.Println("Config reset. Please re-start the program.")
close(QuitChan)
os.Exit(1)
}
return nil, fmt.Errorf("status code error: '%s' on %s body: %s", res.Status, apiUrl, string(resData))
}
log.Println("last.fm: postRequest response: ", string(resData))
return resData, nil
}
func toMap(byteData []byte) (result map[string]string) {
r := bytes.NewReader(byteData)
json.NewDecoder(r).Decode(&result)
return result
}
func handleError(errData []byte) (code int, msg string) {
type ErrResponse struct {
Code int `json:"error"`
Msg string `json:"message"`
}
var resp ErrResponse
json.Unmarshal(errData, &resp)
return resp.Code, resp.Msg
}
|
package mesh
import (
"fmt"
"github.com/layer5io/meshery/mesheryctl/internal/cli/root/config"
"github.com/layer5io/meshery/mesheryctl/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
removeCmd = &cobra.Command{
Use: "remove",
Short: "remove a service mesh in the kubernetes cluster",
Args: cobra.MinimumNArgs(0),
Long: `remove service mesh in the connected kubernetes cluster`,
PreRunE: func(cmd *cobra.Command, args []string) error {
log.Infof("Verifying prerequisites...")
mctlCfg, err := config.GetMesheryCtl(viper.GetViper())
if err != nil {
return errors.Wrap(err, "error processing config")
}
if len(args) < 1 {
meshName, err = validateMesh(mctlCfg, tokenPath, "")
} else {
meshName, err = validateMesh(mctlCfg, tokenPath, args[0])
}
if err != nil {
return errors.Wrap(err, "error validating request")
}
if err = validateAdapter(mctlCfg, tokenPath, meshName); err != nil {
return errors.Wrap(err, "adapter not valid")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
s := utils.CreateDefaultSpinner(fmt.Sprintf("Removing %s", meshName), fmt.Sprintf("\n%s service mesh removed successfully", meshName))
mctlCfg, err := config.GetMesheryCtl(viper.GetViper())
if err != nil {
return errors.Wrap(err, "error processing config")
}
s.Start()
_, err = sendDeployRequest(mctlCfg, meshName, true)
if err != nil {
return errors.Wrap(err, "error installing service mesh")
}
s.Stop()
//log.Infof("Verifying Installation")
//s.Start()
//_, err = waitForDeployResponse(mctlCfg, meshName)
//if err != nil {
// return errors.Wrap(err, "error verifying installation")
//}
//s.Stop()
return nil
},
}
)
func init() {
removeCmd.Flags().StringVarP(&adapterURL, "adapter", "a", "meshery-istio:10000", "Adapter to use for installation")
removeCmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "Kubernetes namespace to be used for deploying the validation tests and sample workload")
removeCmd.Flags().StringVarP(&tokenPath, "tokenPath", "t", "", "Path to token for authenticating to Meshery API")
_ = removeCmd.MarkFlagRequired("tokenPath")
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package fixtures contains fixtures useful for Kiosk mode tests.
package fixtures
import (
"context"
"encoding/json"
"io/ioutil"
"path/filepath"
"time"
"github.com/shirou/gopsutil/v3/process"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash/ashproc"
"chromiumos/tast/local/kioskmode"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
func init() {
testing.AddFixture(&testing.Fixture{
Name: fixture.KioskLoggedInAsh,
Desc: "Kiosk mode started with default app setup, DUT is enrolled",
Contacts: []string{"kamilszarek@google.com", "alt-modalities-stability@google.com"},
Impl: &kioskFixture{
autoLaunchKioskAppID: kioskmode.WebKioskAccountID,
useDefaultLocalAccounts: true,
},
SetUpTimeout: chrome.ManagedUserLoginTimeout,
ResetTimeout: chrome.ResetTimeout,
TearDownTimeout: chrome.ResetTimeout,
PostTestTimeout: 15 * time.Second,
Parent: fixture.FakeDMSEnrolled,
})
testing.AddFixture(&testing.Fixture{
Name: fixture.KioskLoggedInLacros,
Desc: "Kiosk mode started with default app setup, DUT is enrolled and Lacros enabled",
Contacts: []string{"irfedorova@google.com", "chromeos-kiosk-eng@google.com"},
Impl: &kioskFixture{
autoLaunchKioskAppID: kioskmode.WebKioskAccountID,
useDefaultLocalAccounts: true,
extraOpts: []chrome.Option{chrome.ExtraArgs("--enable-features=LacrosSupport,WebKioskEnableLacros", "--lacros-availability-ignore")},
},
SetUpTimeout: chrome.ManagedUserLoginTimeout,
ResetTimeout: chrome.ResetTimeout,
TearDownTimeout: chrome.ResetTimeout,
PostTestTimeout: 15 * time.Second,
Parent: fixture.FakeDMSEnrolled,
})
}
type kioskFixture struct {
// cr is a connection to an already-started Chrome instance that loads policies from FakeDMS.
cr *chrome.Chrome
// fdms is the already running DMS server from the parent fixture.
fdms *fakedms.FakeDMS
// useDefaultLocalAccounts enables default local accounts generated in
// kioskmode.New().
useDefaultLocalAccounts bool
// localAccounts is the policy with local accounts configuration that will
// be applied for Kiosk mode.
localAccounts *policy.DeviceLocalAccounts
// autoLaunchKioskAppID is a preselected Kiosk app ID used for autolaunch.
autoLaunchKioskAppID string
// extraOpts contains extra options passed to Chrome.
extraOpts []chrome.Option
// proc is the root Chrome process. Kept to be used in Reset() checking if
// Chrome process hasn't restarted.
proc *process.Process
	// kiosk is a reference to the Kiosk instance.
kiosk *kioskmode.Kiosk
}
// KioskFixtData is returned by the fixture.
type KioskFixtData struct {
// fakeDMS is an already running DMS server.
fakeDMS *fakedms.FakeDMS
// chrome is a connection to an already-started Chrome instance that loads policies from FakeDMS.
chrome *chrome.Chrome
}
// Chrome implements the HasChrome interface.
func (f KioskFixtData) Chrome() *chrome.Chrome {
if f.chrome == nil {
panic("Chrome is called with nil chrome instance")
}
return f.chrome
}
// FakeDMS implements the HasFakeDMS interface.
func (f KioskFixtData) FakeDMS() *fakedms.FakeDMS {
if f.fakeDMS == nil {
panic("FakeDMS is called with nil fakeDMS instance")
}
return f.fakeDMS
}
// PolicyFileDump is the filename where the state of policies is dumped after the test ends.
const PolicyFileDump = "policies.json"
func (k *kioskFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
fdms, ok := s.ParentValue().(*fakedms.FakeDMS)
if !ok {
s.Fatal("Parent is not a fakeDMSEnrolled fixture")
}
k.fdms = fdms
options := []kioskmode.Option{
kioskmode.AutoLaunch(k.autoLaunchKioskAppID),
kioskmode.ExtraChromeOptions(k.extraOpts...),
}
if k.useDefaultLocalAccounts {
options = append(options, kioskmode.DefaultLocalAccounts())
} else {
options = append(options, kioskmode.CustomLocalAccounts(k.localAccounts))
}
kiosk, cr, err := kioskmode.New(ctx, fdms, options...)
if err != nil {
s.Fatal("Failed to create Chrome in kiosk mode: ", err)
}
proc, err := ashproc.Root()
if err != nil {
if err := kiosk.Close(ctx); err != nil {
s.Error("There was an error while closing Kiosk: ", err)
}
s.Fatal("Failed to get root Chrome PID: ", err)
}
chrome.Lock()
k.cr = cr
k.proc = proc
k.kiosk = kiosk
return &KioskFixtData{k.fdms, k.cr}
}
func (k *kioskFixture) TearDown(ctx context.Context, s *testing.FixtState) {
chrome.Unlock()
if k.cr == nil {
s.Fatal("Chrome not yet started")
}
if err := k.kiosk.Close(ctx); err != nil {
s.Error("There was an error while closing Kiosk: ", err)
}
k.cr = nil
}
func (k *kioskFixture) Reset(ctx context.Context) error {
// Check the connection to Chrome.
if err := k.cr.Responded(ctx); err != nil {
return errors.Wrap(err, "existing Chrome connection is unusable")
}
// Check if the root chrome process is still running.
if r, err := k.proc.IsRunning(); err != nil || !r {
return errors.New("found root chrome process termination while running in Kiosk mode")
}
return nil
}
func (k *kioskFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {}
func (k *kioskFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
tconn, err := k.cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Failed to create TestAPI connection: ", err)
}
policies, err := policyutil.PoliciesFromDUT(ctx, tconn)
if err != nil {
s.Fatal("Failed to obtain policies from Chrome: ", err)
}
b, err := json.MarshalIndent(policies, "", " ")
if err != nil {
s.Fatal("Failed to marshal policies: ", err)
}
	// Dump all policies as seen by Chrome to the test's OutDir.
if err := ioutil.WriteFile(filepath.Join(s.OutDir(), PolicyFileDump), b, 0644); err != nil {
s.Error("Failed to dump policies to file: ", err)
}
}
|
package jago
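// Startup creates the main thread, links and initializes the class named by
// initialClassName, enqueues its <clinit> frames followed by its main method,
// and then runs the thread.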
func Startup(initialClassName string) {
thread := THREAD_MANAGER.NewThread("main")
//systemClass := BOOTSTRAP_CLASSLOADER.CreateClass("java/lang/System").(*Class)
//systemClass.Link()
//systemClassClinits := systemClass.Initialize()
//for _, clinit := range systemClassClinits { thread.enqueueFrame(NewStackFrame(clinit))}
//
//initializeSystemClassMethod := systemClass.GetMethod("initializeSystemClass", "()V")
//thread.enqueueFrame(NewStackFrame(initializeSystemClassMethod))
initialClass := BOOTSTRAP_CLASSLOADER.CreateClass(initialClassName).(*Class)
	// As per the JVM specification, the initial class must be linked and
	// initialized before its main method is invoked.
	initialClass.Link()
	initialClassClinits := initialClass.Initialize()
	for _, clinit := range initialClassClinits {
		thread.enqueueFrame(NewStackFrame(clinit))
	}
mainMethod := initialClass.GetMethod(MAIN_METHOD_NAME, MAIN_METHOD_DESCRIPTOR)
thread.enqueueFrame(NewStackFrame(mainMethod))
thread.Run()
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/luckydog8686/logs"
"net/http"
"reflect"
)
type SS struct {
Name string
}
func main() {
	r := GenRunc(TestGen)
	router := gin.Default()
	router.POST("/ping", r)
	router.Run("127.0.0.1:80")
}
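// GenRunc wraps a handler of shape func(*T) (*T, error) into a
// gin.HandlerFunc: it reflects on the first parameter type, binds the request
// body into a freshly allocated *T, invokes the wrapped function via Call,
// and writes the result and error back as JSON.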
func GenRunc(fun interface{}) gin.HandlerFunc {
	t := reflect.TypeOf(fun)
	logs.Info(t)
	inV := t.In(0)
	inKind := inV.Kind()
	fmt.Printf("Kind: %v Name: %v -----------\n", inKind, inV.Name())
	tx := func(c *gin.Context) {
		s := reflect.New(inV.Elem()).Interface()
		logs.Info(s)
		logs.Info(reflect.TypeOf(s))
		err2 := c.Bind(s)
		logs.Info(reflect.TypeOf(s))
		if err2 != nil {
			logs.Error(err2)
		}
		logs.Info("S:", s)
		res := Call(fun, s)
		logs.Info(len(res))
		data := res[0].Interface()
		err := res[1].Interface()
		logs.Info(data)
		logs.Info(res[1])
		//data, err := TestGen(s)
		c.JSON(http.StatusOK, gin.H{"error": err, "data": data})
	}
	return tx
}
func TestGen(ss *SS) (*SS, error) {
	logs.Info(ss)
	return ss, nil
}
func Call(fun interface{}, params ...interface{}) []reflect.Value {
f := reflect.ValueOf(fun)
in := make([]reflect.Value, len(params))
for k, param := range params {
in[k] = reflect.ValueOf(param)
}
return f.Call(in)
} |
package calc
import "bytes"
type Expression interface {
String() string
}
// Integer literal expression
type IntegerLiteralExpression struct {
Token Token
Value int64
}
func (il *IntegerLiteralExpression) String() string {
return il.Token.Literal
}
// Prefix expression, e.g. +1 or -1
type PrefixExpression struct {
Token *Token
Operator string
Right Expression
}
// String renders the prefix expression parenthesized, e.g. (+1).
func (pe *PrefixExpression) String() string {
	var out bytes.Buffer
	out.WriteString("(")
	out.WriteString(pe.Operator)
	out.WriteString(pe.Right.String())
	out.WriteString(")")
	return out.String()
}
type InfixExpression struct {
Token Token
Left Expression
Operator string
Right Expression
}
// String renders the infix expression parenthesized, e.g. (1 + 2).
func (ie *InfixExpression) String() string {
	var out bytes.Buffer
	out.WriteString("(")
	out.WriteString(ie.Left.String())
	out.WriteString(" ")
	out.WriteString(ie.Operator)
	out.WriteString(" ")
	out.WriteString(ie.Right.String())
	out.WriteString(")")
	return out.String()
}
|
package middleware
import (
"bytes"
"encoding/json"
"github.com/gin-gonic/gin"
"mockman/di"
)
type bodyLogWriter struct {
gin.ResponseWriter
body *bytes.Buffer
}
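// Write tees the response body: a copy is captured in body for logging while
// the bytes are written through to the underlying gin.ResponseWriter.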
func (w bodyLogWriter) Write(b []byte) (int, error) {
w.body.Write(b)
return w.ResponseWriter.Write(b)
}
func RespLogMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
blw := &bodyLogWriter{body: bytes.NewBufferString(""), ResponseWriter: c.Writer}
c.Writer = blw
reqInfo := getReqInfo(c)
c.Set("reqInfo", &reqInfo)
reqJson, _ := json.MarshalIndent(reqInfo, "", " ")
c.Next()
di.Zap().Infof("Send HTTP response, req uri: %s, \nreqInfo: %v, \nresp code: %v, \nrespbody: %v",
c.Request.RequestURI, string(reqJson), c.Writer.Status(), blw.body.String())
}
}
|
package src
// smallestRange returns the smallest range [a, b] covering at least one
// element from each ascending list in nums (a k-pointer sweep: always advance
// the list that contributes the current minimum). The sentinels assume
// |values| < 2^30, as in the usual statement of this problem.
func smallestRange(nums [][]int) []int {
	if len(nums) == 0 {
		return nil
	}
	idx := make([]int, len(nums))
	best := []int{0, 1 << 30}
	for {
		lo, hi, minList := 1<<30, -(1 << 30), 0
		for i, j := range idx {
			if j == len(nums[i]) {
				return best // one list is exhausted; no further ranges exist
			}
			v := nums[i][j]
			if v < lo {
				lo, minList = v, i
			}
			if v > hi {
				hi = v
			}
		}
		if hi-lo < best[1]-best[0] {
			best = []int{lo, hi}
		}
		idx[minList]++
	}
}
|
package admin
import (
db "github.com/JieeiroSst/LapTRWeb/config"
)
type Profile struct {
UserID int `json:"userid"`
UserName string `json:"username"`
Address string `json:"address"`
Password string `json:"password"`
}
func ShowListProfile() []Profile {
	db := db.DbConn()
	defer db.Close()
	selDB, err := db.Query("select * from Profile")
	if err != nil {
		panic(err.Error())
	}
	defer selDB.Close()
	profile := Profile{}
	res := []Profile{}
	for selDB.Next() {
		var id int
		var username, address, password string
		err = selDB.Scan(&id, &username, &address, &password)
		if err != nil {
			panic(err.Error())
		}
		profile.UserID = id
		profile.UserName = username
		profile.Address = address
		profile.Password = password
		res = append(res, profile)
	}
	return res
}
func ShowSingleProfile(id int) Profile {
	db := db.DbConn()
	defer db.Close()
	selDB, err := db.Query("select * from Profile where UserId=?", id)
	if err != nil {
		panic(err.Error())
	}
	defer selDB.Close()
	profile := Profile{}
	for selDB.Next() {
		var ids int
		var username, address, password string
		err = selDB.Scan(&ids, &username, &address, &password)
		if err != nil {
			panic(err.Error())
		}
		profile.UserID = ids
		profile.UserName = username
		profile.Address = address
		profile.Password = password
	}
	return profile
}
func UpdateProfile(p Profile) {
	db := db.DbConn()
	defer db.Close()
	// Note: SET clauses are comma-separated; "set a=? and b=?" would parse as
	// a single boolean expression and silently corrupt the row.
	update, err := db.Prepare("update Profile set UserName=?, Address=? where UserId=?")
	if err != nil {
		panic(err.Error())
	}
	defer update.Close()
	update.Exec(p.UserName, p.Address, p.UserID)
}
func DeleteProfile(id int) {
	db := db.DbConn()
	defer db.Close()
	stmt, err := db.Prepare("delete from Profile where UserId=?")
	if err != nil {
		panic(err.Error())
	}
	defer stmt.Close()
	stmt.Exec(id)
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// {{/*
// +build execgen_template
//
// This file is the execgen template for mergejoiner.eg.go. It's formatted in a
// special way, so it's both valid Go and a valid text/template input. This
// permits editing this file with editor support.
//
// */}}
package colexecjoin
import (
"context"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/col/coldataext"
"github.com/cockroachdb/cockroach/pkg/col/typeconv"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/errors"
)
// Workaround for bazel auto-generated code. goimports does not automatically
// pick up the right packages when run within the bazel sandbox.
var (
_ = typeconv.DatumVecCanonicalTypeFamily
_ apd.Context
_ coldataext.Datum
_ duration.Duration
_ tree.AggType
)
// {{/*
// Declarations to make the template compile properly.
// _GOTYPE is the template variable.
type _GOTYPE interface{}
// _CANONICAL_TYPE_FAMILY is the template variable.
const _CANONICAL_TYPE_FAMILY = types.UnknownFamily
// _TYPE_WIDTH is the template variable.
const _TYPE_WIDTH = 0
// _ASSIGN_EQ is the template equality function for assigning the first input
// to the result of the second input == the third input.
func _ASSIGN_EQ(_, _, _, _, _, _ interface{}) int {
colexecerror.InternalError(errors.AssertionFailedf(""))
}
// _ASSIGN_CMP is the template equality function for assigning the first input
// to the result of comparing the second input to the third input which returns
// an integer. That integer is:
// - negative if left < right
// - zero if left == right
// - positive if left > right.
func _ASSIGN_CMP(_, _, _, _, _ interface{}) int {
colexecerror.VectorizedInternalPanic("")
}
// _L_SEL_IND is the template type variable for the loop variable that
// is either curLIdx or lSel[curLIdx] depending on whether we're in a
// selection or not.
const _L_SEL_IND = 0
// _R_SEL_IND is the template type variable for the loop variable that
// is either curRIdx or rSel[curRIdx] depending on whether we're in a
// selection or not.
const _R_SEL_IND = 0
// _SEL_ARG is used in place of the string "$sel", since that isn't valid go
// code.
const _SEL_ARG = 0
// _JOIN_TYPE is used in place of the string "$.JoinType", since that isn't
// valid go code.
const _JOIN_TYPE = 0
// */}}
type mergeJoin_JOIN_TYPE_STRINGOp struct {
*mergeJoinBase
}
var _ colexecop.Operator = &mergeJoin_JOIN_TYPE_STRINGOp{}
// {{/*
// This code snippet is the "meat" of the probing phase.
func _PROBE_SWITCH(_JOIN_TYPE joinTypeInfo, _SEL_PERMUTATION selPermutation) { // */}}
// {{define "probeSwitch" -}}
// {{$sel := $.SelPermutation}}
lNulls := lVec.Nulls()
rNulls := rVec.Nulls()
switch lVec.CanonicalTypeFamily() {
// {{range $overload := $.Global.Overloads}}
case _CANONICAL_TYPE_FAMILY:
switch colType.Width() {
// {{range .WidthOverloads}}
case _TYPE_WIDTH:
lKeys := lVec.TemplateType()
rKeys := rVec.TemplateType()
var (
lGroup, rGroup group
cmp int
match bool
lVal, rVal _GOTYPE
lSelIdx, rSelIdx int
)
for o.groups.nextGroupInCol(&lGroup, &rGroup) {
curLIdx := lGroup.rowStartIdx
curRIdx := rGroup.rowStartIdx
curLEndIdx := lGroup.rowEndIdx
curREndIdx := rGroup.rowEndIdx
areGroupsProcessed := false
_LEFT_UNMATCHED_GROUP_SWITCH(_JOIN_TYPE)
_RIGHT_UNMATCHED_GROUP_SWITCH(_JOIN_TYPE)
// Expand or filter each group based on the current equality column.
for curLIdx < curLEndIdx && curRIdx < curREndIdx && !areGroupsProcessed {
cmp = 0
lNull := lNulls.NullAt(_L_SEL_IND)
rNull := rNulls.NullAt(_R_SEL_IND)
// {{if _JOIN_TYPE.IsSetOp}}
// {{/*
// Set operations allow null equality, so we handle
// NULLs first.
// */}}
if lNull {
// {{/* If we have NULL on the left, then it is smaller than the right value. */}}
cmp--
}
if rNull {
// {{/* If we have NULL on the right, then it is smaller than the left value. */}}
cmp++
}
var nullMatch bool
// {{/* Remove unused warning for some code paths of INTERSECT ALL join. */}}
_ = nullMatch
// If we have a NULL match, it will take precedence over
// cmp value set above.
nullMatch = lNull && rNull
// {{else}}
// {{/*
// Non-set operation joins do not allow null equality,
// so if either value is NULL, the tuples are not
// matches.
// */}}
// TODO(yuzefovich): we can advance both sides if both are
// NULL.
if lNull {
_NULL_FROM_LEFT_SWITCH(_JOIN_TYPE)
curLIdx++
continue
}
if rNull {
_NULL_FROM_RIGHT_SWITCH(_JOIN_TYPE)
curRIdx++
continue
}
// {{end}}
needToCompare := true
// {{if _JOIN_TYPE.IsSetOp}}
// For set operation joins we have already set 'cmp' to
// correct value above if we have a null value at least
// on one side.
needToCompare = !lNull && !rNull
// {{end}}
if needToCompare {
lSelIdx = _L_SEL_IND
lVal = lKeys.Get(lSelIdx)
rSelIdx = _R_SEL_IND
rVal = rKeys.Get(rSelIdx)
_ASSIGN_CMP(cmp, lVal, rVal, lKeys, rKeys)
}
if cmp == 0 {
// Find the length of the groups on each side.
lGroupLength, rGroupLength := 1, 1
// If a group ends before the end of the probing batch,
// then we know it is complete.
lComplete := curLEndIdx < o.proberState.lLength
rComplete := curREndIdx < o.proberState.rLength
beginLIdx, beginRIdx := curLIdx, curRIdx
curLIdx++
curRIdx++
// Find the length of the group on the left.
for curLIdx < curLEndIdx {
// {{if _JOIN_TYPE.IsSetOp}}
if nullMatch {
// {{/*
// We have a NULL match, so we only
// extend the left group if we have a
// NULL element.
// */}}
if !lNulls.NullAt(_L_SEL_IND) {
lComplete = true
break
}
} else
// {{end}}
{
if lNulls.NullAt(_L_SEL_IND) {
lComplete = true
break
}
lSelIdx = _L_SEL_IND
newLVal := lKeys.Get(lSelIdx)
_ASSIGN_EQ(match, newLVal, lVal, _, lKeys, lKeys)
if !match {
lComplete = true
break
}
}
lGroupLength++
curLIdx++
}
// Find the length of the group on the right.
for curRIdx < curREndIdx {
// {{if _JOIN_TYPE.IsSetOp}}
if nullMatch {
// {{/*
// We have a NULL match, so we only
// extend the right group if we have a
// NULL element.
// */}}
if !rNulls.NullAt(_R_SEL_IND) {
rComplete = true
break
}
} else
// {{end}}
{
if rNulls.NullAt(_R_SEL_IND) {
rComplete = true
break
}
rSelIdx = _R_SEL_IND
newRVal := rKeys.Get(rSelIdx)
_ASSIGN_EQ(match, newRVal, rVal, _, rKeys, rKeys)
if !match {
rComplete = true
break
}
}
rGroupLength++
curRIdx++
}
// Last equality column and either group is incomplete. Save state
// and have it handled in the next iteration.
if eqColIdx == len(o.left.eqCols)-1 && (!lComplete || !rComplete) {
o.appendToBufferedGroup(ctx, &o.left, o.proberState.lBatch, lSel, beginLIdx, lGroupLength)
o.proberState.lIdx = lGroupLength + beginLIdx
o.appendToBufferedGroup(ctx, &o.right, o.proberState.rBatch, rSel, beginRIdx, rGroupLength)
o.proberState.rIdx = rGroupLength + beginRIdx
o.groups.finishedCol()
break EqLoop
}
if eqColIdx < len(o.left.eqCols)-1 {
o.groups.addGroupsToNextCol(beginLIdx, lGroupLength, beginRIdx, rGroupLength)
} else {
// {{if _JOIN_TYPE.IsLeftSemi}}
leftSemiGroupLength := lGroupLength
// {{if _JOIN_TYPE.IsSetOp}}
// For INTERSECT ALL join we add a left semi group
// of length min(lGroupLength, rGroupLength).
if rGroupLength < lGroupLength {
leftSemiGroupLength = rGroupLength
}
// {{end}}
o.groups.addLeftSemiGroup(beginLIdx, leftSemiGroupLength)
// {{else if _JOIN_TYPE.IsRightSemi}}
o.groups.addRightSemiGroup(beginRIdx, rGroupLength)
// {{else if _JOIN_TYPE.IsLeftAnti}}
// {{if _JOIN_TYPE.IsSetOp}}
// For EXCEPT ALL join we add (lGroupLength - rGroupLength) number
// (if positive) of unmatched left groups.
for leftUnmatchedTupleIdx := beginLIdx + rGroupLength; leftUnmatchedTupleIdx < beginLIdx+lGroupLength; leftUnmatchedTupleIdx++ {
// Right index here doesn't matter.
o.groups.addLeftUnmatchedGroup(leftUnmatchedTupleIdx, beginRIdx)
}
// {{else}}
// With LEFT ANTI join, we are only interested in unmatched tuples
// from the left, and all tuples in the current group have a match.
// {{end}}
// {{else if _JOIN_TYPE.IsRightAnti}}
// With RIGHT ANTI join, we are only interested in unmatched tuples
// from the right, and all tuples in the current group have a match.
// {{else}}
// Neither group ends with the batch, so add the group to the
// circular buffer.
o.groups.addGroupsToNextCol(beginLIdx, lGroupLength, beginRIdx, rGroupLength)
// {{end}}
}
} else { // mismatch
// The line below is a compact form of the following:
// incrementLeft :=
// (cmp < 0 && o.left.directions[eqColIdx] == execinfrapb.Ordering_Column_ASC) ||
// (cmp > 0 && o.left.directions[eqColIdx] == execinfrapb.Ordering_Column_DESC).
incrementLeft := cmp < 0 == (o.left.directions[eqColIdx] == execinfrapb.Ordering_Column_ASC)
if incrementLeft {
curLIdx++
_INCREMENT_LEFT_SWITCH(_JOIN_TYPE, _SEL_ARG)
} else {
curRIdx++
_INCREMENT_RIGHT_SWITCH(_JOIN_TYPE, _SEL_ARG)
}
}
}
_PROCESS_NOT_LAST_GROUP_IN_COLUMN_SWITCH(_JOIN_TYPE)
// Both o.proberState.lIdx and o.proberState.rIdx should point to the
// last elements processed in their respective batches.
o.proberState.lIdx = curLIdx
o.proberState.rIdx = curRIdx
}
// {{end}}
}
// {{end}}
default:
colexecerror.InternalError(errors.AssertionFailedf("unhandled type %s", colType))
}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet processes an unmatched group from the left.
func _LEFT_UNMATCHED_GROUP_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}}
// {{define "leftUnmatchedGroupSwitch" -}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Unmatched groups are not possible with INNER, LEFT SEMI, RIGHT SEMI, and
// INTERSECT ALL joins (the latter has IsLeftSemi == true), so there is
// nothing to do here.
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
if lGroup.unmatched {
if curLIdx+1 != curLEndIdx {
colexecerror.InternalError(errors.AssertionFailedf("unexpectedly length %d of the left unmatched group is not 1", curLEndIdx-curLIdx))
}
// The row already does not have a match, so we don't need to do any
// additional processing.
o.groups.addLeftUnmatchedGroup(curLIdx, curRIdx)
curLIdx++
areGroupsProcessed = true
}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
// {{/*
// Unmatched groups from the left are not possible with RIGHT OUTER and
// RIGHT ANTI joins, so there is nothing to do here.
// */}}
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet processes an unmatched group from the right.
func _RIGHT_UNMATCHED_GROUP_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}}
// {{define "rightUnmatchedGroupSwitch" -}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Unmatched groups are not possible with INNER, LEFT SEMI, RIGHT SEMI, and
// INTERSECT ALL joins (the latter has IsLeftSemi == true), so there is
// nothing to do here.
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
// {{/*
// Unmatched groups from the right are not possible with LEFT OUTER, LEFT
// ANTI, and EXCEPT ALL joins (the latter has IsLeftAnti == true), so there
// is nothing to do here.
// */}}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
if rGroup.unmatched {
if curRIdx+1 != curREndIdx {
colexecerror.InternalError(errors.AssertionFailedf("unexpectedly length %d of the right unmatched group is not 1", curREndIdx-curRIdx))
}
// The row already does not have a match, so we don't need to do any
// additional processing.
o.groups.addRightUnmatchedGroup(curLIdx, curRIdx)
curRIdx++
areGroupsProcessed = true
}
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet decides what to do if we encounter null in the equality
// column from the left input. Note that the case of Null equality *must* be
// checked separately.
func _NULL_FROM_LEFT_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}}
// {{define "nullFromLeftSwitch" -}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Nulls coming from the left input are ignored in INNER, LEFT SEMI, and
// RIGHT SEMI joins.
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
o.groups.addLeftUnmatchedGroup(curLIdx, curRIdx)
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
// {{/*
// Nulls coming from the left input are ignored in RIGHT OUTER and RIGHT
// ANTI joins.
// */}}
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet decides what to do if we encounter null in the equality
// column from the right input. Note that the case of Null equality *must* be
// checked separately.
func _NULL_FROM_RIGHT_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}}
// {{define "nullFromRightSwitch" -}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Nulls coming from the right input are ignored in INNER, LEFT SEMI, and
// RIGHT SEMI joins.
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
// {{/*
// Nulls coming from the right input are ignored in LEFT OUTER and LEFT
// ANTI joins.
// */}}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
o.groups.addRightUnmatchedGroup(curLIdx, curRIdx)
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet decides what to do when - while looking for a match
// between two inputs - we need to advance the left side, i.e. it decides how
// to handle an unmatched tuple from the left.
func _INCREMENT_LEFT_SWITCH(_JOIN_TYPE joinTypeInfo, _SEL_PERMUTATION selPermutation) { // */}}
// {{define "incrementLeftSwitch" -}}
// {{$sel := $.SelPermutation}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Unmatched tuple from the left source is not outputted in INNER, LEFT
// SEMI, RIGHT SEMI, and INTERSECT ALL joins (the latter has
// IsLeftSemi == true).
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
// All the rows on the left within the current group will not get a match on
// the right, so we're adding each of them as a left unmatched group.
o.groups.addLeftUnmatchedGroup(curLIdx-1, curRIdx)
for curLIdx < curLEndIdx {
// {{/*
// EXCEPT ALL join allows NULL equality, so we have special
// treatment of NULLs.
// */}}
// {{if _JOIN_TYPE.IsSetOp}}
newLValNull := lNulls.NullAt(_L_SEL_IND)
if lNull != newLValNull {
// We have a null mismatch, so we've reached the end of the current
// group on the left.
break
} else if newLValNull && lNull {
nullMatch = true
} else {
nullMatch = false
}
// {{else}}
if lNulls.NullAt(_L_SEL_IND) {
break
}
// {{end}}
// {{if _JOIN_TYPE.IsSetOp}}
// {{/*
// We have checked for null equality above and set nullMatch to the
// correct value. If it is true, then both the old and the new
// values are NULL, so there is no further comparison needed.
// */}}
if !nullMatch {
// {{end}}
lSelIdx = _L_SEL_IND
// {{with .Global}}
newLVal := lKeys.Get(lSelIdx)
_ASSIGN_EQ(match, newLVal, lVal, _, lKeys, lKeys)
// {{end}}
if !match {
break
}
// {{if _JOIN_TYPE.IsSetOp}}
}
// {{end}}
o.groups.addLeftUnmatchedGroup(curLIdx, curRIdx)
curLIdx++
}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
// {{/*
// Unmatched tuple from the left source is not outputted in RIGHT OUTER
// and RIGHT ANTI joins.
// */}}
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet decides what to do when - while looking for a match
// between two inputs - we need to advance the right side, i.e. it decides how
// to handle an unmatched tuple from the right.
func _INCREMENT_RIGHT_SWITCH(_JOIN_TYPE joinTypeInfo, _SEL_PERMUTATION selPermutation) { // */}}
// {{define "incrementRightSwitch" -}}
// {{$sel := $.SelPermutation}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Unmatched tuple from the right source is not outputted in INNER, LEFT
// SEMI, RIGHT SEMI, and INTERSECT ALL joins (the latter has
// IsLeftSemi == true).
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
// {{/*
// Unmatched tuple from the right source is not outputted in LEFT OUTER,
// LEFT ANTI, and EXCEPT ALL joins (the latter has IsLeftAnti == true).
// */}}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
// All the rows on the right within the current group will not get a match on
// the left, so we're adding each of them as a right unmatched group.
o.groups.addRightUnmatchedGroup(curLIdx, curRIdx-1)
for curRIdx < curREndIdx {
if rNulls.NullAt(_R_SEL_IND) {
break
}
rSelIdx = _R_SEL_IND
// {{with .Global}}
newRVal := rKeys.Get(rSelIdx)
_ASSIGN_EQ(match, newRVal, rVal, _, rKeys, rKeys)
// {{end}}
if !match {
break
}
o.groups.addRightUnmatchedGroup(curLIdx, curRIdx)
curRIdx++
}
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{/*
// This code snippet processes all but last groups in a column after we have
// reached the end of either the left or right group.
func _PROCESS_NOT_LAST_GROUP_IN_COLUMN_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}}
// {{define "processNotLastGroupInColumnSwitch" -}}
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
// {{/*
// Nothing to do here since an unmatched tuple is omitted.
// */}}
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
if !o.groups.isLastGroupInCol() && !areGroupsProcessed {
// The current group is not the last one within the column, so it cannot be
// extended into the next batch, and we need to process it right now. Any
// unprocessed row in the left group will not get a match, so each one of
// them becomes a new unmatched group with a corresponding null group.
for curLIdx < curLEndIdx {
o.groups.addLeftUnmatchedGroup(curLIdx, curRIdx)
curLIdx++
}
}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
if !o.groups.isLastGroupInCol() && !areGroupsProcessed {
// The current group is not the last one within the column, so it cannot be
// extended into the next batch, and we need to process it right now. Any
// unprocessed row in the right group will not get a match, so each one of
// them becomes a new unmatched group with a corresponding null group.
for curRIdx < curREndIdx {
o.groups.addRightUnmatchedGroup(curLIdx, curRIdx)
curRIdx++
}
}
// {{end}}
// {{end}}
// {{/*
}
// */}}
// {{range $sel := $.SelPermutations}}
func (o *mergeJoin_JOIN_TYPE_STRINGOp) probeBodyLSel_IS_L_SELRSel_IS_R_SEL(ctx context.Context) {
lSel := o.proberState.lBatch.Selection()
rSel := o.proberState.rBatch.Selection()
EqLoop:
for eqColIdx := 0; eqColIdx < len(o.left.eqCols); eqColIdx++ {
leftColIdx := o.left.eqCols[eqColIdx]
rightColIdx := o.right.eqCols[eqColIdx]
lVec := o.proberState.lBatch.ColVec(int(leftColIdx))
rVec := o.proberState.rBatch.ColVec(int(rightColIdx))
colType := o.left.sourceTypes[leftColIdx]
_PROBE_SWITCH(_JOIN_TYPE, _SEL_ARG)
// Look at the groups associated with the next equality column by moving
// the circular buffer pointer up.
o.groups.finishedCol()
}
}
// {{end}}
// {{/*
// This code snippet builds the output corresponding to the left side (i.e. is
// the main body of buildLeftGroupsFromBatch()).
func _LEFT_SWITCH(_JOIN_TYPE joinTypeInfo, _HAS_SELECTION bool) { // */}}
// {{define "leftSwitch" -}}
var srcNulls *coldata.Nulls
if src != nil {
srcNulls = src.Nulls()
}
outNulls := out.Nulls()
switch input.canonicalTypeFamilies[colIdx] {
// {{range $.Global.Overloads}}
case _CANONICAL_TYPE_FAMILY:
switch input.sourceTypes[colIdx].Width() {
// {{range .WidthOverloads}}
case _TYPE_WIDTH:
var srcCol _GOTYPESLICE
if src != nil {
srcCol = src.TemplateType()
}
outCol := out.TemplateType()
var val _GOTYPE
var srcStartIdx int
// Loop over every group.
for ; o.builderState.left.groupsIdx < len(leftGroups); o.builderState.left.groupsIdx++ {
leftGroup := &leftGroups[o.builderState.left.groupsIdx]
// {{if _JOIN_TYPE.IsLeftAnti}}
// {{/*
// With LEFT ANTI and EXCEPT ALL joins (the latter has
// IsLeftAnti == true) we want to emit output corresponding only to
// unmatched tuples, so we're skipping all "matched" groups.
// */}}
if !leftGroup.unmatched {
continue
}
// {{end}}
// If curSrcStartIdx is uninitialized, start it at the group's start idx.
// Otherwise continue where we left off.
if o.builderState.left.curSrcStartIdx == zeroMJCPCurSrcStartIdx {
o.builderState.left.curSrcStartIdx = leftGroup.rowStartIdx
}
// Loop over every row in the group.
for ; o.builderState.left.curSrcStartIdx < leftGroup.rowEndIdx; o.builderState.left.curSrcStartIdx++ {
// Repeat each row numRepeats times.
srcStartIdx = o.builderState.left.curSrcStartIdx
// {{if _HAS_SELECTION}}
srcStartIdx = sel[srcStartIdx]
// {{end}}
repeatsLeft := leftGroup.numRepeats - o.builderState.left.numRepeatsIdx
toAppend := repeatsLeft
if outStartIdx+toAppend > o.output.Capacity() {
toAppend = o.output.Capacity() - outStartIdx
}
// {{if or _JOIN_TYPE.IsRightOuter _JOIN_TYPE.IsRightAnti}}
// {{/*
// Null groups on the left can only occur with RIGHT OUTER and FULL
// OUTER (for both of which IsRightOuter is true) and RIGHT ANTI joins.
// For other joins, we're omitting this check.
// */}}
if leftGroup.nullGroup {
outNulls.SetNullRange(outStartIdx, outStartIdx+toAppend)
outStartIdx += toAppend
} else
// {{end}}
{
if srcNulls.NullAt(srcStartIdx) {
outNulls.SetNullRange(outStartIdx, outStartIdx+toAppend)
outStartIdx += toAppend
} else {
val = srcCol.Get(srcStartIdx)
for i := 0; i < toAppend; i++ {
execgen.SET(outCol, outStartIdx, val)
outStartIdx++
}
}
}
if toAppend < repeatsLeft {
// We didn't materialize all the rows in the group so save state and
// move to the next column.
o.builderState.left.numRepeatsIdx += toAppend
if colIdx == len(input.sourceTypes)-1 {
return
}
o.builderState.left.setBuilderColumnState(initialBuilderState)
continue LeftColLoop
}
o.builderState.left.numRepeatsIdx = zeroMJCPNumRepeatsIdx
}
o.builderState.left.curSrcStartIdx = zeroMJCPCurSrcStartIdx
}
o.builderState.left.groupsIdx = zeroMJCPGroupsIdx
// {{end}}
}
// {{end}}
default:
colexecerror.InternalError(errors.AssertionFailedf("unhandled type %s", input.sourceTypes[colIdx].String()))
}
// {{end}}
// {{/*
}
// */}}
// buildLeftGroupsFromBatch takes a []group and expands each group into the
// output by repeating each row in the group numRepeats times. For example,
// given an input table:
// L1 | L2
// --------
// 1 | a
// 1 | b
// and leftGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}]
// then buildLeftGroupsFromBatch expands this to
// L1 | L2
// --------
// 1 | a
// 1 | a
// 1 | a
// 1 | b
// 1 | b
// 1 | b
// Note: this is different from buildRightGroupsFromBatch in that each row of
// group is repeated numRepeats times, instead of a simple copy of the group as
// a whole.
// SIDE EFFECTS: writes into o.output.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) buildLeftGroupsFromBatch(
leftGroups []group, input *mergeJoinInput, batch coldata.Batch, destStartIdx int,
) {
sel := batch.Selection()
initialBuilderState := o.builderState.left
o.unlimitedAllocator.PerformOperation(
o.output.ColVecs()[:len(input.sourceTypes)],
func() {
// Loop over every column.
LeftColLoop:
for colIdx := range input.sourceTypes {
outStartIdx := destStartIdx
out := o.output.ColVec(colIdx)
var src coldata.Vec
if batch.Length() > 0 {
src = batch.ColVec(colIdx)
}
if sel != nil {
_LEFT_SWITCH(_JOIN_TYPE, true)
} else {
_LEFT_SWITCH(_JOIN_TYPE, false)
}
o.builderState.left.setBuilderColumnState(initialBuilderState)
}
o.builderState.left.reset()
},
)
}
// {{/*
// This code snippet builds the output corresponding to the right side (i.e. is
// the main body of buildRightGroupsFromBatch()).
func _RIGHT_SWITCH(_JOIN_TYPE joinTypeInfo, _HAS_SELECTION bool) { // */}}
// {{define "rightSwitch" -}}
var srcNulls *coldata.Nulls
if src != nil {
srcNulls = src.Nulls()
}
outNulls := out.Nulls()
switch input.canonicalTypeFamilies[colIdx] {
// {{range $.Global.Overloads}}
case _CANONICAL_TYPE_FAMILY:
switch input.sourceTypes[colIdx].Width() {
// {{range .WidthOverloads}}
case _TYPE_WIDTH:
var srcCol _GOTYPESLICE
if src != nil {
srcCol = src.TemplateType()
}
outCol := out.TemplateType()
// Loop over every group.
for ; o.builderState.right.groupsIdx < len(rightGroups); o.builderState.right.groupsIdx++ {
rightGroup := &rightGroups[o.builderState.right.groupsIdx]
// Repeat every group numRepeats times.
for ; o.builderState.right.numRepeatsIdx < rightGroup.numRepeats; o.builderState.right.numRepeatsIdx++ {
if o.builderState.right.curSrcStartIdx == zeroMJCPCurSrcStartIdx {
o.builderState.right.curSrcStartIdx = rightGroup.rowStartIdx
}
toAppend := rightGroup.rowEndIdx - o.builderState.right.curSrcStartIdx
if outStartIdx+toAppend > o.output.Capacity() {
toAppend = o.output.Capacity() - outStartIdx
}
// {{if _JOIN_TYPE.IsLeftOuter}}
// {{/*
// Null groups on the right can only occur with LEFT OUTER and FULL
// OUTER joins for both of which IsLeftOuter is true. For other joins,
// we're omitting this check.
// */}}
if rightGroup.nullGroup {
outNulls.SetNullRange(outStartIdx, outStartIdx+toAppend)
} else
// {{end}}
{
					// Optimization: when the group length is 1, use an
					// assignment instead of a copy.
if toAppend == 1 {
// {{if _HAS_SELECTION}}
srcIdx := sel[o.builderState.right.curSrcStartIdx]
// {{else}}
srcIdx := o.builderState.right.curSrcStartIdx
// {{end}}
if srcNulls.NullAt(srcIdx) {
outNulls.SetNull(outStartIdx)
} else {
v := srcCol.Get(srcIdx)
execgen.SET(outCol, outStartIdx, v)
}
} else {
out.Copy(
coldata.CopySliceArgs{
SliceArgs: coldata.SliceArgs{
Src: src,
Sel: sel,
DestIdx: outStartIdx,
SrcStartIdx: o.builderState.right.curSrcStartIdx,
SrcEndIdx: o.builderState.right.curSrcStartIdx + toAppend,
},
},
)
}
}
outStartIdx += toAppend
// If we haven't materialized all the rows from the group, then we are
// done with the current column.
if toAppend < rightGroup.rowEndIdx-o.builderState.right.curSrcStartIdx {
// If it's the last column, save state and return.
if colIdx == len(input.sourceTypes)-1 {
o.builderState.right.curSrcStartIdx += toAppend
return
}
// Otherwise, reset to the initial state and begin the next column.
o.builderState.right.setBuilderColumnState(initialBuilderState)
continue RightColLoop
}
o.builderState.right.curSrcStartIdx = zeroMJCPCurSrcStartIdx
}
o.builderState.right.numRepeatsIdx = zeroMJCPNumRepeatsIdx
}
o.builderState.right.groupsIdx = zeroMJCPGroupsIdx
// {{end}}
}
// {{end}}
default:
colexecerror.InternalError(errors.AssertionFailedf("unhandled type %s", input.sourceTypes[colIdx].String()))
}
// {{end}}
// {{/*
}
// */}}
// buildRightGroupsFromBatch takes a []group and repeats each group numRepeats
// times. For example, given an input table:
// R1 | R2
// --------
// 1 | a
// 1 | b
// and rightGroups = [{startIdx: 0, endIdx: 2, numRepeats: 3}]
// then buildRightGroups expands this to
// R1 | R2
// --------
// 1 | a
// 1 | b
// 1 | a
// 1 | b
// 1 | a
// 1 | b
// Note: this is different from buildLeftGroupsFromBatch in that each group is
// not expanded but directly copied numRepeats times.
// SIDE EFFECTS: writes into o.output.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) buildRightGroupsFromBatch(
rightGroups []group, colOffset int, input *mergeJoinInput, batch coldata.Batch, destStartIdx int,
) {
initialBuilderState := o.builderState.right
sel := batch.Selection()
o.unlimitedAllocator.PerformOperation(
o.output.ColVecs()[colOffset:colOffset+len(input.sourceTypes)],
func() {
// Loop over every column.
RightColLoop:
for colIdx := range input.sourceTypes {
outStartIdx := destStartIdx
out := o.output.ColVec(colIdx + colOffset)
var src coldata.Vec
if batch.Length() > 0 {
src = batch.ColVec(colIdx)
}
if sel != nil {
_RIGHT_SWITCH(_JOIN_TYPE, true)
} else {
_RIGHT_SWITCH(_JOIN_TYPE, false)
}
o.builderState.right.setBuilderColumnState(initialBuilderState)
}
o.builderState.right.reset()
})
}
// probe is where we generate the groups slices that are used in the build
// phase. We do this by first assuming that every row in both batches
// contributes to the cross product. Then, with every equality column, we
// filter out the rows that don't contribute to the cross product (i.e. they
// don't have a matching row on the other side in the case of an inner join),
// and set the correct cardinality.
// Note that in this phase, we do this for every group, except the last group
// in the batch.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) probe(ctx context.Context) {
o.groups.reset(o.proberState.lIdx, o.proberState.lLength, o.proberState.rIdx, o.proberState.rLength)
lSel := o.proberState.lBatch.Selection()
rSel := o.proberState.rBatch.Selection()
if lSel != nil {
if rSel != nil {
o.probeBodyLSeltrueRSeltrue(ctx)
} else {
o.probeBodyLSeltrueRSelfalse(ctx)
}
} else {
if rSel != nil {
o.probeBodyLSelfalseRSeltrue(ctx)
} else {
o.probeBodyLSelfalseRSelfalse(ctx)
}
}
}
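// Illustrative example (not part of the generated code): for an inner join
// with left key column [1, 1, 2] and right key column [1, 2, 2], probing
// key 1 yields a left group {rowStartIdx: 0, rowEndIdx: 2, numRepeats: 1}
// (each left row is repeated once per matching right row) and a right group
// {rowStartIdx: 0, rowEndIdx: 1, numRepeats: 2} (the right rows are copied
// once per matching left row), so the build phase emits 2 x 1 = 2 rows.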
// setBuilderSourceToBufferedGroup sets up the builder state to use the
// buffered group.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) setBuilderSourceToBufferedGroup(ctx context.Context) {
o.builderState.buildFrom = mjBuildFromBufferedGroup
o.bufferedGroup.helper.setupBuilder()
o.builderState.totalOutCountFromBufferedGroup = o.bufferedGroup.helper.calculateOutputCount()
o.builderState.alreadyEmittedFromBufferedGroup = 0
// We cannot yet reset the buffered groups because the builder will be taking
// input from them. The actual reset will take place on the next call to
// initProberState().
o.bufferedGroup.needToReset = true
}
// exhaustLeftSource sets up the builder to process any remaining tuples from
// the left source. It should only be called when the right source has been
// exhausted.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) exhaustLeftSource(ctx context.Context) {
// {{if or _JOIN_TYPE.IsInner (or _JOIN_TYPE.IsLeftSemi _JOIN_TYPE.IsRightSemi)}}
// {{/*
// Remaining tuples from the left source do not have a match, so they are
// ignored in INNER, LEFT SEMI, RIGHT SEMI, and INTERSECT ALL joins (the
// latter has IsLeftSemi == true).
// */}}
// {{end}}
// {{if or _JOIN_TYPE.IsLeftOuter _JOIN_TYPE.IsLeftAnti}}
// The capacity of builder state lGroups and rGroups is always at least 1
// given the init.
o.builderState.lGroups = o.builderState.lGroups[:1]
o.builderState.lGroups[0] = group{
rowStartIdx: o.proberState.lIdx,
rowEndIdx: o.proberState.lLength,
numRepeats: 1,
toBuild: o.proberState.lLength - o.proberState.lIdx,
unmatched: true,
}
// {{if _JOIN_TYPE.IsLeftOuter}}
o.builderState.rGroups = o.builderState.rGroups[:1]
o.builderState.rGroups[0] = group{
rowStartIdx: o.proberState.lIdx,
rowEndIdx: o.proberState.lLength,
numRepeats: 1,
toBuild: o.proberState.lLength - o.proberState.lIdx,
nullGroup: true,
}
// {{end}}
o.proberState.lIdx = o.proberState.lLength
// {{end}}
// {{if or _JOIN_TYPE.IsRightOuter _JOIN_TYPE.IsRightAnti}}
// {{/*
// Remaining tuples from the left source do not have a match, so they are
// ignored in RIGHT OUTER and RIGHT ANTI joins.
// */}}
// {{end}}
}
// exhaustRightSource sets up the builder to process any remaining tuples from
// the right source. It should only be called when the left source has been
// exhausted.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) exhaustRightSource() {
// {{if or _JOIN_TYPE.IsRightOuter _JOIN_TYPE.IsRightAnti}}
// The capacity of builder state lGroups and rGroups is always at least 1
// given the init.
// {{if _JOIN_TYPE.IsRightOuter}}
o.builderState.lGroups = o.builderState.lGroups[:1]
o.builderState.lGroups[0] = group{
rowStartIdx: o.proberState.rIdx,
rowEndIdx: o.proberState.rLength,
numRepeats: 1,
toBuild: o.proberState.rLength - o.proberState.rIdx,
nullGroup: true,
}
// {{end}}
o.builderState.rGroups = o.builderState.rGroups[:1]
o.builderState.rGroups[0] = group{
rowStartIdx: o.proberState.rIdx,
rowEndIdx: o.proberState.rLength,
numRepeats: 1,
toBuild: o.proberState.rLength - o.proberState.rIdx,
unmatched: true,
}
o.proberState.rIdx = o.proberState.rLength
// {{else}}
// Remaining tuples from the right source do not have a match, so they are
// ignored in all joins except for RIGHT OUTER, FULL OUTER, and RIGHT ANTI.
// {{end}}
}
// calculateOutputCount uses the toBuild field of each group and the output
// batch size to determine the output count. Note that as soon as a group is
// materialized partially or fully to output, its toBuild field is updated
// accordingly.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) calculateOutputCount(groups []group) int {
count := o.builderState.outCount
for i := 0; i < len(groups); i++ {
// {{if or _JOIN_TYPE.IsLeftAnti _JOIN_TYPE.IsRightAnti}}
if !groups[i].unmatched {
// "Matched" groups are not outputted in LEFT ANTI, RIGHT ANTI,
// and EXCEPT ALL joins (for the latter IsLeftAnti == true), so
// they do not contribute to the output count.
continue
}
// {{end}}
count += groups[i].toBuild
groups[i].toBuild = 0
if count > o.output.Capacity() {
groups[i].toBuild = count - o.output.Capacity()
count = o.output.Capacity()
return count
}
}
o.builderState.outFinished = true
return count
}
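// Illustrative trace (not part of the generated code): with outCount = 0,
// groups = [{toBuild: 3}, {toBuild: 5}], and an output capacity of 4, the
// first group fits fully (count = 3, its toBuild becomes 0), the second
// overflows (count would be 8), so its toBuild is set back to 4 for the
// next call and calculateOutputCount returns 4 without setting outFinished.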
// build creates the cross product, and writes it to the output member.
func (o *mergeJoin_JOIN_TYPE_STRINGOp) build(ctx context.Context) {
outStartIdx := o.builderState.outCount
switch o.builderState.buildFrom {
case mjBuildFromBatch:
// {{if or _JOIN_TYPE.IsRightSemi _JOIN_TYPE.IsRightAnti}}
o.builderState.outCount = o.calculateOutputCount(o.builderState.rGroups)
// {{else}}
o.builderState.outCount = o.calculateOutputCount(o.builderState.lGroups)
// {{end}}
if o.output.Width() != 0 && o.builderState.outCount > outStartIdx {
// We will actually build the output only if there are columns in the
// output batch (i.e. we're not running a query like 'SELECT count(*) ...')
// and builderState.outCount has increased (i.e. we have something to
// build).
colOffsetForRightGroups := 0
// {{if not (or _JOIN_TYPE.IsRightSemi _JOIN_TYPE.IsRightAnti)}}
o.buildLeftGroupsFromBatch(o.builderState.lGroups, &o.left, o.proberState.lBatch, outStartIdx)
colOffsetForRightGroups = len(o.left.sourceTypes)
_ = colOffsetForRightGroups
// {{end}}
// {{if not (or _JOIN_TYPE.IsLeftSemi _JOIN_TYPE.IsLeftAnti)}}
o.buildRightGroupsFromBatch(o.builderState.rGroups, colOffsetForRightGroups, &o.right, o.proberState.rBatch, outStartIdx)
// {{end}}
}
case mjBuildFromBufferedGroup:
willEmit := o.builderState.totalOutCountFromBufferedGroup - o.builderState.alreadyEmittedFromBufferedGroup
if o.builderState.outCount+willEmit > o.output.Capacity() {
willEmit = o.output.Capacity() - o.builderState.outCount
} else {
o.builderState.outFinished = true
}
o.builderState.outCount += willEmit
o.builderState.alreadyEmittedFromBufferedGroup += willEmit
if o.output.Width() != 0 && willEmit > 0 {
// {{if not (or _JOIN_TYPE.IsRightSemi _JOIN_TYPE.IsRightAnti)}}
o.bufferedGroup.helper.buildFromLeftInput(ctx, outStartIdx)
// {{end}}
// {{if not (or _JOIN_TYPE.IsLeftSemi _JOIN_TYPE.IsLeftAnti)}}
o.bufferedGroup.helper.buildFromRightInput(ctx, outStartIdx)
// {{end}}
}
default:
colexecerror.InternalError(errors.AssertionFailedf("unsupported mjBuildFrom %d", o.builderState.buildFrom))
}
}
// {{/*
// This code snippet is executed when at least one of the input sources has
// been exhausted. It processes any remaining tuples and then sets up the
// builder.
func _SOURCE_FINISHED_SWITCH(_JOIN_TYPE joinTypeInfo) { // */}}
// {{define "sourceFinishedSwitch" -}}
o.outputReady = true
o.builderState.buildFrom = mjBuildFromBatch
// {{if or $.JoinType.IsInner (or $.JoinType.IsLeftSemi $.JoinType.IsRightSemi)}}
o.setBuilderSourceToBufferedGroup(ctx)
// {{else}}
// Next, we need to make sure that the builder state is set up for the case
// when neither exhaustLeftSource nor exhaustRightSource is called below. In
// such a scenario the merge joiner is done, so it'll be outputting
// zero-length batches from now on.
o.builderState.lGroups = o.builderState.lGroups[:0]
o.builderState.rGroups = o.builderState.rGroups[:0]
// {{end}}
// {{if or $.JoinType.IsLeftOuter $.JoinType.IsLeftAnti}}
// At least one of the sources is finished. If it was the right one,
// then we need to emit remaining tuples from the left source with
// nulls corresponding to the right one. But if the left source is
// finished, then there is nothing left to do.
if o.proberState.lIdx < o.proberState.lLength {
o.exhaustLeftSource(ctx)
// We unset o.outputReady here because we want to put as many unmatched
// tuples from the left into the output batch as possible. Once outCount
// reaches the desired output batch size, the output will be returned.
o.outputReady = false
}
// {{end}}
// {{if or $.JoinType.IsRightOuter $.JoinType.IsRightAnti}}
// At least one of the sources is finished. If it was the left one,
// then we need to emit remaining tuples from the right source with
// nulls corresponding to the left one. But if the right source is
// finished, then there is nothing left to do.
if o.proberState.rIdx < o.proberState.rLength {
o.exhaustRightSource()
// We unset o.outputReady here because we want to put as many unmatched
// tuples from the right into the output batch as possible. Once outCount
// reaches the desired output batch size, the output will be returned.
o.outputReady = false
}
// {{end}}
// {{end}}
// {{/*
}
// */}}
func (o *mergeJoin_JOIN_TYPE_STRINGOp) Next(ctx context.Context) coldata.Batch {
o.output, _ = o.unlimitedAllocator.ResetMaybeReallocate(
o.outputTypes, o.output, 1 /* minCapacity */, o.memoryLimit,
)
o.bufferedGroup.helper.output = o.output
for {
switch o.state {
case mjEntry:
o.initProberState(ctx)
if o.nonEmptyBufferedGroup() {
o.state = mjFinishBufferedGroup
break
}
if o.sourceFinished() {
o.state = mjSourceFinished
break
}
o.state = mjProbe
case mjSourceFinished:
_SOURCE_FINISHED_SWITCH(_JOIN_TYPE)
o.state = mjBuild
case mjFinishBufferedGroup:
o.finishProbe(ctx)
o.setBuilderSourceToBufferedGroup(ctx)
o.state = mjBuild
case mjProbe:
o.probe(ctx)
o.setBuilderSourceToBatch()
o.state = mjBuild
case mjBuild:
o.build(ctx)
if o.builderState.outFinished {
o.state = mjEntry
o.builderState.outFinished = false
}
if o.outputReady || o.builderState.outCount == o.output.Capacity() {
if o.builderState.outCount == 0 {
// We have already fully emitted the result of the join, so we
// transition to "finished" state.
o.state = mjDone
continue
}
o.output.SetLength(o.builderState.outCount)
// Reset builder out count.
o.builderState.outCount = 0
o.outputReady = false
return o.output
}
case mjDone:
// Note that resetting of buffered group will close disk queues
// (if there are any).
if o.bufferedGroup.needToReset {
o.bufferedGroup.helper.Reset(ctx)
o.bufferedGroup.needToReset = false
}
return coldata.ZeroBatch
default:
colexecerror.InternalError(errors.AssertionFailedf("unexpected merge joiner state in Next: %v", o.state))
}
}
}
|
package main
import (
"bytes"
"database/sql"
"encoding/hex"
"errors"
"math/big"
"path/filepath"
"sort"
"sync"
"github.com/hectorchu/gonano/rpc"
"github.com/hectorchu/gonano/wallet"
"github.com/hectorchu/nano-token-protocol/tokenchain"
_ "github.com/mattn/go-sqlite3"
"github.com/mitchellh/go-homedir"
"github.com/spf13/viper"
)
type tokenChainManager struct {
m, mDB sync.Mutex
chains map[string]*tokenchain.Chain
tokens map[string]*tokenchain.Token
}
func newTokenChainManager() *tokenChainManager {
return &tokenChainManager{
chains: make(map[string]*tokenchain.Chain),
tokens: make(map[string]*tokenchain.Token),
}
}
func (tcm *tokenChainManager) getTokens() (tokens []*tokenchain.Token) {
tcm.m.Lock()
tokens = make([]*tokenchain.Token, 0, len(tcm.tokens))
for _, token := range tcm.tokens {
tokens = append(tokens, token)
}
tcm.m.Unlock()
sort.Slice(tokens, func(i, j int) bool {
return bytes.Compare(tokens[i].Hash(), tokens[j].Hash()) < 0
})
return
}
func (tcm *tokenChainManager) getBalance(token *tokenchain.Token, account string) (balance *big.Int) {
tcm.m.Lock()
balance = token.Balance(account)
tcm.m.Unlock()
return
}
func (tcm *tokenChainManager) createToken(
chain *tokenchain.Chain, a *wallet.Account, name string, supply *big.Int, decimals byte,
) (token *tokenchain.Token, err error) {
if chain == nil {
if chain, err = tokenchain.NewChain(rpcURL); err != nil {
return
}
if _, err = a.Send(chain.Address(), big.NewInt(1)); err != nil {
return
}
if err = chain.WaitForOpen(); err != nil {
return
}
tcm.m.Lock()
tcm.chains[chain.Address()] = chain
tcm.m.Unlock()
}
client := rpc.Client{URL: rpcURL}
rep, err := client.AccountRepresentative(a.Address())
if err != nil {
return
}
tcm.m.Lock()
token, err = tokenchain.TokenGenesis(chain, a, name, supply, decimals)
tcm.m.Unlock()
if err != nil {
return
}
if _, err = a.ChangeRep(rep); err != nil {
return
}
tcm.m.Lock()
tcm.tokens[string(token.Hash())] = token
tcm.m.Unlock()
return
}
func (tcm *tokenChainManager) transferToken(
token *tokenchain.Token, a *wallet.Account, account string, amount *big.Int,
) (hash rpc.BlockHash, err error) {
client := rpc.Client{URL: rpcURL}
rep, err := client.AccountRepresentative(a.Address())
if err != nil {
return
}
tcm.m.Lock()
hash, err = token.Transfer(a, account, amount)
tcm.m.Unlock()
if err != nil {
return
}
_, err = a.ChangeRep(rep)
return
}
func (tcm *tokenChainManager) fetchChain(address string) (chain *tokenchain.Chain, err error) {
tcm.m.Lock()
chain, ok := tcm.chains[address]
tcm.m.Unlock()
if ok {
return
}
if chain, err = tokenchain.LoadChain(address, rpcURL); err != nil {
return
}
tcm.m.Lock()
tcm.chains[address] = chain
if err = chain.Parse(); err == nil {
err = tcm.withDB(chain.SaveState)
}
tcm.m.Unlock()
return
}
func (tcm *tokenChainManager) fetchToken(hash rpc.BlockHash) (token *tokenchain.Token, err error) {
tcm.m.Lock()
token, ok := tcm.tokens[string(hash)]
tcm.m.Unlock()
if ok {
return
}
client := rpc.Client{URL: rpcURL}
block, err := client.BlockInfo(hash)
if err != nil {
return
}
chain, err := tcm.fetchChain(block.BlockAccount)
if err != nil {
return
}
tcm.m.Lock()
token, err = chain.Token(hash)
tcm.m.Unlock()
if err != nil {
return
}
tcm.m.Lock()
tcm.tokens[string(hash)] = token
tcm.m.Unlock()
return
}
func (tcm *tokenChainManager) isChainAddress(address string) (ok bool) {
tcm.m.Lock()
_, ok = tcm.chains[address]
tcm.m.Unlock()
return
}
func (tcm *tokenChainManager) withDB(cb func(*sql.DB) error) (err error) {
home, err := homedir.Dir()
if err != nil {
return
}
tcm.mDB.Lock()
defer tcm.mDB.Unlock()
db, err := sql.Open("sqlite3", filepath.Join(home, "tokenchains.db"))
if err != nil {
return
}
defer db.Close()
return cb(db)
}
func (tcm *tokenChainManager) load() (err error) {
tcm.withDB(tcm.loadChains)
if err = tcm.loadTokens(); err != nil {
return
}
wsClient.subscribe(func(block *rpc.Block) {
tcm.m.Lock()
if chain, ok := tcm.chains[block.Account]; ok {
if chain.Parse() == nil {
tcm.withDB(chain.SaveState)
}
}
tcm.m.Unlock()
})
go func() {
tcm.m.Lock()
for _, chain := range tcm.chains {
if chain.Parse() == nil {
tcm.withDB(chain.SaveState)
}
}
tcm.m.Unlock()
}()
return
}
func (tcm *tokenChainManager) loadChains(db *sql.DB) (err error) {
rows, err := db.Query("SELECT seed FROM chains")
if err != nil {
return
}
defer rows.Close()
for rows.Next() {
var seedStr string
if err := rows.Scan(&seedStr); err != nil {
return err
}
seed, err := hex.DecodeString(seedStr)
if err != nil {
return err
}
chain, err := tokenchain.NewChainFromSeed(seed, rpcURL)
if err != nil {
return err
}
if err = chain.LoadState(db); err != nil {
return err
}
tcm.chains[chain.Address()] = chain
}
return rows.Err()
}
func (tcm *tokenChainManager) loadTokens() (err error) {
for _, h := range viper.GetStringSlice("tokens") {
hash, err := hex.DecodeString(h)
if err != nil {
return err
}
for _, chain := range tcm.chains {
if token, err := chain.Token(hash); err == nil {
tcm.tokens[string(hash)] = token
break
}
}
if _, err = tcm.fetchToken(hash); err != nil {
return err
}
}
return
}
func (tcm *tokenChainManager) save() (err error) {
tokens := make([]string, 0, len(tcm.tokens))
for hash := range tcm.tokens {
tokens = append(tokens, rpc.BlockHash(hash).String())
}
viper.Set("tokens", tokens)
return viper.WriteConfig()
}
func (tcm *tokenChainManager) amountToString(amount *big.Int, decimals byte) string {
x := big.NewInt(10)
exp := x.Exp(x, big.NewInt(int64(decimals)), nil)
r := new(big.Rat).SetFrac(amount, exp)
return r.FloatString(int(decimals))
}
func (tcm *tokenChainManager) amountFromString(s string, decimals byte) (amount *big.Int, err error) {
x := big.NewInt(10)
exp := x.Exp(x, big.NewInt(int64(decimals)), nil)
r, ok := new(big.Rat).SetString(s)
if !ok {
return nil, errors.New("Unable to parse amount")
}
r = r.Mul(r, new(big.Rat).SetInt(exp))
if !r.IsInt() {
return nil, errors.New("Unable to parse amount")
}
return r.Num(), nil
}
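// Illustrative round trip (a sketch, not part of the original file): with
// 6 decimals, 1234567 base units format as "1.234567" and parse back to
// the same integer:
//
//   s := tcm.amountToString(big.NewInt(1234567), 6) // "1.234567"
//   v, _ := tcm.amountFromString(s, 6)              // v.Cmp(big.NewInt(1234567)) == 0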
|
package main
import (
"strings"
"git.apache.org/thrift.git/lib/go/thrift"
"fmt"
"log"
"echo-server/thrift-example-with-go/example"
)
type FormatDataImpl struct{}
func (fdi *FormatDataImpl) DoFormat(data *example.Data) (r *example.Data, err error) {
var rData example.Data
fmt.Println("receive:=>", data.Text)
rData.Text = "server handler result:=>" + strings.ToUpper(data.Text)
return &rData, nil
}
const (
HOST = "localhost"
PORT = "8080"
)
func main() {
handler := &FormatDataImpl{}
processor := example.NewFormatDataProcessor(handler)
serverTransport, err := thrift.NewTServerSocket(HOST + ":" + PORT)
if err != nil {
log.Fatalln("Error:", err)
}
transportFactory := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory())
protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
server := thrift.NewTSimpleServer4(processor, serverTransport, transportFactory, protocolFactory)
fmt.Println("Running at:", HOST+":"+PORT)
server.Serve()
}
|
package validator_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/darren-west/app/utils/validator"
)
type Foo struct {
Bar Bar
Item string
Something int
}
type Bar struct {
Item string
Another int
}
func (f Foo) IsValid() (err error) {
if f.Bar == (Bar{}) {
err = fmt.Errorf("missing field: bar")
return
}
return
}
func (Bar) IsValid() (err error) {
return
}
func TestValidateMissingField(t *testing.T) {
err := validator.Validator{}.IsValid(&Foo{Item: "Something"})
assert.EqualError(t, err, "missing field: bar")
}
func TestValidatorIncorrectType(t *testing.T) {
err := validator.Validator{}.IsValid(struct{}{})
assert.EqualError(t, err, "invalid input: input type struct, expecting ptr")
}
func TestValidatorIncorrectTypeNil(t *testing.T) {
err := validator.Validator{}.IsValid(nil)
assert.EqualError(t, err, "invalid input: input is nil")
}
|
package helpers_test
import (
"testing"
"github.com/go-jstmpl/go-jstmpl/helpers"
)
func TestAdd(t *testing.T) {
type Input struct {
a int
b int
}
type Case struct {
Input Input
Expected int
}
cases := []Case{
{
Input: Input{
a: 2,
b: 1,
},
Expected: 3,
},
{
Input: Input{
a: 1,
b: 2,
},
Expected: 3,
},
{
Input: Input{
a: 1,
b: 0,
},
Expected: 1,
},
{
Input: Input{
a: 0,
b: 1,
},
Expected: 1,
},
{
Input: Input{
a: -1,
b: 0,
},
Expected: -1,
},
{
Input: Input{
a: 0,
b: -1,
},
Expected: -1,
},
{
Input: Input{
a: 1,
b: -1,
},
Expected: 0,
},
{
Input: Input{
a: -1,
b: 1,
},
Expected: 0,
},
}
for _, c := range cases {
actual := helpers.Add(c.Input.a, c.Input.b)
if actual != c.Expected {
t.Errorf("Add(%d, %d) expected %d, but actual %d", c.Input.a, c.Input.b, c.Expected, actual)
}
}
}
func TestSub(t *testing.T) {
type Input struct {
a int
b int
}
type Case struct {
Input Input
Expected int
}
cases := []Case{
{
Input: Input{
a: 2,
b: 1,
},
Expected: 1,
},
{
Input: Input{
a: 1,
b: 2,
},
Expected: -1,
},
{
Input: Input{
a: 1,
b: 0,
},
Expected: 1,
},
{
Input: Input{
a: 0,
b: 1,
},
Expected: -1,
},
{
Input: Input{
a: -1,
b: 0,
},
Expected: -1,
},
{
Input: Input{
a: 0,
b: -1,
},
Expected: 1,
},
{
Input: Input{
a: 1,
b: -1,
},
Expected: 2,
},
{
Input: Input{
a: -1,
b: 1,
},
Expected: -2,
},
}
for _, c := range cases {
actual := helpers.Sub(c.Input.a, c.Input.b)
if actual != c.Expected {
t.Errorf("Sub(%d, %d) expected %d, but actual %d", c.Input.a, c.Input.b, c.Expected, actual)
}
}
}
|
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information
package storj
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/storj/location"
)
func TestPlacement_Geofencing(t *testing.T) {
cases := []struct {
name string
country location.CountryCode
placement PlacementConstraint
expected bool
}{
{
name: "US matches US selector",
country: location.UnitedStates,
placement: US,
expected: true,
},
{
name: "Germany is EU",
country: location.Germany,
placement: EU,
expected: true,
},
{
name: "US is not eu",
country: location.UnitedStates,
placement: EU,
expected: false,
},
{
name: "Lower case country code is handled",
country: location.Germany,
placement: EU,
expected: true,
},
{
name: "Empty country doesn't match region",
country: location.None,
placement: EU,
expected: false,
},
{
name: "Empty country doesn't match country",
country: location.None,
placement: US,
expected: false,
},
{
name: "Russia doesn't match NR",
country: location.Russia,
placement: NR,
expected: false,
},
{
name: "Germany is allowed with NR",
country: location.Germany,
placement: NR,
expected: true,
},
{
name: "Invalid placement should return false",
country: location.Germany,
placement: NR + 1,
expected: false,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
assert.Equal(t, c.expected, c.placement.AllowedCountry(c.country))
})
}
}
func TestPlacement_SQLConversion(t *testing.T) {
p := EEA
value, err := p.Value()
require.NoError(t, err)
res := new(PlacementConstraint)
err = res.Scan(value)
require.NoError(t, err)
require.Equal(t, EEA, *res)
err = res.Scan(nil)
require.NoError(t, err)
require.Equal(t, EveryCountry, *res)
err = res.Scan("")
require.Error(t, err)
}
var sink int
func BenchmarkPlacementConstraint_AllowedCountry(b *testing.B) {
constraints := []PlacementConstraint{
EveryCountry,
EU,
EEA,
US,
DE,
InvalidPlacement,
NR,
}
for i := 0; i < b.N; i++ {
for _, c := range constraints {
if c.AllowedCountry(location.Russia) {
sink++
}
}
}
}
|
package main
import (
"golang.org/x/net/webdav"
"log"
"net/http"
)
func main() {
h := &webdav.Handler{
// An in-memory file system; its contents disappear when the app restarts:
// FileSystem: webdav.NewMemFS(),
// Serve the local file system instead:
FileSystem: webdav.Dir("."),
LockSystem: webdav.NewMemLS(),
// Set up request logging.
Logger: func(r *http.Request, err error) {
log.Printf("[dav] %-10s%-30s%v", r.Method, r.URL.Path, err)
},
}
// Mount the WebDAV share (Windows): net use P: http://localhost:5555
// Unmount: net use P: /del /y
http.HandleFunc("/", h.ServeHTTP)
log.Fatal(http.ListenAndServe(":5555", nil))
}
|
package main
import (
"fmt"
"time"
)
func main() {
c1 := make(chan string)
c2 := make(chan string)
go func() {
for {
c1 <- "from 1"
time.Sleep(time.Second * 2)
}
}()
go func() {
for {
c2 <- "from 2"
time.Sleep(time.Second * 3)
}
}()
go func() {
for {
/*
select picks the first channel that is ready and receives from it
(or sends to it).
*/
select {
case msg1 := <-c1:
fmt.Println(msg1)
case msg2 := <-c2:
fmt.Println(msg2)
case <-time.After(time.Second):
/*
time.After creates a channel, and after the given duration, will
send the current time on it (we weren’t interested in the time,
so we didn’t store it in a variable).
*/
fmt.Println("timeout")
default:
fmt.Println("nothing ready")
time.Sleep(time.Second)
}
}
}()
var input string
fmt.Scanln(&input)
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package signinit
import (
"context"
"crypto"
"fmt"
"time"
"github.com/sassoftware/relic/cmdline/shared"
"github.com/sassoftware/relic/lib/audit"
"github.com/sassoftware/relic/lib/certloader"
"github.com/sassoftware/relic/signers"
"github.com/sassoftware/relic/signers/sigerrors"
"github.com/sassoftware/relic/token"
)
func Init(ctx context.Context, mod *signers.Signer, tok token.Token, keyName string, hash crypto.Hash, flags *signers.FlagValues) (*certloader.Certificate, *signers.SignOpts, error) {
var key token.Key
var err error
if tctx, ok := tok.(keyGetter); ok {
key, err = tctx.GetKeyContext(ctx, keyName)
} else {
key, err = tok.GetKey(keyName)
}
if err != nil {
return nil, nil, err
}
kconf := key.Config()
// parse certificates
var x509cert, pgpcert string
if mod.CertTypes&signers.CertTypeX509 != 0 {
if kconf.X509Certificate == "" {
return nil, nil, sigerrors.ErrNoCertificate{"x509"}
}
x509cert = kconf.X509Certificate
}
if mod.CertTypes&signers.CertTypePgp != 0 {
if kconf.PgpCertificate == "" {
return nil, nil, sigerrors.ErrNoCertificate{"pgp"}
}
pgpcert = kconf.PgpCertificate
}
cert, err := certloader.LoadTokenCertificates(key, x509cert, pgpcert)
if err != nil {
return nil, nil, err
}
cert.KeyName = keyName
// create audit info
auditInfo := audit.New(keyName, mod.Name, hash)
now := time.Now().UTC()
auditInfo.SetTimestamp(now)
if cert.Leaf != nil {
auditInfo.SetX509Cert(cert.Leaf)
}
if cert.PgpKey != nil {
auditInfo.SetPgpCert(cert.PgpKey)
}
if kconf.Timestamp {
cert.Timestamper, err = GetTimestamper()
if err != nil {
return nil, nil, err
}
}
opts := signers.SignOpts{
Hash: hash,
Time: now,
Audit: auditInfo,
Flags: flags,
}
opts = opts.WithContext(ctx)
return cert, &opts, nil
}
func PublishAudit(info *audit.Info) error {
aconf := shared.CurrentConfig.Amqp
if aconf != nil && aconf.URL != "" {
if err := info.Publish(aconf); err != nil {
return fmt.Errorf("failed to publish audit log: %s", err)
}
}
return nil
}
type keyGetter interface {
GetKeyContext(context.Context, string) (token.Key, error)
}
|
package packer
import (
"encoding/binary"
"fmt"
"github.com/golang/snappy"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
)
const (
// The original qvm-copy uses LongMax (9223372036854775807 = 9223 PB) as
// max. I choose something smaller, 1TB ought to suffice
MaxTransfer = 1e12
)
type Receiver struct {
in io.Reader
out BufferedWriter
useTempFile bool // Should it unpack into tempfiles first?
totalBytes uint64 // counter for total bytes received
totalFiles uint64 // counter for total files received
filesLimit int // a limit on the number of files to receive
byteLimit uint64 // limit on the number of bytes to receive
index uint32 // index count, for requesting
requestList []uint32 // list of files (indexes) to request
toDelete map[string]struct{} // list of local files to delete
dirStack []string // stack of directories we visit/create
deferredPermissions []*fileHeader
// place to store stuff in. Defaults to empty string, as we're normally
// root-jailed, but is used for testing
root string
opts *Options
}
// NewReceiver creates a new receiver
func NewReceiver(in io.Reader, out io.Writer) (*Receiver, error) {
v := versionHeader{}
if err := binary.Read(in, binary.LittleEndian, &v); err != nil {
return nil, err
}
if v.Version != 0 {
return nil, fmt.Errorf("unsupported version: %d", v.Version)
}
opts := &Options{
Verbosity: int(v.Verbosity),
CrcUsage: int(v.FileCrcUsage),
Compression: int(v.Compression),
}
if opts.Compression > CompressionSnappy {
return nil, fmt.Errorf("Unsupported compression format %d", opts.Compression)
}
if opts.Compression == CompressionSnappy {
in = snappy.NewReader(in)
}
if opts.Verbosity >= 3 {
log.Printf("protocol version: %d, verbosity %d, snappy: %v, crc: %d",
v.Version, opts.Verbosity, opts.Compression != 0, opts.CrcUsage)
}
return &Receiver{
in: in,
out: NewConfigurableWriter(opts.Compression == CompressionSnappy, out),
filesLimit: -1,
useTempFile: true,
opts: opts,
toDelete: make(map[string]struct{}),
}, nil
}
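// Illustrative usage sketch (assumes a stdin/stdout transport to the remote
// sender; not part of the original file):
//
//   recv, err := NewReceiver(os.Stdin, os.Stdout)
//   if err != nil {
//       log.Fatal(err)
//   }
//   if err := recv.Sync(); err != nil {
//       log.Fatal(err)
//   }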
func (r *Receiver) Sync() error {
// Receive directories + metadata
if err := r.receiveMetadata(); err != nil {
return fmt.Errorf("Error during phase 0 receive : %v", err)
}
// Request files
if err := r.requestFiles(); err != nil {
return fmt.Errorf("Error during phase 2 file request: %v", err)
}
// Receive data content
if err := r.receiveFullData(); err != nil {
return fmt.Errorf("Error during file reception: %v", err)
}
if r.opts.Verbosity >= 3 {
if cm, ok := r.out.(*ConfigurableWriter); ok {
raw, comp := cm.Stats()
log.Printf("Data sent, raw: %d, compressed: %d", raw, comp)
}
}
// Fix perms
for _, hdr := range r.deferredPermissions {
hdr.fixTimesAndPerms()
}
for f := range r.toDelete {
info, err := os.Lstat(f)
if err != nil {
log.Printf("Error during deletion: %v", err)
continue
}
if info.IsDir() {
os.RemoveAll(f)
if r.opts.Verbosity >= 4 {
log.Printf("Removed directory %v", f)
}
} else {
if err := os.Remove(f); err != nil {
if r.opts.Verbosity > 0 {
log.Printf("Failed to delete %v: %v", f, err)
}
}
if r.opts.Verbosity >= 4 {
log.Printf("Removed %v", f)
}
}
}
return nil
}
// request schedules a certain index for later retrieval
func (r *Receiver) request(index uint32) {
r.requestList = append(r.requestList, index)
}
// countBytes verifies that the length is within limits, and updates bytecounter
func (r *Receiver) countBytes(length uint64, update bool) error {
if length > MaxTransfer {
return fmt.Errorf("file too large, %d", length)
}
if r.byteLimit != 0 && r.totalBytes > uint64(r.byteLimit)-length {
return fmt.Errorf("file too large, %d", length)
}
if update {
r.totalBytes += length
}
return nil
}
// receiveFileMetadata handles stage-1 metadata for files and symlinks
func (r *Receiver) receiveFileMetadata(hdr *fileHeader) error {
defer func() { r.index++ }()
// Check sizes
if err := r.countBytes(hdr.Data.FileLen, false); err != nil {
return err
}
localFileInfo, err := os.Lstat(hdr.path)
if err != nil && os.IsNotExist(err) {
r.request(r.index)
return nil
}
localFile := newFileHeaderFromStat(hdr.path, localFileInfo)
if diff := localFile.Diff(hdr); len(diff) > 0 {
if r.opts.Verbosity >= 4 {
log.Printf("file diffs for %v: %v", hdr.path, diff)
}
r.request(r.index)
return nil
}
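// In the CrcAtimeNsec modes the sender reuses the AtimeNsec header field to
// carry a CRC of the file contents (an assumption based on the constant
// names), so a CRC mismatch triggers a full-content request below.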
if r.opts.CrcUsage == FileCrcAtimeNsecMetadata ||
r.opts.CrcUsage == FileCrcAtimeNsec {
crc, err := CrcFile(hdr.path, localFileInfo)
if err != nil {
return err
}
if crc != hdr.Data.AtimeNsec {
if r.opts.Verbosity >= 3 {
log.Printf("crc diff on %v (local %d, remote %d)",
hdr.path, crc, hdr.Data.AtimeNsec)
}
r.request(r.index)
}
}
return nil
}
// receiveDirMetadata handles directories (stage 1). Since qvm-sync, as opposed to qvm-copy,
// cannot rely on the destination being empty, we need to handle various
// corner cases (e.g. the directory exists but is a file, or vice versa).
func (r *Receiver) receiveDirMetadata(header *fileHeader) error {
// qvm-copy operates on a 'clean' empty destination, so that one can
// safely assume that if it already exists, this is the second time they
// visit it (backing out), and set the final perms that time around.
// We can't do that, but instead maintain a stack of directories. We
// can consult it to find out whether
// 1. we're now backing out of a dir, or
// 2. we're visiting/creating one for the first time
if r.visitDir(header.path) { // first visit
stat, err := os.Lstat(header.path)
if err == nil {
// If it's not a dir, delete it
if !stat.IsDir() {
return RemoveIfExist(header.path)
}
// We also need to ensure that we have permissions in the directory;
// these are set correctly later, on the second visit.
if err := os.Chmod(header.path, 0700); err != nil {
return err
}
// remember the files that were there
return r.snapshotFiles(header.path, false)
}
if os.IsNotExist(err) {
// Dir did not exist (or was removed), just create it
return os.Mkdir(header.path, 0700)
}
// Some other error
return err
}
// second visit
// We can't set the perms here: if we were to set e.g rdonly perms,
// we will be unable to create the full files when they are transmitted.
// So just save the perms for later
r.deferFixTimesAndPerms(header)
return nil
}
func (r *Receiver) receiveRegularFileFullData(hdr *fileHeader) error {
// Check sizes
if err := r.countBytes(hdr.Data.FileLen, true); err != nil {
return err
}
var (
fdOut *os.File
err error
)
if !r.useTempFile {
if fdOut, err = os.OpenFile(hdr.path, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0); err != nil {
return err
}
// we can't do deferred fdOut.Close, because we need to fix perms
// _after_ file has been closed
if err := CopyFile(r.in, fdOut, int(hdr.Data.FileLen)); err != nil {
fdOut.Close()
return err
}
fdOut.Close()
return hdr.fixTimesAndPerms()
}
// Create tempfile
if fdOut, err = ioutil.TempFile(".", "qvm-*"); err != nil {
return err
}
defer fdOut.Close()
defer os.Remove(fdOut.Name()) // defer cleanup
if err := CopyFile(r.in, fdOut, int(hdr.Data.FileLen)); err != nil {
return err
}
// This file may already exist.
if err := RemoveIfExist(hdr.path); err != nil {
return err
}
if err := os.Link(fdOut.Name(), hdr.path); err != nil {
return fmt.Errorf("unable to link file : %v", err)
}
return hdr.fixTimesAndPerms()
}
func (r *Receiver) receiveSymlinkFullData(hdr *fileHeader) error {
fileSize := hdr.Data.FileLen
if fileSize > MaxPathLength-1 {
return fmt.Errorf("symlink link-name too long (%d characters)", fileSize)
}
if err := r.countBytes(fileSize, true); err != nil {
return err
}
// a symlink should be small enough to not use CopyFile (buffered)
buf := make([]byte, fileSize)
if _, err := io.ReadFull(r.in, buf); err != nil {
return fmt.Errorf("symlink content read err: %v", err)
}
content := string(buf)
// This file may already exist.
if err := RemoveIfExist(hdr.path); err != nil {
return err
}
if err := os.Symlink(content, hdr.path); err != nil {
return err
}
// OBS! We can't set perms _nor_ times on symlinks. See documentation
// on the methods fixTimesAndPerms and fixTimes
return nil
}
// visitDir either push the path to the stack, or, if the topmost item
// is identical to this path, it pops one item from the stack.
// @return true if this is a new path (push), false if it's the second time around (pop)
func (r *Receiver) visitDir(path string) bool {
if len(r.dirStack) == 0 {
r.dirStack = append(r.dirStack, path)
return true
}
if r.dirStack[len(r.dirStack)-1] != path {
r.dirStack = append(r.dirStack, path)
return true
}
r.dirStack = r.dirStack[:len(r.dirStack)-1]
return false
}
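// Illustrative trace (assumes the sender emits each directory header once on
// entry and once when backing out; not part of the original file):
//
//   visitDir("a")   == true  // push: [a]
//   visitDir("a/b") == true  // push: [a, a/b]
//   visitDir("a/b") == false // pop:  [a]
//   visitDir("a")   == false // pop:  []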
// deferFixTimesAndPerms saves the times and perms for the given path, so that
// we can set that later, when we're done with all file operations on it
func (r *Receiver) deferFixTimesAndPerms(hdr *fileHeader) {
r.deferredPermissions = append(r.deferredPermissions, hdr)
}
func (r *Receiver) processItemMetadata(hdr *fileHeader) error {
var err error
if hdr.isDir() {
err = r.receiveDirMetadata(hdr)
} else if hdr.isSymlink() || hdr.isRegular() {
err = r.receiveFileMetadata(hdr)
} else {
return fmt.Errorf("unknown file Mode %x", hdr.Data.Mode)
}
return err
}
func (r *Receiver) snapshotFiles(dir string, checkRoot bool) error {
// Build up the list of existing files (on the current directory level)
files, err := ioutil.ReadDir(dir)
if err != nil && os.IsNotExist(err) {
return nil
}
if err != nil {
return err
}
for _, f := range files {
fullPath, err := filepath.Abs(filepath.Join(dir, f.Name()))
if err != nil {
return err
}
r.toDelete[fullPath] = struct{}{}
}
// We are supposed to be chrooted, and therefore unable to actually
// delete files arbitrarily. However, better safe than sorry, so this
// program will simply throw an error if it "looks like" we're not in a
// chroot but in an actual root
if checkRoot {
blackList := []string{
"bin", "boot", "dev", "etc", "home", "lost+found",
"media", "mnt", "opt", "proc", "root",
"sbin", "srv", "sys", "usr", "var",
}
for _, nope := range blackList {
if _, exist := r.toDelete[filepath.Join(dir, nope)]; exist {
return fmt.Errorf("file %v in receiver root, bailing out", nope)
}
}
}
return nil
}
func (r *Receiver) removeSnapshot(path string) error {
fullpath, err := filepath.Abs(path)
if err != nil {
return err
}
delete(r.toDelete, fullpath)
return nil
}
func (r *Receiver) receiveMetadata() error {
var lastName string
firstItem := true
for {
hdr, err := unMarshallBinary(r.in)
if err != nil {
return err
}
// Check for end of transfer marker
if hdr.Data.NameLen == 0 {
break
}
r.totalFiles++
if r.filesLimit > 0 && int(r.totalFiles) > r.filesLimit {
return fmt.Errorf("number of files (%d) exceeded limit (%d)", r.totalFiles, r.filesLimit)
}
if firstItem {
// First item should be the directory the remote side is syncing
if !hdr.isDir() {
return fmt.Errorf("expected directory as first entry, got %v", hdr.path)
}
if err := r.snapshotFiles(fmt.Sprintf("./%v", hdr.path), true); err != nil {
return fmt.Errorf("snapshot failed: %v", err)
}
firstItem = false
}
r.removeSnapshot(hdr.path)
if err := r.processItemMetadata(hdr); err != nil {
return fmt.Errorf("error processing metadata for %v: %v", hdr.path, err)
}
lastName = hdr.path
}
if err := r.sendStatusAndCrc(0, lastName); err != nil {
return err
}
return r.out.Flush()
}
func (r *Receiver) receiveFullData() error {
var lastName string
for _, index := range r.requestList {
hdr, err := unMarshallBinary(r.in)
if err != nil {
return err
}
if hdr.isRegular() {
err = r.receiveRegularFileFullData(hdr)
} else if hdr.isSymlink() {
err = r.receiveSymlinkFullData(hdr)
}
if err != nil {
return err
}
lastName = hdr.path
if r.opts.Verbosity >= 4 {
log.Printf("Got file %d (%v)", index, lastName)
}
}
if err := r.sendStatusAndCrc(0, lastName); err != nil {
return err
}
return r.out.Flush()
}
func (r *Receiver) sendStatusAndCrc(code int, lastFilename string) error {
result := &resultHeader{
ErrorCode: uint32(code),
}
if err := result.marshallBinary(r.out); err != nil {
return err
}
extension := &resultHeaderExt{
LastNameLen: uint32(len(lastFilename)) + 1,
LastName: lastFilename,
}
if len(lastFilename) == 0 {
extension.LastNameLen = 0
}
if err := extension.marshallBinary(r.out); err != nil {
return fmt.Errorf("failed sending result extension: %v", err)
}
return nil
}
func (r *Receiver) requestFiles() error {
if r.opts.Verbosity >= 3 {
log.Printf("Requesting %d files", len(r.requestList))
}
if err := binary.Write(r.out, binary.LittleEndian, uint32(len(r.requestList))); err != nil {
return err
}
if err := binary.Write(r.out, binary.LittleEndian, r.requestList); err != nil {
return err
}
return r.out.Flush()
}
|
// Copyright © 2020. All rights reserved.
// Author: Ilya Stroy.
// Contacts: qioalice@gmail.com, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT
package ekatyp
type (
// NoCopy may be embedded into a struct to make `go vet`'s copylocks
// check flag copies of that struct: it satisfies sync.Locker with
// no-op methods and is never meant to be locked.
NoCopy struct{}
)
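// Illustrative embedding sketch (hypothetical type, not part of the original
// file):
//
//   type Resource struct {
//       noCopy NoCopy // `go vet` now flags copies of Resource
//       data   []byte
//   }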
func (*NoCopy) Lock() {}
func (*NoCopy) Unlock() {} |
package util
import (
"fmt"
"github.com/latam-airlines/crane/logger"
"strings"
)
type KeyValue struct {
Key string
Value string
}
func (kv KeyValue) ToString() string {
return fmt.Sprintf("%s=%s", kv.Key, kv.Value)
}
func AddOrReplaceKv(key string, value string, kvs []KeyValue) []KeyValue {
replace := false
for id, kv := range kvs {
if kv.Key == key {
logger.Instance().Debugf("Replacing key %s", kv.Key)
kvs[id].Value = value
replace = true
break
}
}
if !replace {
kvs = append(kvs, KeyValue{Key: key, Value: value})
}
return kvs
}
func KeyValueArrayToMap(kvArray []KeyValue) map[string]string {
kvMap := map[string]string{}
for _, val := range kvArray {
kvMap[val.Key] = val.Value
}
return kvMap
}
func KeyValueArrayToStringArray(kvArray []KeyValue) []string {
kvSlice := []string{}
for _, val := range kvArray {
kvSlice = append(kvSlice, val.ToString())
}
return kvSlice
}
func KeyValueArray(id string, kvs []string) []KeyValue {
kvArray := []KeyValue{}
validateKeyValueAndFilter(kvs, func(splittedKv []string) {
if !strings.Contains(splittedKv[0], ":") {
kvArray = AddOrReplaceKv(splittedKv[0], splittedKv[1], kvArray)
}
})
validateKeyValueAndFilter(kvs, func(splittedKv []string) {
if strings.Contains(splittedKv[0], ":") {
composedKeyParts := strings.Split(splittedKv[0], ":")
if composedKeyParts[0] == id {
kvArray = AddOrReplaceKv(composedKeyParts[1], splittedKv[1], kvArray)
}
}
})
return kvArray
}
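// Illustrative sketch (hypothetical inputs, not part of the original file):
// given kvs = ["A=1", "web:B=2", "db:C=3", "bad"], KeyValueArray("web", kvs)
// keeps the plain pair A=1, resolves web:B=2 to B=2 (matching id), drops
// db:C=3 (different id), and logs a warning for "bad" (no '=').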
func validateKeyValueAndFilter(kvs []string, filterEnv func([]string)) {
for _, elm := range kvs {
splittedKv := strings.SplitN(elm, "=", 2)
if len(splittedKv) != 2 {
logger.Instance().Warnf("Invalid Key=Value format: %s. Ignoring this entry", elm)
continue
}
filterEnv(splittedKv)
}
}
func ExtractStringOrDefault(params map[string]interface{}, key, def string) string {
val := def
if value, ok := params[key]; ok {
val = fmt.Sprint(value)
}
return val
}
|
package main
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"log"
"strings"
"time"
)
func loadS3Files(svc *s3.S3, bucket, path string, debug *log.Logger) chan map[string]*File {
out := make(chan map[string]*File)
// s3 doesn't like the key to start with /
path = strings.TrimPrefix(path, "/")
go func() {
start := time.Now()
debug.Printf("read s3 - start at %s", start)
f := make(map[string]*File)
trawlS3(svc, path, bucket, path, f, nil, debug)
debug.Printf("read s3 - stop, it took %s", time.Now().Sub(start))
out <- f
close(out)
}()
return out
}
func trawlS3(svc *s3.S3, path string, bucket, prefix string, files map[string]*File, token *string, debug *log.Logger) {
list, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(prefix),
ContinuationToken: token,
})
if err != nil {
fmt.Println(err)
return
}
for _, object := range list.Contents {
// strip out the full path of the object, begin after path
p := strings.TrimPrefix(*object.Key, path)
p = strings.TrimPrefix(p, "/")
files[p] = &File{
path: p,
size: *object.Size,
mtime: *object.LastModified,
}
}
if *list.IsTruncated {
trawlS3(svc, path, bucket, prefix, files, list.NextContinuationToken, debug)
}
}
|
/*
Copyright 2020 Huawei Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package openharmony contains definitions common to open harmony.
package openharmony
// Assets describes build artefacts of an open harmony system.
type Assets struct {
BootLoaderPath string // "uboot.bin"
KernelPath string // "OHOS_Image.bin"
RootfsPath string // "rootfs.img"
UserfsPath string // "userfs.img"
}
|
/**
*
* Author: Robert Skelton
* Date 2.4.14
* Filename: structTest.go
* Email: robertjskelton@gmail.com
* Description: Basic structs in go testing.
*
*/
package main
import "fmt"
type Rectangle struct {
width, height int
}
func (r *Rectangle) area() float64 {
return float64(r.height * r.width)
}
func main() {
r := Rectangle{5, 12}
fmt.Println(r.area())
} |
package configsort
import (
"sort"
"github.com/warrenharper/restapi/configuration"
)
type Orderer func(i, j int) bool
// Sorter sorts configurations based on its Order.
type Sorter struct {
Configs []configuration.Configuration
Order Orderer
}
// Sort sorts the configs according to the order that was given.
func (s *Sorter) Sort(o Orderer, configs []configuration.Configuration) []configuration.Configuration {
s.Configs = configs
s.Order = o
sort.Sort(s)
return s.Configs
}
// Reverse sorts the configs in the reverse of the order that was given.
func (s *Sorter) Reverse(o Orderer, configs []configuration.Configuration) []configuration.Configuration {
s.Configs = configs
s.Order = o
// sort.Reverse only wraps the Interface; it must be passed to sort.Sort
// to actually sort.
sort.Sort(sort.Reverse(s))
return s.Configs
}
func (s Sorter) Len() int {
return len(s.Configs)
}
func (s Sorter) Swap(i, j int) {
s.Configs[i], s.Configs[j] = s.Configs[j], s.Configs[i]
}
func (s Sorter) Less(i, j int) bool {
return s.Order(i, j)
}
// ByName returns true if the name of the configuration at index i is less than
// the name of the configuration at index j.
func (s *Sorter) ByName(i, j int) bool {
return s.Configs[i].Name < s.Configs[j].Name
}
// ByHostName returns true if the HostName of the configuration at index i is less than
// the HostName of the configuration at index j.
func (s *Sorter) ByHostName(i, j int) bool {
return s.Configs[i].HostName < s.Configs[j].HostName
}
// ByPort returns true if the port of the configuration at index i is less than
// the port of the configuration at index j.
func (s *Sorter) ByPort(i, j int) bool {
return s.Configs[i].Port < s.Configs[j].Port
}
// ByUsername returns true if the username of the configuration at index i is less than
// the username of the configuration at index j.
func (s *Sorter) ByUsername(i, j int) bool {
return s.Configs[i].Username < s.Configs[j].Username
}
|
package main
import (
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
)
func main() {
router := mux.NewRouter()
router.HandleFunc("/", index)
http.ListenAndServe(":4000", router)
}
func index(res http.ResponseWriter, req *http.Request) {
fmt.Println("Hello Go..")
fmt.Println(time.Now())
} |
package main
import (
"github.com/typical-go/typical-go/pkg/typgo"
)
var descriptor = typgo.Descriptor{
ProjectName: "hello-world",
ProjectVersion: "1.0.0",
Tasks: []typgo.Tasker{
// compile
&typgo.GoBuild{},
// run
&typgo.RunBinary{
Before: typgo.TaskNames{"build"},
},
},
}
func main() {
typgo.Start(&descriptor)
}
|
package main
import (
"fmt"
)
/**
Conditional statements in Go
*/
func main() {
condition1()
fmt.Println("--------------")
condition2()
fmt.Println("--------------")
condition3()
fmt.Println("--------------")
condition4()
fmt.Println("--------------")
condition5()
}
/**
The select statement: TODO
select {
case communication clause :
statement(s);
case communication clause :
statement(s);
default :
statement(s);
}
*/
func condition5() {
var c1, c2, c3 chan int
var i1, i2 int
select {
case i1 = <-c1:
fmt.Printf("received ", i1, " from c1\n")
case c2 <- i2:
fmt.Printf("sent ", i2, " to c2\n")
case i3, ok := (<-c3): // same as: i3, ok := <-c3
if ok {
fmt.Printf("received ", i3, " from c3\n")
} else {
fmt.Printf("c3 is closed\n")
}
default:
fmt.Printf("no communication\n")
}
}
/**
fallthrough: forces execution of the next case body; it does not evaluate the next case's expression.
--- fallthrough is like Java cases without break: every case body after the matched one also runs.
*/
func condition4() {
var grade = "B"
var marks = 90
switch marks {
case 90:
grade = "A"
fallthrough
case 80:
grade = "B"
fallthrough
case 50, 60, 70:
grade = "C"
}
// grade is C
fmt.Println("grade is ", grade)
}
/**
type...switch
switch x.(type){
case type:
statement(s);
case type:
statement(s);
default:
statement(s);
}
*/
func condition3() {
var x interface{}
// := declares and assigns in one step; the type is inferred, no var keyword needed
switch i := x.(type) {
case nil:
fmt.Printf("the type of x: %T\n", i)
case int:
fmt.Println("x is an int")
case float64:
fmt.Println("x is a float64")
case func(int) float64:
fmt.Println("x is a func(int) float64")
case bool, string:
fmt.Println("x is a bool or a string")
default:
fmt.Println("unknown type")
}
}
/**
The switch statement: without fallthrough it behaves like Java with a break in every case
switch var1 {
case val1:
...
case val2:
...
default:
...
}
*/
func condition2() {
var grade = "B"
var marks = 90
switch marks {
case 90:
grade = "A"
case 80:
grade = "B"
case 50, 60, 70:
grade = "C"
}
// grade is A
fmt.Println("grade is ", grade)
}
/**
if syntax:
if booleanExpression { // executed when the expression is true }
*/
func condition1() {
var a int = 10
if a < 20 {
if a == 10 {
fmt.Println("a等于10")
} else {
fmt.Println("a小于20.")
}
} else {
fmt.Println("a不小于20.")
}
}
|
package kata
import (
"strings"
)
func duplicate_count(s1 string) int {
var res int
counts := make(map[string]int)
for _, s := range s1 {
counts[strings.ToLower(string(s))]++
}
for _, count := range counts {
if count > 1 {
res++
}
}
return res
}
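// Example (illustrative): duplicate_count("aabBcde") == 2, since 'a' and 'b'
// (case-insensitively) each occur more than once.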
|