text stringlengths 11 4.05M |
|---|
package machinetypes
import (
"encoding/json"
"log"
"net/http"
"github.com/qnib/metahub/pkg/daemon"
"github.com/qnib/metahub/pkg/storage"
"github.com/gorilla/context"
)
// getAddHandler returns an http.Handler that decodes a storage.MachineType
// from the request body, stores it under the authenticated account, and
// echoes the stored entity back as JSON.
func getAddHandler(service daemon.Service) http.Handler {
	storageService := service.Storage()
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		// accountName is set by upstream middleware via gorilla/context; the
		// type assertion panics if that middleware did not run — TODO confirm.
		accountName := context.Get(r, "accountName").(string)

		var mt storage.MachineType
		if err := json.NewDecoder(r.Body).Decode(&mt); err != nil {
			// A malformed request body is the client's fault: answer 400,
			// not 500 as before.
			log.Printf("error decoding request data: %v", err)
			w.WriteHeader(http.StatusBadRequest)
			return
		}

		machineTypeService, err := storageService.MachineTypeService(ctx)
		if err != nil {
			log.Printf("failed to create MachineTypeService: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		if err := machineTypeService.Add(accountName, &mt); err != nil {
			log.Printf("failed adding machine type: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		// Echo the (possibly augmented) machine type back to the caller.
		d, err := json.Marshal(mt)
		if err != nil {
			log.Printf("error marshaling response data: %v", err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.Header().Set("content-type", "application/json")
		w.Write(d)
	})
}
|
// Package aws2tf ingests JSON from AWS CLI and emits Terraform templates.
// Currently, only security groups are implemented. Poorly, at that.
//
// TODO:
// - learn interfaces
// - make the ipRange.print* methods nicer
package aws2tf
import (
"fmt"
)
// ipRange is one CIDR block of a security group rule, with an optional
// human-readable description.
type ipRange struct {
	Description string
	CidrIP      string
}

// String renders the range as Terraform attribute lines.
func (ir ipRange) String() string {
	rendered := fmt.Sprintf(" cidr_blocks = [\"%s\"]\n", ir.CidrIP)
	if ir.Description == "" {
		return rendered
	}
	return rendered + fmt.Sprintf(" description = \"%s\"\n", ir.Description)
}
// printIngress renders one complete ingress block: the permission's
// port/protocol lines followed by this CIDR range.
func (ir ipRange) printIngress(ip ipPermissionIngress) string {
	return " ingress {\n" + ip.String() + ir.String() + " }\n"
}
// printEgress renders one complete egress block: the permission's
// port/protocol lines followed by this CIDR range.
func (ir ipRange) printEgress(ip ipPermissionEgress) string {
	return " egress {\n" + ip.String() + ir.String() + " }\n"
}
// ipPermission holds the port/protocol portion of a rule, plus the CIDR
// ranges and security-group pairs it applies to.
type ipPermission struct {
	FromPort         float64 // float64 because AWS CLI JSON numbers decode as floats
	ToPort           float64
	IPProtocol       string
	IPRanges         []ipRange
	UserIDGroupPairs []userIDGroupPair
}

// String renders the port and protocol attributes as Terraform lines.
func (ip ipPermission) String() string {
	return fmt.Sprintf(" from_port = %d\n", int(ip.FromPort)) +
		fmt.Sprintf(" to_port = %d\n", int(ip.ToPort)) +
		fmt.Sprintf(" protocol = \"%s\"\n", ip.IPProtocol)
}
// ipPermissionIngress is an ipPermission used as an ingress rule.
type ipPermissionIngress struct {
	ipPermission
}

// ipPermissionIngresses is the complete ingress rule set of a security group.
type ipPermissionIngresses []ipPermissionIngress
// String renders every ingress rule: one Terraform block per CIDR range and
// one per referenced security group.
func (ipi ipPermissionIngresses) String() string {
	var out string
	for _, perm := range ipi {
		for _, rng := range perm.IPRanges {
			out += rng.printIngress(perm)
		}
		for _, pair := range perm.UserIDGroupPairs {
			out += pair.printIngress(perm)
		}
	}
	return out
}
// ipPermissionEgress is an ipPermission used as an egress rule.
type ipPermissionEgress struct {
	ipPermission
}

// ipPermissionEgresses is the complete egress rule set of a security group.
type ipPermissionEgresses []ipPermissionEgress
// String renders every egress rule: one Terraform block per CIDR range and
// one per referenced security group.
func (ipe ipPermissionEgresses) String() string {
	var out string
	for _, perm := range ipe {
		for _, rng := range perm.IPRanges {
			out += rng.printEgress(perm)
		}
		for _, pair := range perm.UserIDGroupPairs {
			out += pair.printEgress(perm)
		}
	}
	return out
}
// userIDGroupPair references another security group in a rule, possibly
// across a VPC peering connection.
type userIDGroupPair struct {
	UserID                 string
	GroupID                string
	Description            string
	VpcID                  string
	VpcPeeringConnectionID string
	PeeringStatus          string
}

// String renders the pair as a Terraform security_groups attribute.
// Cross-VPC references (any peering-related field set) are qualified as
// "user/group"; plain references use the group ID alone.
func (uigp userIDGroupPair) String() string {
	ref := uigp.GroupID
	if uigp.VpcID != "" || uigp.VpcPeeringConnectionID != "" || uigp.PeeringStatus != "" {
		ref = uigp.UserID + "/" + uigp.GroupID
	}
	out := fmt.Sprintf(" security_groups = [\"%s\"]\n", ref)
	if uigp.Description != "" {
		out += fmt.Sprintf(" description = \"%s\"\n", uigp.Description)
	}
	return out
}
// printIngress renders one complete ingress block: the permission's
// port/protocol lines followed by this group pair.
func (uigp userIDGroupPair) printIngress(ip ipPermissionIngress) string {
	return " ingress {\n" + ip.String() + uigp.String() + " }\n"
}
// printEgress renders one complete egress block: the permission's
// port/protocol lines followed by this group pair.
func (uigp userIDGroupPair) printEgress(ip ipPermissionEgress) string {
	return " egress {\n" + ip.String() + uigp.String() + " }\n"
}
// tag is a single AWS resource tag (key/value).
type tag struct {
	Key   string
	Value string
}

// String renders the tag as one line of a Terraform tags map.
func (t tag) String() string {
	return fmt.Sprintf(" \"%s\" = \"%s\"\n", t.Key, t.Value)
}
// tags is the complete tag set of a resource.
type tags []tag

// String renders the set as a Terraform tags block.
func (t tags) String() string {
	out := " tags {\n"
	for _, entry := range t {
		out += entry.String()
	}
	return out + " }\n"
}
// securityGroup mirrors the fields of one entry in the AWS
// describe-security-groups output that are used for Terraform generation.
type securityGroup struct {
	GroupName           string
	Description         string
	GroupID             string
	VpcID               string
	IPPermissions       ipPermissionIngresses
	IPPermissionsEgress ipPermissionEgresses
	Tags                tags
}
// String renders the security group as a complete Terraform
// aws_security_group resource. The resource name combines VPC ID and group
// name to stay unique across VPCs.
func (sg securityGroup) String() string {
	out := fmt.Sprintf("resource \"aws_security_group\" \"%s-%s\" {\n", sg.VpcID, sg.GroupName)
	out += fmt.Sprintf(" name = \"%s\"\n", sg.GroupName)
	out += fmt.Sprintf(" description = \"%s\"\n", sg.Description)
	out += fmt.Sprintf(" vpc_id = \"%s\"\n", sg.VpcID)
	if len(sg.IPPermissions) > 0 {
		out += sg.IPPermissions.String()
	}
	if len(sg.IPPermissionsEgress) > 0 {
		out += sg.IPPermissionsEgress.String()
	}
	if len(sg.Tags) > 0 {
		out += sg.Tags.String()
	}
	return out + "}\n"
}
// SGFile describes a JSON file which would be produced by the AWS CLI, with details of one or more security groups.
// This JSON file would be produced by the 'aws ec2 describe-security-groups' command.
type SGFile struct {
	SecurityGroups []securityGroup
}

// String renders every security group as a Terraform resource, each followed
// by a blank line.
func (sgf SGFile) String() string {
	var out string
	for _, group := range sgf.SecurityGroups {
		out += fmt.Sprintf("%s\n", group)
	}
	return out
}
|
package main
import (
"im/config"
"im/internal/logic/api"
"im/pkg/db"
"im/pkg/logger"
"im/pkg/rpc"
)
// main boots the logic server: logging, MySQL, Redis, the RPC clients it
// depends on, then its own RPC server. The order matters — storage and
// downstream clients must be ready before the server accepts calls.
func main() {
	logger.Init()
	db.InitMysql(config.Logic.MySQL)
	db.InitRedis(config.Logic.RedisIP, config.Logic.RedisPassword)
	// Initialize the RPC clients for the conn and user services.
	rpc.InitConnIntClient(config.Logic.ConnRPCAddrs)
	rpc.InitUserIntClient(config.Logic.UserRPCAddrs)
	api.StartRpcServer()
	logger.Logger.Info("logic server start")
	// Block forever; all work happens in the RPC server's goroutines.
	select {}
}
|
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.
package dialect
import (
"strings"
"github.com/GoAdminGroup/go-admin/modules/config"
)
// Dialect is methods set of different driver.
type Dialect interface {
	// GetName get dialect's name
	GetName() string

	// ShowColumns show columns of specified table
	ShowColumns(table string) string

	// ShowTables show tables of database
	ShowTables() string

	// Insert builds an insert statement from the component.
	Insert(comp *SQLComponent) string

	// Delete builds a delete statement from the component.
	Delete(comp *SQLComponent) string

	// Update builds an update statement from the component.
	Update(comp *SQLComponent) string

	// Select builds a select statement from the component.
	Select(comp *SQLComponent) string

	// GetDelimiter return the delimiter of Dialect.
	GetDelimiter() string
}
// GetDialect return the default Dialect.
// The driver name is taken from the default database in the global config.
func GetDialect() Dialect {
	return GetDialectByDriver(config.GetDatabases().GetDefault().Driver)
}
// GetDialectByDriver return the Dialect of given driver.
// Unknown drivers fall back to a generic backtick-delimited dialect.
func GetDialectByDriver(driver string) Dialect {
	backtick := commonDialect{delimiter: "`", delimiter2: "`"}
	switch driver {
	case "mysql":
		return mysql{commonDialect: backtick}
	case "mssql":
		return mssql{commonDialect: commonDialect{delimiter: "[", delimiter2: "]"}}
	case "postgresql":
		return postgresql{commonDialect: commonDialect{delimiter: `"`, delimiter2: `"`}}
	case "sqlite":
		return sqlite{commonDialect: backtick}
	default:
		return backtick
	}
}
// H is a shorthand of map.
type H map[string]interface{}

// SQLComponent is a sql components set.
// It accumulates the pieces of a statement (fields, wheres, joins, values,
// ...) plus the bound arguments, and is rendered into Statement by a Dialect.
type SQLComponent struct {
	Fields     []string
	Functions  []string // function applied to the field at the same index ("" = none)
	TableName  string
	Wheres     []Where
	Leftjoins  []Join
	Args       []interface{}
	Order      string
	Offset     string
	Limit      string
	WhereRaws  string
	UpdateRaws []RawUpdate
	Group      string
	Statement  string
	Values     H
}
// Where contains the operation and field.
// Qmark is the placeholder text (usually "?") bound to an entry in Args.
type Where struct {
	Operation string
	Field     string
	Qmark     string
}

// Join contains the table and field and operation.
// It describes a left join: <Table> on <FieldA> <Operation> <FieldB>.
type Join struct {
	Table     string
	FieldA    string
	Operation string
	FieldB    string
}

// RawUpdate contains the expression and arguments.
// The expression is inserted verbatim into the set list of an update.
type RawUpdate struct {
	Expression string
	Args       []interface{}
}
// *******************************
// internal help function
// *******************************

// sqlClause formats an optional SQL clause as " <keyword> <value> ", or ""
// when the value is empty. It replaces the identical logic previously
// duplicated across the four getters below.
func sqlClause(keyword, value string) string {
	if value == "" {
		return ""
	}
	return " " + keyword + " " + value + " "
}

// getLimit returns the limit clause, if any.
func (sql *SQLComponent) getLimit() string {
	return sqlClause("limit", sql.Limit)
}

// getOffset returns the offset clause, if any.
func (sql *SQLComponent) getOffset() string {
	return sqlClause("offset", sql.Offset)
}

// getOrderBy returns the order by clause, if any.
func (sql *SQLComponent) getOrderBy() string {
	return sqlClause("order by", sql.Order)
}

// getGroupBy returns the group by clause, if any.
func (sql *SQLComponent) getGroupBy() string {
	return sqlClause("group by", sql.Group)
}
// getJoins renders all configured left joins, delimiting table and field
// names with the dialect's delimiters.
func (sql *SQLComponent) getJoins(delimiter, delimiter2 string) string {
	if len(sql.Leftjoins) == 0 {
		return ""
	}
	var out string
	for _, j := range sql.Leftjoins {
		out += " left join " + wrap(delimiter, delimiter2, j.Table) + " on " +
			sql.processLeftJoinField(j.FieldA, delimiter, delimiter2) + " " + j.Operation + " " +
			sql.processLeftJoinField(j.FieldB, delimiter, delimiter2) + " "
	}
	return out
}
// processLeftJoinField wraps a possibly qualified ("table.field") join field
// with the dialect delimiters; unqualified fields are returned unchanged.
//
// Fix: strings.Split never returns an empty slice, so the previous
// `len(arr) > 0` guard was always true and arr[1] panicked with an index out
// of range for fields without a dot. Require two parts instead, matching the
// equivalent checks in getWheres and getFields.
func (sql *SQLComponent) processLeftJoinField(field, delimiter, delimiter2 string) string {
	arr := strings.Split(field, ".")
	if len(arr) > 1 {
		return delimiter + arr[0] + delimiter2 + "." + delimiter + arr[1] + delimiter2
	}
	return field
}
// getFields renders the select list. Without joins, each field may be wrapped
// in its paired entry from sql.Functions; with joins, "table.field" names are
// delimited per part. Returns "*" when no fields are configured. The trailing
// comma of the accumulated list is stripped before returning.
func (sql *SQLComponent) getFields(delimiter, delimiter2 string) string {
	if len(sql.Fields) == 0 {
		return "*"
	}
	cols := ""
	if len(sql.Leftjoins) == 0 {
		for idx, field := range sql.Fields {
			if fn := sql.Functions[idx]; fn != "" {
				cols += fn + "(" + wrap(delimiter, delimiter2, field) + "),"
			} else {
				cols += wrap(delimiter, delimiter2, field) + ","
			}
		}
	} else {
		for _, field := range sql.Fields {
			if parts := strings.Split(field, "."); len(parts) > 1 {
				cols += wrap(delimiter, delimiter2, parts[0]) + "." + wrap(delimiter, delimiter2, parts[1]) + ","
			} else {
				cols += wrap(delimiter, delimiter2, field) + ","
			}
		}
	}
	return strings.TrimSuffix(cols, ",")
}
// wrap surrounds field with the dialect delimiters; the wildcard "*" is
// returned untouched.
func wrap(delimiter, delimiter2, field string) string {
	switch field {
	case "*":
		return field
	default:
		return delimiter + field + delimiter2
	}
}
// getWheres renders the where clause: structured Wheres joined by "and",
// with WhereRaws appended verbatim when present. Qualified "table.field"
// names keep the table part undelimited. When there are no structured
// conditions, WhereRaws alone forms the clause.
func (sql *SQLComponent) getWheres(delimiter, delimiter2 string) string {
	if len(sql.Wheres) == 0 {
		if sql.WhereRaws == "" {
			return ""
		}
		return " where " + sql.WhereRaws
	}
	clause := " where "
	for _, cond := range sql.Wheres {
		parts := strings.Split(cond.Field, ".")
		if len(parts) > 1 {
			clause += parts[0] + "." + wrap(delimiter, delimiter2, parts[1])
		} else {
			clause += wrap(delimiter, delimiter2, cond.Field)
		}
		clause += " " + cond.Operation + " " + cond.Qmark + " and "
	}
	if sql.WhereRaws != "" {
		return clause + sql.WhereRaws
	}
	// Strip the dangling " and " left by the loop.
	return clause[:len(clause)-5]
}
// prepareUpdate builds an update statement into sql.Statement, collecting
// the bound arguments in front of any pre-existing sql.Args (so they line up
// with the "set" placeholders, ahead of where-clause args).
//
// Values entries become "field = ?" pairs; UpdateRaws expressions are
// appended verbatim with their own args. At least one of the two must be
// present, otherwise this panics (as before). The raw-expression loop was
// previously duplicated in both branches; it is folded into one here.
func (sql *SQLComponent) prepareUpdate(delimiter, delimiter2 string) {
	if len(sql.Values) == 0 && len(sql.UpdateRaws) == 0 {
		panic("prepareUpdate: wrong parameter")
	}
	fields := ""
	args := make([]interface{}, 0)
	for key, value := range sql.Values {
		fields += wrap(delimiter, delimiter2, key) + " = ?, "
		args = append(args, value)
	}
	if len(sql.UpdateRaws) == 0 {
		// Drop the trailing ", " left by the Values loop.
		fields = fields[:len(fields)-2]
	} else {
		for i := 0; i < len(sql.UpdateRaws); i++ {
			if i == len(sql.UpdateRaws)-1 {
				fields += sql.UpdateRaws[i].Expression + " "
			} else {
				fields += sql.UpdateRaws[i].Expression + ","
			}
			args = append(args, sql.UpdateRaws[i].Args...)
		}
	}
	sql.Args = append(args, sql.Args...)
	sql.Statement = "update " + delimiter + sql.TableName + delimiter2 + " set " + fields + sql.getWheres(delimiter, delimiter2)
}
// prepareInsert builds an insert statement into sql.Statement from
// sql.Values, appending one bound argument per column.
// NOTE(review): an empty Values map produces malformed SQL — presumably the
// caller guarantees it is non-empty; confirm at call sites.
func (sql *SQLComponent) prepareInsert(delimiter, delimiter2 string) {
	cols := " ("
	marks := "("
	for key, value := range sql.Values {
		cols += wrap(delimiter, delimiter2, key) + ","
		marks += "?,"
		sql.Args = append(sql.Args, value)
	}
	// Replace each list's trailing comma with the closing parenthesis.
	cols = cols[:len(cols)-1] + ")"
	marks = marks[:len(marks)-1] + ")"
	sql.Statement = "insert into " + delimiter + sql.TableName + delimiter2 + cols + " values " + marks
}
|
package django
// Requirements is the pip requirements.txt content emitted for generated
// Django projects. Commented-out pins are kept for reference.
const Requirements = `
# Requirements
Django==1.8.3
PyMySQL==0.6.6
python-memcached==1.54
pytz==2015.4
#whitenoise==2.0.2
webassets==0.10.1
cssmin==0.2.0
jsmin==2.1.2
django-assets==0.10
django-markdown==0.8.4
django-easy-pjax==1.2.0
#django-material==0.4.1
djangorestframework==3.2.0
django-debug-toolbar==1.3.2
`

// DevSettings is the development settings template; currently empty.
const DevSettings = `
`
|
package main
import (
	"log"
	"os"

	api "app/pkg/api"

	"github.com/gin-gonic/gin"
)
/*
 * Setup main router
 */
// setupRouter registers the upload/download API routes on a default gin
// engine (logger + recovery middleware) and returns it.
func setupRouter() *gin.Engine {
	router := gin.Default()
	//Upload OPTIONS API Entry — answers CORS preflight by applying headers.
	router.OPTIONS("/upload", func(c *gin.Context) {
		api.ApplyHeaders(c)
	})
	//Download GET API Entry
	router.GET("/download", func(c *gin.Context) {
		api.Download(c)
	})
	//Upload POST API Entry
	router.POST("/upload", func(c *gin.Context) {
		api.Upload(c)
	})
	return router
}
func main() {
port := os.Getenv("APP_API_PORT")
r := setupRouter()
r.Run(":" + port)
} |
package mobile
import (
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/textileio/go-textile/core"
"github.com/textileio/go-textile/mill"
"github.com/textileio/go-textile/pb"
)
// AddSchema adds a new schema via schema mill
func (m *Mobile) AddSchema(node []byte) ([]byte, error) {
	if !m.node.Started() {
		return nil, core.ErrStopped
	}

	// Decode the protobuf node, then re-encode it as JSON for the schema mill.
	schemaNode := new(pb.Node)
	if err := proto.Unmarshal(node, schemaNode); err != nil {
		return nil, err
	}
	jsn, err := (&jsonpb.Marshaler{OrigName: true}).MarshalToString(schemaNode)
	if err != nil {
		return nil, err
	}

	file, err := m.node.AddFileIndex(&mill.Schema{}, core.AddFileConfig{
		Input: []byte(jsn),
		Media: "application/json",
	})
	if err != nil {
		return nil, err
	}
	m.node.FlushCafes()
	return proto.Marshal(file)
}
|
package dashboard
import (
"fmt"
"net/http"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
"github.com/iotaledger/wasp/packages/chain"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/registry"
"github.com/iotaledger/wasp/packages/state"
"github.com/iotaledger/wasp/packages/vm/core/accounts"
"github.com/iotaledger/wasp/packages/vm/core/blob"
"github.com/iotaledger/wasp/plugins/chains"
"github.com/labstack/echo/v4"
)
// chainBreadcrumb builds the navigation Tab pointing at the page of the
// given chain, with the chain ID abbreviated in the title.
func chainBreadcrumb(e *echo.Echo, chainID coretypes.ChainID) Tab {
	tab := Tab{
		Path:  e.Reverse("chain"),
		Title: fmt.Sprintf("Chain %.8s…", chainID),
		Href:  e.Reverse("chain", chainID.String()),
	}
	return tab
}
// initChain registers the chain-details route and associates its template.
func initChain(e *echo.Echo, r renderer) {
	route := e.GET("/chain/:chainid", handleChain)
	route.Name = "chain"
	r[route.Path] = makeTemplate(e, tplChain, tplWs)
}
// handleChain renders the chain-details page for the chain ID given in the
// URL path. Committee, state, account, and blob data are only fetched when
// the chain record exists and is marked active.
func handleChain(c echo.Context) error {
	chainid, err := coretypes.NewChainIDFromBase58(c.Param("chainid"))
	if err != nil {
		return err
	}
	tab := chainBreadcrumb(c.Echo(), chainid)
	result := &ChainTemplateParams{
		BaseTemplateParams: BaseParams(c, tab),
		ChainID:            chainid,
	}
	result.ChainRecord, err = registry.GetChainRecord(&chainid)
	if err != nil {
		return err
	}
	if result.ChainRecord != nil && result.ChainRecord.Active {
		result.VirtualState, result.Block, _, err = state.LoadSolidState(&chainid)
		if err != nil {
			return err
		}
		chain := chains.GetChain(chainid)
		// Committee summary shown in the template's Committee section.
		result.Committee.Size = chain.Size()
		result.Committee.Quorum = chain.Quorum()
		result.Committee.NumPeers = chain.NumPeers()
		result.Committee.HasQuorum = chain.HasQuorum()
		result.Committee.PeerStatus = chain.PeerStatus()
		result.RootInfo, err = fetchRootInfo(chain)
		if err != nil {
			return err
		}
		result.Accounts, err = fetchAccounts(chain)
		if err != nil {
			return err
		}
		result.TotalAssets, err = fetchTotalAssets(chain)
		if err != nil {
			return err
		}
		result.Blobs, err = fetchBlobs(chain)
		if err != nil {
			return err
		}
	}
	return c.Render(http.StatusOK, c.Path(), result)
}
// fetchAccounts queries the accounts contract and decodes the agent IDs that
// hold on-chain accounts.
func fetchAccounts(chain chain.Chain) ([]coretypes.AgentID, error) {
	// Renamed from "accounts" to avoid shadowing the accounts package.
	accs, err := callView(chain, accounts.Interface.Hname(), accounts.FuncAccounts, nil)
	if err != nil {
		return nil, fmt.Errorf("accountsc view call failed: %v", err)
	}
	agents := make([]coretypes.AgentID, 0)
	for key := range accs {
		agentid, _, err := codec.DecodeAgentID([]byte(key))
		if err != nil {
			return nil, err
		}
		agents = append(agents, agentid)
	}
	return agents, nil
}
// fetchTotalAssets returns the total balances held by the chain's accounts
// contract, keyed by color.
func fetchTotalAssets(chain chain.Chain) (map[balance.Color]int64, error) {
	data, err := callView(chain, accounts.Interface.Hname(), accounts.FuncTotalAssets, nil)
	if err != nil {
		return nil, err
	}
	return accounts.DecodeBalances(data)
}
// fetchBlobs returns the blob directory of the chain: blob hash -> size.
func fetchBlobs(chain chain.Chain) (map[hashing.HashValue]uint32, error) {
	listing, err := callView(chain, blob.Interface.Hname(), blob.FuncListBlobs, nil)
	if err != nil {
		return nil, err
	}
	return blob.DecodeDirectory(listing)
}
// ChainTemplateParams is the data handed to the chain-details template.
// Fields past ChainRecord are only populated when the chain is active.
type ChainTemplateParams struct {
	BaseTemplateParams
	ChainID     coretypes.ChainID
	ChainRecord *registry.ChainRecord
	Block        state.Block
	VirtualState state.VirtualState
	RootInfo    RootInfo
	Accounts    []coretypes.AgentID
	TotalAssets map[balance.Color]int64
	Blobs       map[hashing.HashValue]uint32
	// Committee summarizes the chain's validator committee.
	Committee struct {
		Size       uint16
		Quorum     uint16
		NumPeers   uint16
		HasQuorum  bool
		PeerStatus []*chain.PeerStatus
	}
}
// tplChain is the HTML template of the chain-details page.
// Fix: the "no chain record" message opened the ID with <td> but closed it
// with </tt>; it now uses a matching <tt>…</tt> pair.
const tplChain = `
{{define "title"}}Chain details{{end}}
{{define "body"}}
{{ $chainid := .ChainID }}
{{if .ChainRecord}}
{{ $rootinfo := .RootInfo }}
{{ $desc := trim 50 $rootinfo.Description }}
<div class="card fluid">
<h2 class="section">{{if $desc}}{{$desc}}{{else}}Chain <tt>{{$chainid}}</tt>{{end}}</h2>
<dl>
<dt>ChainID</dt><dd><tt>{{.ChainRecord.ChainID}}</tt></dd>
<dt>Chain address</dt><dd>{{template "address" .RootInfo.ChainAddress}}</dd>
<dt>Chain color</dt><dd><tt>{{.RootInfo.ChainColor}}</tt></dd>
<dt>Active</dt><dd><tt>{{.ChainRecord.Active}}</tt></dd>
{{if .ChainRecord.Active}}
<dt>Owner ID</dt><dd>{{template "agentid" (args .ChainID $rootinfo.OwnerID)}}</dd>
<dt>Delegated Owner ID</dt><dd>
{{- if $rootinfo.OwnerIDDelegated -}}
{{- template "agentid" (args .ChainID $rootinfo.OwnerIDDelegated) -}}
{{- end -}}
</dd>
<dt>Default owner fee</dt><dd><tt>{{$rootinfo.DefaultOwnerFee}} {{$rootinfo.FeeColor}}</tt></dd>
<dt>Default validator fee</dt><dd><tt>{{$rootinfo.DefaultValidatorFee}} {{$rootinfo.FeeColor}}</tt></dd>
{{end}}
</dl>
</div>
{{if .ChainRecord.Active}}
<div class="card fluid">
<h3 class="section">Contracts</h3>
<dl>
{{range $_, $c := $rootinfo.Contracts}}
<dt><a href="{{ uri "chainContract" $chainid $c.Hname }}"><tt>{{trim 30 $c.Name}}</tt></a></dt>
<dd><tt>{{trim 50 $c.Description}}</tt></dd>
{{end}}
</dl>
</div>
<div class="card fluid">
<h3 class="section">On-chain accounts</h3>
<table>
<thead>
<tr>
<th>AgentID</th>
</tr>
</thead>
<tbody>
{{range $_, $agentid := .Accounts}}
<tr>
<td>{{template "agentid" (args $chainid $agentid)}}</td>
</tr>
{{end}}
</tbody>
</table>
<h4>Total assets</h4>
{{ template "balances" .TotalAssets }}
</div>
<div class="card fluid">
<h3 class="section">Blobs</h3>
<table>
<thead>
<tr>
<th style="flex: 2">Hash</th>
<th>Size (bytes)</th>
</tr>
</thead>
<tbody>
{{range $hash, $size := .Blobs}}
<tr>
<td style="flex: 2"><a href="{{ uri "chainBlob" $chainid (hashref $hash) }}"><tt>{{ hashref $hash }}</tt></a></td>
<td>{{ $size }}</td>
</tr>
{{end}}
</tbody>
</table>
</div>
<div class="card fluid">
<h3 class="section">State</h3>
<dl>
<dt>State index</dt><dd><tt>{{.Block.StateIndex}}</tt></dd>
<dt>State hash</dt><dd><tt>{{.VirtualState.Hash}}</tt></dd>
<dt>Last updated</dt><dd><tt>{{formatTimestamp .Block.Timestamp}}</tt> in transaction <tt>{{.Block.StateTransactionID}}</tt></dd>
</dl>
</div>
<div class="card fluid">
<h3 class="section">Committee</h3>
<dl>
<dt>Size</dt> <dd><tt>{{.Committee.Size}}</tt></dd>
<dt>Quorum</dt> <dd><tt>{{.Committee.Quorum}}</tt></dd>
<dt>NumPeers</dt> <dd><tt>{{.Committee.NumPeers}}</tt></dd>
<dt>HasQuorum</dt> <dd><tt>{{.Committee.HasQuorum}}</tt></dd>
</dl>
<h4>Peer status</h4>
<table>
<thead>
<tr>
<th>Index</th>
<th>ID</th>
<th>Status</th>
</tr>
</thead>
<tbody>
{{range $_, $s := .Committee.PeerStatus}}
<tr>
<td>{{$s.Index}}</td>
<td><tt>{{$s.PeeringID}}</tt></td>
<td>{{if $s.Connected}}up{{else}}down{{end}}</td>
</tr>
{{end}}
</tbody>
</table>
</div>
{{end}}
{{ template "ws" .ChainID }}
{{else}}
<div class="card fluid error">No chain record for ID <tt>{{$chainid}}</tt></div>
{{end}}
{{end}}
`
|
package rpc
import (
"context"
"errors"
"time"
"github.com/rs/xid"
v1 "github.com/tinkerbell/pbnj/api/v1"
"github.com/tinkerbell/pbnj/grpc/oob/bmc"
"github.com/tinkerbell/pbnj/pkg/logging"
"github.com/tinkerbell/pbnj/pkg/task"
)
// BmcService for doing BMC actions.
type BmcService struct {
	// Log is the logger used for all requests handled by this service.
	Log logging.Logger
	// Timeout is how long a task should be run
	// before it is cancelled. This is for use in a
	// TaskRunner.Execute function that runs all BMC
	// interactions in the background.
	Timeout time.Duration
	// TaskRunner executes the BMC interactions asynchronously.
	TaskRunner task.Task
	v1.UnimplementedBMCServer
}
// NetworkSource sets the BMC network source.
// Not implemented: always returns a nil response and an error.
func (b *BmcService) NetworkSource(_ context.Context, _ *v1.NetworkSourceRequest) (*v1.NetworkSourceResponse, error) {
	return nil, errors.New("not implemented")
}
// Reset calls a reset on a BMC.
// The reset runs asynchronously through the TaskRunner; the response only
// carries the task ID for later status polling.
func (b *BmcService) Reset(ctx context.Context, in *v1.ResetRequest) (*v1.ResetResponse, error) {
	l := b.Log.GetContextLogger(ctx)
	taskID := xid.New().String()
	l = l.WithValues("taskID", taskID)
	l.Info(
		"start Reset request",
		"username", in.Authn.GetDirectAuthn().GetUsername(),
		"vendor", in.Vendor.GetName(),
		"resetKind", in.GetResetKind().String(),
	)
	// execFunc performs the actual reset; s receives status messages.
	execFunc := func(s chan string) (string, error) {
		t, err := bmc.NewBMCResetter(
			bmc.WithLogger(l),
			bmc.WithStatusMessage(s),
			bmc.WithResetRequest(in),
		)
		if err != nil {
			return "", err
		}
		// NOTE(review): unlike CreateUser/UpdateUser/DeleteUser, the timeout
		// context is derived from the request ctx here — confirm intentional.
		taskCtx, cancel := context.WithTimeout(ctx, b.Timeout)
		// cant defer this cancel because it cancels the context before the func is run
		// cant have cancel be _ because go vet complains.
		// TODO(jacobweinstock): maybe move this context withTimeout into the TaskRunner.Execute function
		_ = cancel
		return "", t.BMCReset(taskCtx, in.ResetKind.String())
	}
	b.TaskRunner.Execute(ctx, "bmc reset", taskID, execFunc)
	return &v1.ResetResponse{TaskId: taskID}, nil
}
// CreateUser creates a user on a BMC. (The previous comment, "sets the next
// boot device of a machine", was a copy-paste error.)
// The work runs asynchronously through the TaskRunner; the response carries
// the task ID for status polling.
func (b *BmcService) CreateUser(ctx context.Context, in *v1.CreateUserRequest) (*v1.CreateUserResponse, error) {
	// TODO figure out how not to have to do this, but still keep the logging abstraction clean?
	l := b.Log.GetContextLogger(ctx)
	taskID := xid.New().String()
	l = l.WithValues("taskID", taskID)
	l.Info(
		"start CreateUser request",
		"username", in.Authn.GetDirectAuthn().GetUsername(),
		"vendor", in.Vendor.GetName(),
		"userCreds.Username", in.UserCreds.Username,
		"userCreds.UserRole", in.UserCreds.UserRole,
	)
	execFunc := func(s chan string) (string, error) {
		t, err := bmc.NewBMC(
			bmc.WithCreateUserRequest(in),
			bmc.WithLogger(l),
			bmc.WithStatusMessage(s),
		)
		if err != nil {
			return "", err
		}
		// Background context: the task must outlive the RPC request.
		taskCtx, cancel := context.WithTimeout(context.Background(), b.Timeout)
		_ = cancel
		return "", t.CreateUser(taskCtx)
	}
	b.TaskRunner.Execute(ctx, "creating user", taskID, execFunc)
	return &v1.CreateUserResponse{TaskId: taskID}, nil
}
// UpdateUser updates a users credentials on a BMC.
// The work runs asynchronously through the TaskRunner; the response carries
// the task ID for status polling.
func (b *BmcService) UpdateUser(ctx context.Context, in *v1.UpdateUserRequest) (*v1.UpdateUserResponse, error) {
	// TODO figure out how not to have to do this, but still keep the logging abstraction clean?
	l := b.Log.GetContextLogger(ctx)
	taskID := xid.New().String()
	l = l.WithValues("taskID", taskID)
	l.Info(
		"start UpdateUser request",
		"username", in.Authn.GetDirectAuthn().GetUsername(),
		"vendor", in.Vendor.GetName(),
		"userCreds.Username", in.UserCreds.Username,
		"userCreds.UserRole", in.UserCreds.UserRole,
	)
	execFunc := func(s chan string) (string, error) {
		t, err := bmc.NewBMC(
			bmc.WithUpdateUserRequest(in),
			bmc.WithLogger(l),
			bmc.WithStatusMessage(s),
		)
		if err != nil {
			return "", err
		}
		// Background context: the task must outlive the RPC request.
		taskCtx, cancel := context.WithTimeout(context.Background(), b.Timeout)
		_ = cancel
		return "", t.UpdateUser(taskCtx)
	}
	b.TaskRunner.Execute(ctx, "updating user", taskID, execFunc)
	return &v1.UpdateUserResponse{TaskId: taskID}, nil
}
// DeleteUser deletes a user on a BMC.
// The work runs asynchronously through the TaskRunner; the response carries
// the task ID for status polling.
func (b *BmcService) DeleteUser(ctx context.Context, in *v1.DeleteUserRequest) (*v1.DeleteUserResponse, error) {
	// TODO figure out how not to have to do this, but still keep the logging abstraction clean?
	l := b.Log.GetContextLogger(ctx)
	taskID := xid.New().String()
	l = l.WithValues("taskID", taskID)
	l.Info(
		"start DeleteUser request",
		"username", in.Authn.GetDirectAuthn().GetUsername(),
		"vendor", in.Vendor.GetName(),
		"userCreds.Username", in.Username,
	)
	execFunc := func(s chan string) (string, error) {
		t, err := bmc.NewBMC(
			bmc.WithDeleteUserRequest(in),
			bmc.WithLogger(l),
			bmc.WithStatusMessage(s),
		)
		if err != nil {
			return "", err
		}
		// Background context: the task must outlive the RPC request.
		taskCtx, cancel := context.WithTimeout(context.Background(), b.Timeout)
		_ = cancel
		return "", t.DeleteUser(taskCtx)
	}
	b.TaskRunner.Execute(ctx, "deleting user", taskID, execFunc)
	return &v1.DeleteUserResponse{TaskId: taskID}, nil
}
|
package initcmd
import (
"bytes"
"path/filepath"
"reflect"
"time"
"github.com/devspace-cloud/devspace/pkg/util/survey"
"github.com/sirupsen/logrus"
"github.com/devspace-cloud/devspace/cmd"
"github.com/devspace-cloud/devspace/e2e/utils"
"github.com/devspace-cloud/devspace/pkg/devspace/build/builder/helper"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/config"
cloudconfiglatest "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/devspace/configure"
fakecloudclient "github.com/devspace-cloud/devspace/pkg/devspace/cloud/client/testing"
fakecloudconfig "github.com/devspace-cloud/devspace/pkg/devspace/cloud/config/testing"
fakecloudprovider "github.com/devspace-cloud/devspace/pkg/devspace/cloud/testing"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/latest"
"github.com/devspace-cloud/devspace/pkg/util/log"
fakesurvey "github.com/devspace-cloud/devspace/pkg/util/survey/testing"
"github.com/devspace-cloud/devspace/pkg/devspace/docker"
fakedocker "github.com/devspace-cloud/devspace/pkg/devspace/docker/testing"
dockertypes "github.com/docker/docker/api/types"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// initTestCase describes one `devspace init` scenario: scripted survey
// answers plus the config the command is expected to produce.
type initTestCase struct {
	name           string
	answers        []string
	expectedConfig *latest.Config
	tempLogger     log.Logger
}

// customFactory wraps the shared e2e factory with test doubles for docker,
// cloud, and logging.
type customFactory struct {
	*utils.BaseCustomFactory
}

// customLogger combines a real logger with a scripted fake survey so that
// interactive prompts can be answered non-interactively.
type customLogger struct {
	log.Logger
	*fakesurvey.FakeSurvey
}
// NewDockerClientWithMinikube returns a stub docker client with fixed
// credentials; the kube-context arguments are ignored in tests.
func (c *customFactory) NewDockerClientWithMinikube(currentKubeContext string, preferMinikube bool, log log.Logger) (docker.Client, error) {
	auth := &dockertypes.AuthConfig{
		Username: "user",
		Password: "pass",
	}
	return &fakedocker.FakeClient{AuthConfig: auth}, nil
}
// GetProvider returns a fake cloud provider backed by a fake client.
func (c *customFactory) GetProvider(useProviderName string, log log.Logger) (cloud.Provider, error) {
	return fakecloudprovider.NewFakeProvider(cloudconfiglatest.Provider{}, fakecloudclient.NewFakeClient()), nil
}

// NewCloudConfigLoader returns a fake config loader with a fixed default provider.
func (c *customFactory) NewCloudConfigLoader() config.Loader {
	return fakecloudconfig.NewLoader(&cloudconfiglatest.Config{
		Version: cloudconfiglatest.Version,
		Default: "test-provider",
	})
}

// NewConfigureManager builds a real configure.Manager on top of this factory.
func (c *customFactory) NewConfigureManager(config *latest.Config, log log.Logger) configure.Manager {
	return configure.NewManager(c, config, log)
}

// Question answers prompts from the scripted fake survey instead of stdin.
func (c *customLogger) Question(params *survey.QuestionOptions) (string, error) {
	return c.FakeSurvey.Question(params)
}
// GetLog implements interface
// It lazily builds the cached logger: a console logger in verbose mode, or a
// buffer-backed stream logger otherwise, both paired with a fake survey.
func (c *customFactory) GetLog() log.Logger {
	if c.CacheLogger != nil {
		return c.CacheLogger
	}
	survey := fakesurvey.NewFakeSurvey()
	if c.Verbose {
		c.CacheLogger = &customLogger{
			Logger:     log.GetInstance(),
			FakeSurvey: survey,
		}
		return c.CacheLogger
	}
	c.Buff = &bytes.Buffer{}
	c.CacheLogger = &customLogger{
		Logger:     log.NewStreamLogger(c.Buff, logrus.InfoLevel),
		FakeSurvey: survey,
	}
	return c.CacheLogger
}
// availableSubTests maps sub-test names to their implementations.
var availableSubTests = map[string]func(factory *customFactory, logger log.Logger) error{
	"create_dockerfile":       CreateDockerfile,
	"use_existing_dockerfile": UseExistingDockerfile,
	"use_dockerfile":          UseDockerfile,
	"use_manifests":           UseManifests,
	"use_chart":               UseChart,
}

// Runner executes the init test suite.
type Runner struct{}

// RunNew is the Runner instance registered with the e2e framework.
var RunNew = &Runner{}

// SubTests returns the names of all available sub tests.
func (r *Runner) SubTests() []string {
	subTests := []string{}
	for k := range availableSubTests {
		subTests = append(subTests, k)
	}
	return subTests
}
// Run executes the selected init sub-tests (all of them when subTests is
// empty), each in its own namespace with a per-test timeout.
func (r *Runner) Run(subTests []string, ns string, pwd string, logger log.Logger, verbose bool, timeout int) error {
	logger.Info("Run 'init' test")
	// Populates the tests to run with all the available sub tests if no sub tests is specified
	if len(subTests) == 0 {
		for subTestName := range availableSubTests {
			subTests = append(subTests, subTestName)
		}
	}
	f := &customFactory{
		BaseCustomFactory: &utils.BaseCustomFactory{
			Pwd:     pwd,
			Verbose: verbose,
			Timeout: timeout,
		},
	}
	// Runs the tests
	for _, subTestName := range subTests {
		f.ResetLog()
		// Fix: the channel must be buffered so the worker goroutine can
		// deliver its result and exit even after we stop listening on
		// timeout; with an unbuffered channel the send blocked forever,
		// leaking one goroutine per timed-out sub-test.
		c1 := make(chan error, 1)
		go func() {
			err := func() error {
				f.Namespace = utils.GenerateNamespaceName("test-init-" + subTestName)
				err := availableSubTests[subTestName](f, logger)
				utils.PrintTestResult("init", subTestName, err, logger)
				if err != nil {
					return errors.Errorf("test 'init' failed: %s %v", f.GetLogContents(), err)
				}
				return nil
			}()
			c1 <- err
		}()
		select {
		case err := <-c1:
			if err != nil {
				return err
			}
		case <-time.After(time.Duration(timeout) * time.Second):
			return errors.Errorf("Timeout error - the test did not return within the specified timeout of %v seconds: %s", timeout, f.GetLogContents())
		}
	}
	return nil
}
// runTest executes `devspace init` with the scripted answers from testCase
// and, when an expected config is given, compares the produced config to it.
func runTest(f *customFactory, testCase initTestCase) error {
	initConfig := cmd.InitCmd{
		Dockerfile:  helper.DefaultDockerfilePath,
		Reconfigure: false,
		Context:     "",
		Provider:    "",
	}
	// Queue the scripted survey answers before the command asks anything.
	for _, answer := range testCase.answers {
		f.GetLog().(*customLogger).SetNextAnswer(answer)
	}
	// runs init cmd
	if err := initConfig.Run(f, nil, nil); err != nil {
		return err
	}
	if testCase.expectedConfig == nil {
		return nil
	}
	config, err := f.NewConfigLoader(nil, nil).Load()
	if err != nil {
		return err
	}
	if !reflect.DeepEqual(config, testCase.expectedConfig) {
		configYaml, _ := yaml.Marshal(config)
		expectedYaml, _ := yaml.Marshal(testCase.expectedConfig)
		return errors.Errorf("TestCase '%v': Got\n %s\n\n, but expected\n\n %s\n", testCase.name, configYaml, expectedYaml)
	}
	return nil
}
// beforeTest copies the given testdata directory into a fresh temp dir and
// makes it the working directory; the paths are recorded on the factory so
// afterTest can clean up.
func beforeTest(f *customFactory, logger log.Logger, testDir string) error {
	testDir = filepath.FromSlash(testDir)
	dirPath, dirName, err := utils.CreateTempDir()
	if err != nil {
		return err
	}
	f.DirPath = dirPath
	f.DirName = dirName
	// Copy the testdata into the temp dir
	err = utils.Copy(testDir, dirPath)
	if err != nil {
		return err
	}
	// Change working directory
	err = utils.ChangeWorkingDir(dirPath, f.GetLog())
	if err != nil {
		return err
	}
	return nil
}

// afterTest removes the temp dir created by beforeTest and restores the
// original working directory.
func afterTest(f *customFactory) {
	utils.DeleteTempAndResetWorkingDir(f.DirPath, f.Pwd, f.GetLog())
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"container/heap"
"context"
"errors"
"slices"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/expression"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/util"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/disk"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/memory"
)
// SortExec represents sorting executor.
type SortExec struct {
	exec.BaseExecutor

	// ByItems is the list of expressions to sort by.
	ByItems []*util.ByItems
	// Idx tracks read progress through the sorted result; reset to 0 on Open.
	Idx int
	// fetched records whether the child's rows have been consumed yet.
	fetched bool
	schema  *expression.Schema
	// keyColumns is the column index of the by items.
	keyColumns []int
	// keyCmpFuncs is used to compare each ByItem.
	keyCmpFuncs []chunk.CompareFunc
	// rowChunks is the chunks to store row values.
	rowChunks *chunk.SortedRowContainer
	// memTracker/diskTracker account for memory and spill-disk usage;
	// created in Open unless already set (see TopNExec note there).
	memTracker  *memory.Tracker
	diskTracker *disk.Tracker
	// partitionList is the chunks to store row values for partitions. Every partition is a sorted list.
	partitionList []*chunk.SortedRowContainer
	// multiWayMerge uses multi-way merge for spill disk.
	// The multi-way merge algorithm can refer to https://en.wikipedia.org/wiki/K-way_merge_algorithm
	multiWayMerge *multiWayMerge
	// spillAction save the Action for spill disk.
	spillAction *chunk.SortAndSpillDiskAction
}
// Close implements the Executor Close interface. It closes all spilled
// partitions, releases the memory charged for any unspilled rows,
// disables the spill action, and closes the child executor.
func (e *SortExec) Close() error {
	// Close every sealed partition first; abort the cleanup on failure.
	for _, container := range e.partitionList {
		err := container.Close()
		if err != nil {
			return err
		}
	}
	e.partitionList = e.partitionList[:0]
	if e.rowChunks != nil {
		// Give back the memory still charged for the in-memory container.
		e.memTracker.Consume(-e.rowChunks.GetMemTracker().BytesConsumed())
		e.rowChunks = nil
	}
	e.memTracker = nil
	e.diskTracker = nil
	e.multiWayMerge = nil
	if e.spillAction != nil {
		// Prevent any further spill from being triggered.
		e.spillAction.SetFinished()
	}
	e.spillAction = nil
	return e.Children(0).Close()
}
// Open implements the Executor Open interface. It resets per-execution
// state and lazily creates the memory/disk trackers; TopNExec reuses
// this method after creating its own tracker, hence the nil check.
func (e *SortExec) Open(ctx context.Context) error {
	e.fetched = false
	e.Idx = 0
	// To avoid duplicated initialization for TopNExec.
	if e.memTracker == nil {
		e.memTracker = memory.NewTracker(e.ID(), -1)
		e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
		e.diskTracker = memory.NewTracker(e.ID(), -1)
		e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker)
	}
	e.partitionList = e.partitionList[:0]
	return e.Children(0).Open(ctx)
}
// Next implements the Executor Next interface.
// Sort constructs the result following these steps:
//  1. Read as many rows as fit into memory.
//  2. If memory quota is triggered, sort these rows in memory and put them into disk as partition 1, then reset
//     the memory quota trigger and return to step 1
//  3. If memory quota is not triggered and child is consumed, sort these rows in memory as partition N.
//  4. Merge sort if the count of partitions is larger than 1. If there is only one partition in step 4, it works
//     just like in-memory sort before.
func (e *SortExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if !e.fetched {
		// First call: consume the whole child and build sorted partitions.
		e.initCompareFuncs()
		e.buildKeyColumns()
		err := e.fetchRowChunks(ctx)
		if err != nil {
			return err
		}
		e.fetched = true
	}
	if len(e.partitionList) == 0 {
		return nil
	}
	if len(e.partitionList) > 1 {
		// Multiple (spilled) partitions: stream via k-way merge.
		if err := e.externalSorting(req); err != nil {
			return err
		}
	} else {
		// Single in-memory partition: copy rows out from the cursor.
		for !req.IsFull() && e.Idx < e.partitionList[0].NumRow() {
			row, err := e.partitionList[0].GetSortedRow(e.Idx)
			if err != nil {
				return err
			}
			req.AppendRow(row)
			e.Idx++
		}
	}
	return nil
}
// externalSorting merges the sorted partitions into req via a k-way
// merge: a min-heap holds one cursor per partition, and the heap root
// is always the globally smallest pending row.
func (e *SortExec) externalSorting(req *chunk.Chunk) (err error) {
	if e.multiWayMerge == nil {
		// Lazily seed the heap with each partition's first row.
		e.multiWayMerge = &multiWayMerge{e.lessRow, e.compressRow, make([]partitionPointer, 0, len(e.partitionList))}
		for i := 0; i < len(e.partitionList); i++ {
			row, err := e.partitionList[i].GetSortedRow(0)
			if err != nil {
				return err
			}
			e.multiWayMerge.elements = append(e.multiWayMerge.elements, partitionPointer{row: row, partitionID: i, consumed: 0})
		}
		heap.Init(e.multiWayMerge)
	}
	for !req.IsFull() && e.multiWayMerge.Len() > 0 {
		partitionPtr := e.multiWayMerge.elements[0]
		req.AppendRow(partitionPtr.row)
		partitionPtr.consumed++
		if partitionPtr.consumed >= e.partitionList[partitionPtr.partitionID].NumRow() {
			// Partition exhausted: drop its cursor from the heap.
			heap.Remove(e.multiWayMerge, 0)
			continue
		}
		// Advance this partition's cursor and restore the heap invariant.
		partitionPtr.row, err = e.partitionList[partitionPtr.partitionID].
			GetSortedRow(partitionPtr.consumed)
		if err != nil {
			return err
		}
		e.multiWayMerge.elements[0] = partitionPtr
		heap.Fix(e.multiWayMerge, 0)
	}
	return nil
}
// fetchRowChunks drains the child executor into sorted row containers.
// Rows accumulate in e.rowChunks; when a memory-pressure spill marks
// the container as sorted (ErrCannotAddBecauseSorted), the container is
// sealed into partitionList and a fresh one replaces it. Any rows left
// in memory at the end are sorted and appended as the final partition.
func (e *SortExec) fetchRowChunks(ctx context.Context) error {
	fields := retTypes(e)
	byItemsDesc := make([]bool, len(e.ByItems))
	for i, byItem := range e.ByItems {
		byItemsDesc[i] = byItem.Desc
	}
	e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
	e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
	e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
	if variable.EnableTmpStorageOnOOM.Load() {
		// Arrange for the container to spill to disk on memory pressure.
		e.spillAction = e.rowChunks.ActionSpill()
		failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
			if val.(bool) {
				e.spillAction = e.rowChunks.ActionSpillForTest()
				defer e.spillAction.WaitForTest()
			}
		})
		e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
		e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
		e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
	}
	for {
		chk := tryNewCacheChunk(e.Children(0))
		err := Next(ctx, e.Children(0), chk)
		if err != nil {
			return err
		}
		rowCount := chk.NumRows()
		if rowCount == 0 {
			break
		}
		if err := e.rowChunks.Add(chk); err != nil {
			if errors.Is(err, chunk.ErrCannotAddBecauseSorted) {
				// The container spilled and is now immutable: seal it as a
				// partition and start a fresh container for later rows.
				e.partitionList = append(e.partitionList, e.rowChunks)
				e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs)
				e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
				e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
				e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker)
				e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks)
				e.spillAction = e.rowChunks.ActionSpill()
				failpoint.Inject("testSortedRowContainerSpill", func(val failpoint.Value) {
					if val.(bool) {
						e.spillAction = e.rowChunks.ActionSpillForTest()
						defer e.spillAction.WaitForTest()
					}
				})
				e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction)
				// Retry the chunk that the sealed container rejected.
				err = e.rowChunks.Add(chk)
			}
			if err != nil {
				return err
			}
		}
	}
	failpoint.Inject("SignalCheckpointForSort", func(val failpoint.Value) {
		if val.(bool) {
			if e.Ctx().GetSessionVars().ConnectionID == 123456 {
				e.Ctx().GetSessionVars().MemTracker.NeedKill.Store(true)
			}
		}
	})
	if e.rowChunks.NumRow() > 0 {
		// Sort whatever stayed in memory as the last partition.
		e.rowChunks.Sort()
		e.partitionList = append(e.partitionList, e.rowChunks)
	}
	return nil
}
// initCompareFuncs builds one chunk.CompareFunc per ORDER BY item,
// derived from each expression's evaluated type.
func (e *SortExec) initCompareFuncs() {
	funcs := make([]chunk.CompareFunc, 0, len(e.ByItems))
	for _, item := range e.ByItems {
		funcs = append(funcs, chunk.GetCompareFunc(item.Expr.GetType()))
	}
	e.keyCmpFuncs = funcs
}
// buildKeyColumns records the column index of every ORDER BY item;
// each item's expression must be a bare column reference.
func (e *SortExec) buildKeyColumns() {
	e.keyColumns = make([]int, len(e.ByItems))
	for i, item := range e.ByItems {
		e.keyColumns[i] = item.Expr.(*expression.Column).Index
	}
}
// lessRow reports whether rowI sorts strictly before rowJ under the
// ORDER BY items, honoring each item's descending flag.
func (e *SortExec) lessRow(rowI, rowJ chunk.Row) bool {
	for i, colIdx := range e.keyColumns {
		cmp := e.keyCmpFuncs[i](rowI, colIdx, rowJ, colIdx)
		if cmp == 0 {
			continue
		}
		if e.ByItems[i].Desc {
			return cmp > 0
		}
		return cmp < 0
	}
	// All key columns equal: not strictly less.
	return false
}
// compressRow three-way compares rowI and rowJ under the ORDER BY
// items (negative: I before J; zero: equal; positive: I after J).
func (e *SortExec) compressRow(rowI, rowJ chunk.Row) int {
	for i, colIdx := range e.keyColumns {
		if cmp := e.keyCmpFuncs[i](rowI, colIdx, rowJ, colIdx); cmp != 0 {
			if e.ByItems[i].Desc {
				return -cmp
			}
			return cmp
		}
	}
	return 0
}
// partitionPointer is one merge cursor in the k-way merge: the current
// head row, the partition it belongs to, and how many rows of that
// partition have already been emitted.
type partitionPointer struct {
	row chunk.Row
	partitionID int
	consumed int
}
// multiWayMerge is the min-heap of partition cursors used when merging
// spilled partitions; ordering is delegated to the injected functions.
type multiWayMerge struct {
	lessRowFunction func(rowI chunk.Row, rowJ chunk.Row) bool
	compressRowFunction func(rowI chunk.Row, rowJ chunk.Row) int
	elements []partitionPointer
}
// Less implements heap.Interface by comparing the head rows of the
// cursors at positions i and j.
func (h *multiWayMerge) Less(i, j int) bool {
	return h.lessRowFunction(h.elements[i].row, h.elements[j].row)
}
// Len implements heap.Interface: the number of live partition cursors.
func (h *multiWayMerge) Len() int {
	return len(h.elements)
}
// Push implements heap.Interface; elements are only seeded up front,
// never pushed during the merge.
func (*multiWayMerge) Push(interface{}) {
	// Should never be called.
}
// Pop implements heap.Interface by shrinking the cursor slice; the
// removed value is never used, so nil is returned to avoid allocation.
func (h *multiWayMerge) Pop() interface{} {
	h.elements = h.elements[:len(h.elements)-1]
	return nil
}
// Swap implements heap.Interface.
func (h *multiWayMerge) Swap(i, j int) {
	h.elements[i], h.elements[j] = h.elements[j], h.elements[i]
}
// TopNExec implements a Top-N algorithm and it is built from a SELECT statement with ORDER BY and LIMIT.
// Instead of sorting all the rows fetched from the table, it keeps the Top-N elements only in a heap to reduce memory usage.
type TopNExec struct {
	SortExec
	// limit carries the LIMIT/OFFSET of the originating plan.
	limit *plannercore.PhysicalLimit
	// totalLimit is Offset + Count: how many rows must be kept overall.
	totalLimit uint64
	// rowChunks is the chunks to store row values.
	rowChunks *chunk.List
	// rowPointer store the chunk index and row index for each row.
	rowPtrs []chunk.RowPtr
	// chkHeap is the max-heap view over rowPtrs used during fetching.
	chkHeap *topNChunkHeap
}
// topNChunkHeap implements heap.Interface. It embeds the executor so
// the heap methods can reach rowChunks/rowPtrs directly.
type topNChunkHeap struct {
	*TopNExec
}
// Less implements heap.Interface, but since we maintain a max heap,
// this function returns true if row i is greater than row j.
func (h *topNChunkHeap) Less(i, j int) bool {
	rowI := h.rowChunks.GetRow(h.rowPtrs[i])
	rowJ := h.rowChunks.GetRow(h.rowPtrs[j])
	return h.greaterRow(rowI, rowJ)
}
// greaterRow reports whether rowI sorts strictly after rowJ under the
// ORDER BY items; it is the max-heap counterpart of lessRow.
func (h *topNChunkHeap) greaterRow(rowI, rowJ chunk.Row) bool {
	for i, colIdx := range h.keyColumns {
		cmp := h.keyCmpFuncs[i](rowI, colIdx, rowJ, colIdx)
		if cmp == 0 {
			continue
		}
		if h.ByItems[i].Desc {
			return cmp < 0
		}
		return cmp > 0
	}
	// All key columns equal: not strictly greater.
	return false
}
// Len implements heap.Interface: the number of kept row pointers.
func (h *topNChunkHeap) Len() int {
	return len(h.rowPtrs)
}
// Push implements heap.Interface; rows are inserted by replacing the
// root (see processChildChk), never via heap.Push.
func (*topNChunkHeap) Push(interface{}) {
	// Should never be called.
}
// Pop implements heap.Interface by discarding the last row pointer.
func (h *topNChunkHeap) Pop() interface{} {
	h.rowPtrs = h.rowPtrs[:len(h.rowPtrs)-1]
	// We don't need the popped value, return nil to avoid memory allocation.
	return nil
}
// Swap implements heap.Interface.
func (h *topNChunkHeap) Swap(i, j int) {
	h.rowPtrs[i], h.rowPtrs[j] = h.rowPtrs[j], h.rowPtrs[i]
}
// keyColumnsLess reports whether the row at pointer i orders before
// the row at pointer j under the ORDER BY items.
func (e *TopNExec) keyColumnsLess(i, j chunk.RowPtr) bool {
	return e.lessRow(e.rowChunks.GetRow(i), e.rowChunks.GetRow(j))
}
// keyColumnsCompare three-way compares the rows at pointers i and j
// under the ORDER BY items.
func (e *TopNExec) keyColumnsCompare(i, j chunk.RowPtr) int {
	return e.compressRow(e.rowChunks.GetRow(i), e.rowChunks.GetRow(j))
}
// initPointers builds one RowPtr per row currently in rowChunks and
// charges the memory tracker 8 bytes per pointer.
func (e *TopNExec) initPointers() {
	e.rowPtrs = make([]chunk.RowPtr, 0, e.rowChunks.Len())
	e.memTracker.Consume(int64(8 * e.rowChunks.Len()))
	for chkIdx := 0; chkIdx < e.rowChunks.NumChunks(); chkIdx++ {
		rowChk := e.rowChunks.GetChunk(chkIdx)
		for rowIdx := 0; rowIdx < rowChk.NumRows(); rowIdx++ {
			e.rowPtrs = append(e.rowPtrs, chunk.RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)})
		}
	}
}
// Open implements the Executor Open interface. It creates this
// executor's memory tracker before delegating common state reset to
// the embedded SortExec fields.
func (e *TopNExec) Open(ctx context.Context) error {
	e.memTracker = memory.NewTracker(e.ID(), -1)
	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
	e.fetched = false
	e.Idx = 0
	return e.Children(0).Open(ctx)
}
// Next implements the Executor Next interface. On the first call it
// materializes the top (Offset+Count) rows and sorts their pointers;
// later calls stream rows out starting at the Offset-th pointer.
func (e *TopNExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	if !e.fetched {
		e.totalLimit = e.limit.Offset + e.limit.Count
		// Skip the OFFSET rows by starting the cursor past them.
		e.Idx = int(e.limit.Offset)
		err := e.loadChunksUntilTotalLimit(ctx)
		if err != nil {
			return err
		}
		err = e.executeTopN(ctx)
		if err != nil {
			return err
		}
		e.fetched = true
	}
	if e.Idx >= len(e.rowPtrs) {
		return nil
	}
	if !req.IsFull() {
		numToAppend := mathutil.Min(len(e.rowPtrs)-e.Idx, req.RequiredRows()-req.NumRows())
		rows := make([]chunk.Row, numToAppend)
		for index := 0; index < numToAppend; index++ {
			rows[index] = e.rowChunks.GetRow(e.rowPtrs[e.Idx])
			e.Idx++
		}
		req.AppendRows(rows)
	}
	return nil
}
// loadChunksUntilTotalLimit reads child rows until at least
// totalLimit rows are buffered (or the child is exhausted), then
// prepares the pointer table and comparison state for the heap phase.
func (e *TopNExec) loadChunksUntilTotalLimit(ctx context.Context) error {
	e.chkHeap = &topNChunkHeap{e}
	e.rowChunks = chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize())
	e.rowChunks.GetMemTracker().AttachTo(e.memTracker)
	e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
	for uint64(e.rowChunks.Len()) < e.totalLimit {
		srcChk := tryNewCacheChunk(e.Children(0))
		// adjust required rows by total limit
		srcChk.SetRequiredRows(int(e.totalLimit-uint64(e.rowChunks.Len())), e.MaxChunkSize())
		err := Next(ctx, e.Children(0), srcChk)
		if err != nil {
			return err
		}
		if srcChk.NumRows() == 0 {
			break
		}
		e.rowChunks.Add(srcChk)
	}
	e.initPointers()
	e.initCompareFuncs()
	e.buildKeyColumns()
	return nil
}
// topNCompactionFactor bounds wasted memory from evicted rows: once the
// stored row count exceeds len(rowPtrs) * this factor, doCompaction
// rebuilds the chunk list keeping only the referenced rows.
const topNCompactionFactor = 4
// executeTopN trims the buffered rows down to totalLimit with a
// max-heap, streams the rest of the child through the heap, and
// finally sorts the surviving row pointers.
func (e *TopNExec) executeTopN(ctx context.Context) error {
	heap.Init(e.chkHeap)
	for uint64(len(e.rowPtrs)) > e.totalLimit {
		// The number of rows we loaded may exceed total limit; remove greatest rows by Pop.
		heap.Pop(e.chkHeap)
	}
	childRowChk := tryNewCacheChunk(e.Children(0))
	for {
		err := Next(ctx, e.Children(0), childRowChk)
		if err != nil {
			return err
		}
		if childRowChk.NumRows() == 0 {
			break
		}
		err = e.processChildChk(childRowChk)
		if err != nil {
			return err
		}
		// Evicted rows pile up in rowChunks; compact when they dominate.
		if e.rowChunks.Len() > len(e.rowPtrs)*topNCompactionFactor {
			err = e.doCompaction()
			if err != nil {
				return err
			}
		}
	}
	// Order the kept pointers so Next can emit rows in sorted order.
	slices.SortFunc(e.rowPtrs, e.keyColumnsCompare)
	return nil
}
// processChildChk pushes each incoming row against the max-heap: when a
// row beats the current heap maximum, the maximum is evicted and the
// new row takes its place at the root.
func (e *TopNExec) processChildChk(childRowChk *chunk.Chunk) error {
	for i := 0; i < childRowChk.NumRows(); i++ {
		candidate := childRowChk.GetRow(i)
		heapMax := e.rowChunks.GetRow(e.rowPtrs[0])
		if e.chkHeap.greaterRow(heapMax, candidate) {
			// The heap maximum loses: keep the candidate and re-heapify.
			e.rowPtrs[0] = e.rowChunks.AppendRow(candidate)
			heap.Fix(e.chkHeap, 0)
		}
	}
	return nil
}
// doCompaction rebuilds the chunks and row pointers to release memory.
// If we don't do compaction, in an extreme case like the child data is already ascending sorted
// but we want descending top N, then we will keep all data in memory.
// But if data is distributed randomly, this function will be called log(n) times.
func (e *TopNExec) doCompaction() error {
	newRowChunks := chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize())
	newRowPtrs := make([]chunk.RowPtr, 0, e.rowChunks.Len())
	// Copy only the rows still referenced by the heap into a fresh list.
	for _, rowPtr := range e.rowPtrs {
		newRowPtr := newRowChunks.AppendRow(e.rowChunks.GetRow(rowPtr))
		newRowPtrs = append(newRowPtrs, newRowPtr)
	}
	newRowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks)
	// Swap the tracked child so accounting follows the new list.
	e.memTracker.ReplaceChild(e.rowChunks.GetMemTracker(), newRowChunks.GetMemTracker())
	e.rowChunks = newRowChunks
	// Re-charge the 8 bytes/pointer accounting for the new pointer table.
	e.memTracker.Consume(int64(-8 * len(e.rowPtrs)))
	e.memTracker.Consume(int64(8 * len(newRowPtrs)))
	e.rowPtrs = newRowPtrs
	return nil
}
|
package controller
import (
"fmt"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
)
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
// runTenantWorker is a long-running loop that drains the tenant
// workqueue: it pops one key at a time and resolves the corresponding
// tenant's state until the queue is shut down.
//
// Fix: the previous implementation did `continue` when Get reported
// shutdown. After a workqueue is shut down, Get returns shutdown=true
// forever, so `continue` turned this worker into a busy loop that never
// exits; it must return instead.
func (c *Controller) runTenantWorker() {
	for {
		obj, shutdown := c.tenantWorkqueue.Get()
		if shutdown {
			// Queue closed: stop the worker instead of spinning.
			return
		}

		// We wrap this block in a func so we can defer c.tenantWorkqueue.Done.
		err := func(obj interface{}) error {
			// We call Done here so the workqueue knows we have finished
			// processing this item. We also must remember to call Forget if we
			// do not want this work item being re-queued. For example, we do
			// not call Forget if a transient error occurs, instead the item is
			// put back on the workqueue and attempted again after a back-off
			// period.
			defer c.tenantWorkqueue.Done(obj)

			// We expect strings to come off the workqueue. These are of the
			// form namespace/name. We do this as the delayed nature of the
			// workqueue means the items in the informer cache may actually be
			// more up to date that when the item was initially put onto the
			// workqueue.
			key, ok := obj.(string)
			if !ok {
				// As the item in the workqueue is actually invalid, we call
				// Forget here else we'd go into a loop of attempting to
				// process a work item that is invalid.
				c.tenantWorkqueue.Forget(obj)
				level.Info(c.logger).Log(
					"err", fmt.Sprintf("expected string in workqueue but got %#v", obj),
				)
				return nil
			}
			// Run resolveTenantState, passing it the namespace/name string of the tenant
			// resource to be resolved.
			if err := c.resolveTenantState(key); err != nil {
				return errors.Wrapf(err, "error resolving tenant state for key: %s", key)
			}
			// Finally, if no error occurs we Forget this item so it does not
			// get queued again until another change happens.
			c.tenantWorkqueue.Forget(obj)
			level.Debug(c.logger).Log("msg", "successfully synced Tenant resource", "key", key)
			return nil
		}(obj)

		if err != nil {
			level.Info(c.logger).Log(
				"err", err,
				"msg", "error in processing workqueue job",
			)
		}
	}
}
// enqueueTenant takes a tenant resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than tenant.
func (c *Controller) enqueueTenant(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		// Objects without usable metadata cannot be keyed; log and drop.
		level.Info(c.logger).Log(
			"err", err,
			"msg", "getting cache key for object while enqueueing tenant",
		)
		return
	}
	// Rate-limited add so hot objects back off instead of flooding workers.
	c.tenantWorkqueue.AddRateLimited(key)
}
// handleTenantOwner will take any resource implementing metav1.Object and attempt
// to find the tenant resource that 'owns' it. It does this by looking at the
// objects metadata.ownerReferences field for an appropriate OwnerReference.
// If the object is "owned" by a Tenant resource, we will enqueue that tenant
// resource to be processed. If the object does not have an appropriate
// OwnerReference, it will simply be skipped.
func (c *Controller) handleTenantOwner(obj interface{}) {
	var object metav1.Object
	var ok bool
	if object, ok = obj.(metav1.Object); !ok {
		// Deleted objects may arrive wrapped in a tombstone; unwrap it to
		// recover the final state of the object.
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			level.Info(c.logger).Log("err", "error decoding object, invalid type")
			return
		}
		object, ok = tombstone.Obj.(metav1.Object)
		if !ok {
			level.Info(c.logger).Log("err", "error decoding object tombstone, invalid type")
			return
		}
		level.Debug(c.logger).Log("msg", "recovered deleted object from tombstone", "name", object.GetName())
	}
	level.Debug(c.logger).Log("msg", "processing object", "name", object.GetName())
	if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
		// If this object is not owned by a tenant, we should not do anything more
		// with it.
		if ownerRef.Kind != "Tenant" {
			return
		}
		tenant, err := c.tenantsLister.Tenants(object.GetNamespace()).Get(ownerRef.Name)
		if err != nil {
			// Owner no longer exists in the cache: nothing to enqueue.
			level.Debug(c.logger).Log(
				"msg", "ignoring orphaned object",
				"object", object.GetSelfLink(),
				"tenant", ownerRef.Name,
			)
			return
		}
		c.enqueueTenant(tenant)
		return
	}
}
|
package main
import (
"context"
"encoding/binary"
"github.com/juju/errors"
"github.com/pingcap/tidb/kv"
)
var (
	// emptyBytes is a reusable zero-length slice.
	// NOTE(review): not referenced in this chunk — presumably used
	// elsewhere in the package; confirm before removing.
	emptyBytes = make([]byte, 0)
	// paddingZeros supplies the zero padding appended to partial
	// 8-byte groups by EncodeBytes.
	paddingZeros = make([]byte, 9)
	// errCorruptedData is returned when a decoder runs out of bytes.
	errCorruptedData = errors.New("Failed to decode corrupted data")
)
const (
	// rdbEscapeLength is the size of one encoded group: 8 payload
	// bytes plus 1 flag byte (myrocks mem-comparable encoding).
	rdbEscapeLength = 9
	// signMask flips the sign bit so encoded int64s compare correctly
	// as unsigned big-endian bytes.
	signMask = 0x8000000000000000
)
// rdbEncodedSize returns how many bytes EncodeBytes emits for an
// n-byte payload: one 9-byte group per (up to) 8 payload bytes, plus
// one more group to absorb padding and the terminator flag.
func rdbEncodedSize(n int) int {
	groups := (n + (rdbEscapeLength - 2)) / (rdbEscapeLength - 1)
	return (groups + 1) * rdbEscapeLength
}
// ensureCapacity returns a slice with the contents of from whose
// capacity can absorb extra more bytes without reallocating; from is
// returned unchanged when it is already big enough.
func ensureCapacity(from []byte, extra int) []byte {
	need := len(from) + extra
	if cap(from) >= need {
		return from
	}
	grown := make([]byte, len(from), need)
	copy(grown, from)
	return grown
}
// EncodeBytes encodes the slice to buffer with mem-comparable format from myrocks.
// src is split into 8-byte groups; each group is zero-padded to 8 bytes
// and followed by a flag byte: rdbEscapeLength (9) means "more groups
// follow", while 0..8 marks the final group and gives its real length.
func EncodeBytes(buffer []byte, src []byte) []byte {
	// Reserve the exact encoded size up front to avoid repeated growth.
	buffer = ensureCapacity(buffer, rdbEncodedSize(len(src)))
	for {
		// Figure out how many bytes to copy, copy them and adjust pointers
		var copyLen int
		srcLen := len(src)
		if rdbEscapeLength-1 < srcLen {
			copyLen = rdbEscapeLength - 1
		} else {
			copyLen = srcLen
		}
		buffer = append(buffer, src[0:copyLen]...)
		src = src[copyLen:]
		// Are we at the end of the input?
		if len(src) == 0 {
			// pad with zeros if necessary;
			paddingBytes := rdbEscapeLength - 1 - copyLen
			if paddingBytes > 0 {
				buffer = append(buffer, paddingZeros[0:paddingBytes]...)
			}
			// Put the flag byte (0 - N-1) in the output
			buffer = append(buffer, byte(copyLen))
			break
		}
		// We have more data - put the flag byte (N) in and continue
		buffer = append(buffer, rdbEscapeLength)
	}
	return buffer
}
// DecodeBytes decodes the slice, return the left buffer, decoded slice or error.
// It is the inverse of EncodeBytes: it consumes 9-byte groups until it
// sees a flag byte below rdbEscapeLength, which marks the final group
// and its real payload length.
func DecodeBytes(from []byte) ([]byte, []byte, error) {
	data := make([]byte, 0, len(from))
	for {
		if len(from) < rdbEscapeLength {
			return nil, nil, errors.New("insufficient bytes to decode value")
		}
		groupBytes := from[:rdbEscapeLength]
		group := groupBytes[:rdbEscapeLength-1]
		realGroupSize := groupBytes[rdbEscapeLength-1]
		if realGroupSize > rdbEscapeLength {
			return nil, nil, errors.Errorf("invalid flag byte, group bytes %q", groupBytes)
		}
		from = from[rdbEscapeLength:]
		if realGroupSize < rdbEscapeLength {
			// Final group: the padding after the payload must be zeros.
			// Check validity of padding bytes.
			for _, v := range group[realGroupSize:] {
				if v != 0 {
					return nil, nil, errors.Errorf("invalid padding byte, group bytes %q", groupBytes)
				}
			}
			data = append(data, group[:realGroupSize]...)
			break
		} else {
			// Full group: all 8 payload bytes are meaningful; keep going.
			data = append(data, group[:rdbEscapeLength-1]...)
		}
	}
	return from, data, nil
}
// EncodeInt64 appends the mem-comparable encoding of i to buffer:
// the sign bit is flipped so big-endian byte order matches numeric
// order across negative and positive values.
func EncodeInt64(buffer []byte, i int64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], uint64(i)^signMask)
	return append(buffer, scratch[:]...)
}
// DecodeInt64 decodes an int64 produced by EncodeInt64 from the front
// of from, returning the remaining bytes and the value, or an error
// when fewer than 8 bytes are available.
func DecodeInt64(from []byte) ([]byte, int64, error) {
	if len(from) < 8 {
		return nil, 0, errCorruptedData
	}
	decoded := int64(binary.BigEndian.Uint64(from[:8]) ^ signMask)
	return from[8:], decoded, nil
}
// GetKeyType returns the type-marker byte stored as the last byte of an
// encoded key. NOTE(review): panics on an empty key — callers are
// presumably guaranteed to pass non-empty encoded keys; confirm.
func GetKeyType(key []byte) uint8 {
	return key[len(key)-1]
}
// EncodeStringKey appends the mem-comparable encoding of key followed
// by the String type marker to buf and returns the extended buffer.
//
// Fix: the previous code did `key = EncodeBytes(buf, key)` and then
// appended that result — which already begins with buf's existing
// bytes — back onto buf, duplicating buf's contents whenever buf was
// non-empty. Assigning EncodeBytes' result to buf directly is
// equivalent for a nil buf and correct for a non-empty one.
func EncodeStringKey(buf []byte, key []byte) []byte {
	buf = EncodeBytes(buf, key)
	buf = append(buf, String)
	return buf
}
// EncodeHashMetaKey appends the mem-comparable encoding of key followed
// by the HashMeta type marker to buf and returns the extended buffer.
//
// Fix: as with EncodeStringKey, the old code appended the result of
// EncodeBytes(buf, key) — which already contains buf — onto buf,
// duplicating any existing contents.
func EncodeHashMetaKey(buf []byte, key []byte) []byte {
	buf = EncodeBytes(buf, key)
	buf = append(buf, HashMeta)
	return buf
}
// EncodeHashField appends encoded(key) | HashField | encoded(field) to
// buf and returns the extended buffer.
//
// Fix: the old code appended EncodeBytes' return value — which already
// contains everything previously in buf — back onto buf at both steps,
// duplicating the encoded key and marker even when buf started nil
// (the field append re-included the key+marker prefix). Assigning the
// EncodeBytes result to buf directly produces each component exactly
// once.
func EncodeHashField(buf []byte, key []byte, field []byte) []byte {
	buf = EncodeBytes(buf, key)
	buf = append(buf, HashField)
	buf = EncodeBytes(buf, field)
	return buf
}
// HandleDelete deletes every stored key whose encoded form begins with
// the mem-comparable encoding of key, in a single transaction. It
// returns int64(1) if at least one key was deleted, int64(0) otherwise.
func HandleDelete(db kv.Storage, key []byte) (interface{}, error) {
	txn, err := db.Begin()
	if err != nil {
		return nil, err
	}
	// Rollback after a successful Commit is a no-op, so deferring it is safe.
	defer txn.Rollback()
	prefixKey := EncodeBytes(nil, key)
	it, err := txn.Seek(prefixKey)
	if err != nil {
		return nil, err
	}
	defer it.Close()
	// Collect matching keys first, then delete, to avoid mutating the
	// keyspace while iterating it.
	var keys [][]byte
	for it.Valid() && it.Key().HasPrefix(prefixKey) {
		keys = append(keys, it.Key().Clone())
		if err := it.Next(); err != nil {
			return nil, err
		}
	}
	hasKey := int64(0)
	for _, key := range keys {
		hasKey = 1
		if err := txn.Delete(key); err != nil {
			return nil, err
		}
	}
	if err := txn.Commit(context.TODO()); err != nil {
		return nil, err
	}
	return hasKey, nil
}
// SeekPrefix positions an iterator at the first stored key whose
// encoded form begins with the mem-comparable encoding of key. It
// returns (nil, nil) when no such key exists; otherwise the caller owns
// the returned iterator and must Close it.
//
// Fix: the prefix test previously compared against the raw key, but
// stored keys are mem-comparable encoded (groups of 8 bytes plus flag
// bytes), so a raw prefix rarely matches the encoded bytes. Compare
// against the encoded seek key instead, consistent with HandleDelete.
func SeekPrefix(txn kv.Transaction, key []byte) (kv.Iterator, error) {
	seekKey := EncodeBytes(nil, key)
	it, err := txn.Seek(seekKey)
	if err != nil {
		return nil, err
	}
	if !it.Valid() || !it.Key().HasPrefix(seekKey) {
		it.Close()
		return nil, nil
	}
	return it, nil
}
|
package main
import (
"fmt"
)
// 53. 最大子序和
// 给定一个整数数组 nums ,找到一个具有最大和的连续子数组(子数组最少包含一个元素),返回其最大和。
// 进阶:
// 如果你已经实现复杂度为 O(n) 的解法,尝试使用更为精妙的分治法求解。
// https://leetcode-cn.com/problems/maximum-subarray/
// main exercises the divide-and-conquer variant on a small sample.
func main() {
	// fmt.Println(maxSubArray3([]int{-2, 1, -3, 4, -1, 2, 1, -5, 4})) // 6
	fmt.Println(maxSubArray3([]int{-2, 1})) // 1
}
// Approach 1: dynamic programming.
// maxSubArray returns the largest sum over all non-empty contiguous
// subarrays of nums; dp[i] holds the best sum of a subarray ending
// exactly at index i, so dp[i] = max(dp[i-1]+nums[i], nums[i]).
// Returns 0 for an empty slice.
func maxSubArray(nums []int) (max int) {
	if len(nums) == 0 {
		return 0
	}
	dp := make([]int, len(nums))
	dp[0] = nums[0]
	max = nums[0]
	for i := 1; i < len(nums); i++ {
		dp[i] = dp[i-1] + nums[i]
		if nums[i] > dp[i] {
			// Starting fresh at i beats extending the previous run.
			dp[i] = nums[i]
		}
		if dp[i] > max {
			max = dp[i]
		}
	}
	return max
}
// Approach 2: Kadane's algorithm with O(1) extra space — the dp table
// of approach 1 collapses to a single running best-suffix sum.
// Returns 0 for an empty slice.
func maxSubArray2(nums []int) (max int) {
	if len(nums) == 0 {
		return 0
	}
	max = nums[0]
	running := nums[0]
	for _, v := range nums[1:] {
		// Either extend the current run or restart at v.
		if running+v > v {
			running += v
		} else {
			running = v
		}
		if running > max {
			max = running
		}
	}
	return max
}
// getMax returns the larger of a and b.
func getMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// Approach 3: divide and conquer, O(n log n).
// The best subarray lies entirely in the left half, entirely in the
// right half, or crosses the midpoint; the answer is the max of the
// three. Returns 0 for an empty slice.
func maxSubArray3(nums []int) int {
	n := len(nums)
	if n == 0 {
		return 0
	}
	if n == 1 {
		return nums[0]
	}
	mid := n / 2
	best := maxSubArray3(nums[:mid])
	if r := maxSubArray3(nums[mid:]); r > best {
		best = r
	}

	// Best suffix of the left half (must include nums[mid-1]).
	left, run := nums[mid-1], 0
	for i := mid - 1; i >= 0; i-- {
		run += nums[i]
		if run > left {
			left = run
		}
	}
	// Best prefix of the right half (must include nums[mid]).
	right, run2 := nums[mid], 0
	for i := mid; i < n; i++ {
		run2 += nums[i]
		if run2 > right {
			right = run2
		}
	}
	if cross := left + right; cross > best {
		best = cross
	}
	return best
}
|
package main
//给定一个整数数组 prices ,它的第 i 个元素 prices[i] 是一支给定的股票在第 i 天的价格。
//
//设计一个算法来计算你所能获取的最大利润。你最多可以完成 k 笔交易。
//
//注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
//
//
//
//示例 1:
//
//输入:k = 2, prices = [2,4,1]
//输出:2
//解释:在第 1 天 (股票价格 = 2) 的时候买入,在第 2 天 (股票价格 = 4) 的时候卖出,这笔交易所能获得利润 = 4-2 = 2 。
// main is intentionally empty; maxProfit is exercised externally.
func main() {
}
// maxProfit returns the best total profit from at most k buy/sell
// transactions over prices, holding at most one share at a time.
// dp[day][trades][0] is the best cash with at most `trades` buys and no
// position; dp[day][trades][1] is the best cash while holding a share.
func maxProfit(k int, prices []int) int {
	n := len(prices)
	if n < 2 {
		return 0
	}
	dp := make([][][2]int, n)
	for day := range dp {
		dp[day] = make([][2]int, k+1)
	}
	for day := 0; day < n; day++ {
		for trades := k; trades >= 1; trades-- {
			if day == 0 {
				// Base case: flat, or bought on day 0.
				dp[0][trades][0] = 0
				dp[0][trades][1] = -prices[0]
				continue
			}
			// Flat today: stayed flat, or sold the held share.
			dp[day][trades][0] = max(dp[day-1][trades][0], dp[day-1][trades][1]+prices[day])
			// Holding today: kept holding, or bought (consumes one trade).
			dp[day][trades][1] = max(dp[day-1][trades][1], dp[day-1][trades-1][0]-prices[day])
		}
	}
	return dp[n-1][k][0]
}
// max returns the larger of x and y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
|
package inmemory
import (
"encoding/json"
"math/rand"
"os"
"path/filepath"
"sync"
"time"
"github.com/Tinee/go-graphql-chat/domain"
)
// Client bundles the three in-memory repository implementations behind
// the domain repository interfaces.
type Client struct {
	u *userInMemory
	ms *messagesInMemory
	p *profileInMemory
}
// NewClient constructs a Client whose repositories are all backed by
// process-local memory, each guarded by its own mutex.
func NewClient() *Client {
	c := &Client{}
	c.u = &userInMemory{mtx: &sync.Mutex{}}
	c.ms = &messagesInMemory{mtx: &sync.Mutex{}}
	c.p = &profileInMemory{mtx: &sync.Mutex{}}
	return c
}
// UserRepository exposes the in-memory user store as a domain.UserRepository.
func (c *Client) UserRepository() domain.UserRepository {
	return c.u
}
// MessageRepository exposes the in-memory message store as a domain.MessageRepository.
func (c *Client) MessageRepository() domain.MessageRepository {
	return c.ms
}
// ProfileRepository exposes the in-memory profile store as a domain.ProfileRepository.
func (c *Client) ProfileRepository() domain.ProfileRepository {
	return c.p
}
// mockData is the JSON shape of a seed-data file consumed by FillWithMockData.
type mockData struct {
	Profiles []domain.Profile `json:"profiles"`
	Users []domain.User `json:"users"`
}
// FillWithMockData seeds the in-memory profile and user stores from a
// JSON file at path (relative paths are resolved to absolute first).
// This exists because development has no real database.
//
// Fix: the opened file was never closed, leaking a descriptor per call;
// it is now closed via defer on every path. The element-by-element
// append loops were also collapsed into single variadic appends.
func (c *Client) FillWithMockData(path string) error {
	abs, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	f, err := os.Open(abs)
	if err != nil {
		return err
	}
	// Close the handle even when decoding fails (previously leaked).
	defer f.Close()

	var data mockData
	if err := json.NewDecoder(f).Decode(&data); err != nil {
		return err
	}
	c.p.profiles = append(c.p.profiles, data.Profiles...)
	c.u.users = append(c.u.users, data.Users...)
	return nil
}
// idSeedOnce guards the one-time seeding of the global math/rand source.
var idSeedOnce sync.Once

// generateID returns a 20-character random identifier drawn from ASCII
// letters.
//
// Fix: the previous implementation called rand.Seed(time.Now().UnixNano())
// on every invocation. Reseeding per call is deprecated (Go 1.20+ seeds
// the global source automatically) and makes two calls within the same
// nanosecond return identical IDs; the source is now seeded at most once.
func generateID() string {
	idSeedOnce.Do(func() { rand.Seed(time.Now().UnixNano()) })
	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	b := make([]byte, 20)
	for i := range b {
		b[i] = letters[rand.Intn(len(letters))]
	}
	return string(b)
}
|
package mbclient
// getTypeString renders a parenthesized Lucene-style type clause such
// as "(type:a OR type:b)" from the given filters; an empty filter list
// yields "()".
func (c *MBClient) getTypeString(typeFilters []string) string {
	clause := "("
	for i, filter := range typeFilters {
		if i > 0 {
			clause += " OR "
		}
		clause += "type:" + filter
	}
	return clause + ")"
}
|
package multiply
import (
"fmt"
"strings"
)
// multiply returns the decimal product of the integers given as digit
// strings num1 and num2, using grade-school long multiplication on a
// digit matrix. NOTE(review): assumes both inputs are non-empty ASCII
// digit strings; any other byte maps to 0 via baseNum — confirm with
// callers.
func multiply(num1, num2 string) (mult string) {
	// Map ASCII digit bytes to their numeric values.
	baseNum := map[byte]int{
		'1': 1,
		'2': 2,
		'3': 3,
		'4': 4,
		'5': 5,
		'6': 6,
		'7': 7,
		'8': 8,
		'9': 9,
		'0': 0,
	}
	length1, length2 := len(num1), len(num2)
	// result[i][j+1] holds the single-digit product of num1[i]*num2[j]
	// after carry propagation; result[i][0] absorbs the final carry of
	// row i. slot carries between cells within a row.
	result := make([][]int, length1)
	slot := 0
	for i := length1 - 1; i >= 0; i-- {
		result[i] = make([]int, length2+1)
		for j := length2 - 1; j >= 0; j-- {
			result[i][j+1] = baseNum[num1[i]]*baseNum[num2[j]] + slot
			if result[i][j+1]/10 > 0 {
				// Keep one digit in the cell, carry the rest left.
				slot = result[i][j+1] / 10
				result[i][j+1] %= 10
			} else {
				slot = 0
			}
		}
		if slot != 0 {
			result[i][0] = slot
		}
		slot = 0
	}
	// Sum the anti-diagonals of the matrix from least-significant to
	// most-significant, prepending each output digit; slot1 carries
	// between diagonals.
	slot1 := 0
	for k := length1 - 1; k >= 0; k-- {
		sum := slot1
		slot1 = 0
		for i, j := k, length2; i < length1 && j >= 0; i, j = i+1, j-1 {
			sum += result[i][j]
		}
		if sum/10 > 0 {
			slot1 = sum / 10
			sum %= 10
		}
		mult = fmt.Sprintf("%d%s", sum, mult)
	}
	// Continue with the diagonals that start along the top row.
	for k := length2 - 1; k >= 0; k-- {
		sum := slot1
		slot1 = 0
		for i, j := 0, k; i < length1 && j >= 0; i, j = i+1, j-1 {
			sum += result[i][j]
		}
		if sum/10 > 0 {
			slot1 = sum / 10
			sum %= 10
		}
		mult = fmt.Sprintf("%d%s", sum, mult)
	}
	// Drop leading zeros (keeping at least one digit).
	mult = TrimPrefixZero(mult)
	return mult
}
// TrimPrefixZero strips leading '0' characters from str while always
// keeping at least one character, so "000" becomes "0" (and "" stays "").
//
// Fix: the previous loop removed a single zero per strings.TrimPrefix
// call, re-scanning the string each time (quadratic in the number of
// leading zeros); a single strings.TrimLeft does the same work in one
// linear pass.
func TrimPrefixZero(str string) string {
	trimmed := strings.TrimLeft(str, "0")
	if trimmed != "" {
		return trimmed
	}
	if str == "" {
		return ""
	}
	// str was all zeros: keep a single one.
	return "0"
}
|
package controller
import (
"fmt"
"log"
"net/http"
"local.ex/main/pages/about"
"local.ex/main/pages/home"
)
// Controller registers the site's routes on the default mux and serves
// HTTP on port 7890. It blocks until the server stops; a listen/serve
// failure terminates the process via log.Fatal.
//
// Fix: the `return` that followed log.Fatal was unreachable (log.Fatal
// calls os.Exit) and has been removed.
func Controller() {
	const port = "7890"
	fmt.Printf("Starting server on port %q...\n", port)

	http.HandleFunc("/", home.Page)
	http.HandleFunc("/about", about.About)

	if err := http.ListenAndServe(":"+port, nil); err != nil {
		log.Fatal("Error starting the HTTP server : ", err)
	}
}
|
package main
import "fmt"
// main reads a count N from stdin followed by N integers and, for each
// integer x, reports whether x is a perfect number (equal to the sum of
// its proper divisors).
func main() {
	var total int
	fmt.Scanf("%d", &total)
	for caseIdx := 0; caseIdx < total; caseIdx++ {
		var x int
		fmt.Scanf("%d", &x)

		divisorSum := 0
		for d := 1; d < x; d++ {
			if x%d == 0 {
				divisorSum += d
			}
		}
		if divisorSum == x {
			fmt.Printf("%d eh perfeito\n", x)
		} else {
			fmt.Printf("%d nao eh perfeito\n", x)
		}
	}
}
|
package main
import (
"context"
"fmt"
"github.com/cmcpasserby/scli"
"github.com/cmcpasserby/unity-loader/unity"
)
// createSearchCmd builds the `unity-loader search` subcommand: it takes
// exactly one partial version string, queries the Unity archive site,
// and prints each matching version with its revision hash.
func createSearchCmd() *scli.Command {
	return &scli.Command{
		Usage: "unity-loader search [partialVersion]",
		ShortHelp: "Searches for a unity version on the archive site",
		LongHelp: "Search for a unity version on the archive site, partial numbers can be listed and all matches will be returned",
		ArgsValidator: scli.ExactArgs(1),
		Exec: func(ctx context.Context, args []string) error {
			results, err := unity.SearchArchive(args[0])
			if err != nil {
				return err
			}
			for _, ver := range results {
				fmt.Printf("%s (%s)\n", ver.String(), ver.RevisionHash)
			}
			return nil
		},
	}
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"testing"
"time"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/stretchr/testify/require"
)
// TestCacheKey pins NewPlanCacheKey's input validation and the exact
// hash produced for a fixed session configuration.
//
// Fixes: the first two NewPlanCacheKey results were assigned to `key`
// but never used (staticcheck SA4006); failures used bare t.Fail()
// with no message; and `err.Error()` would have panicked with a nil
// pointer dereference had the call unexpectedly succeeded. All checks
// now use require assertions, which report context and stop the test.
func TestCacheKey(t *testing.T) {
	ctx := MockContext()
	ctx.GetSessionVars().SnapshotTS = 0
	ctx.GetSessionVars().SQLMode = mysql.ModeNone
	ctx.GetSessionVars().TimeZone = time.UTC
	ctx.GetSessionVars().ConnectionID = 0
	ctx.GetSessionVars().InRestrictedSQL = false
	variable.RestrictedReadOnly.Store(false)
	variable.VarTiDBSuperReadOnly.Store(false)

	// An empty statement text must be rejected.
	_, err := NewPlanCacheKey(ctx.GetSessionVars(), "", "test", 1, 1, "", 0)
	require.EqualError(t, err, "no statement text")

	// An empty schema name is allowed.
	_, err = NewPlanCacheKey(ctx.GetSessionVars(), "select 1", "", 1, 1, "", 0)
	require.NoError(t, err)

	// A binding hint does not invalidate key construction.
	_, err = NewPlanCacheKey(ctx.GetSessionVars(), "select 1", "test", 1, 1,
		"select /*+ ignore_plan_cache() */ * from t", 0)
	require.NoError(t, err)

	key, err := NewPlanCacheKey(ctx.GetSessionVars(), "select 1", "test", 1, 1, "", 0)
	require.NoError(t, err)
	require.Equal(t, []byte{0x74, 0x65, 0x73, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x20, 0x31, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x74, 0x69, 0x64, 0x62, 0x74, 0x69, 0x6b, 0x76, 0x74, 0x69, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, key.Hash())
}
|
package release
import (
"archive/zip"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/ExploratoryEngineering/reto/pkg/gitutil"
"github.com/ExploratoryEngineering/reto/pkg/toolbox"
)
// Build builds a new release from the current setup.
//
// It ensures the archive directory exists (creating it when missing),
// verifies templates, config and a clean Git tree, confirms the current
// version is unreleased, merges the templates, builds one zip archive
// per target, writes a checksum file, and finally (optionally) tags the
// release and commits the generated files with an automatic version bump.
func Build(tagVersion, commitNewRelease bool) error {
	// Bug fix: the original returned the os.Stat error even after the
	// directory had been created successfully, and dereferenced a nil
	// FileInfo when the stat failed for any other reason.
	fi, err := os.Stat(archiveDir)
	switch {
	case os.IsNotExist(err):
		if err := os.Mkdir(archiveDir, toolbox.DefaultDirPerm); err != nil {
			toolbox.PrintError("Could not create archive directory: %v", err)
			return err
		}
	case err != nil:
		toolbox.PrintError("Can't check status of archive dir: %v", err)
		return err
	case !fi.IsDir():
		toolbox.PrintError("%s is not a directory", archiveDir)
		return errors.New("no archive")
	}
	ctx, err := GetContext()
	if err != nil {
		return err
	}
	if ctx.Released {
		toolbox.PrintError("This version is already released. Bump the version and try again.")
		return errors.New("already released")
	}
	if err := TemplatesComplete(ctx, true); err != nil {
		return err
	}
	if err := VerifyConfig(ctx.Config, true); err != nil {
		return err
	}
	if gitutil.HasChanges(ctx.Config.SourceRoot, true) {
		toolbox.PrintError("There are uncommitted or unstaged changes in the current Git branch")
		return errors.New("uncommitted changes")
	}
	if !NewFileVersions(ctx.Config, true) {
		return errors.New("no file changes")
	}
	if err := os.Mkdir(fmt.Sprintf("%s/%s", releaseDir, ctx.Version), toolbox.DefaultDirPerm); err != nil {
		toolbox.PrintError("Could not create release directory: %v", err)
		return err
	}
	archivePath := fmt.Sprintf("%s/%s", archiveDir, ctx.Version)
	if err := os.Mkdir(archivePath, toolbox.DefaultDirPerm); err != nil {
		toolbox.PrintError("Unable to create archive directory %s: %v", archivePath, err)
		return err
	}
	var checksumFiles []string // Files that should be checksummed
	var tempFiles []string     // temp files that should be deleted when done
	for _, template := range ctx.Config.Templates {
		workingCopy := fmt.Sprintf("release/%s", template.Name)
		releasedCopy := fmt.Sprintf("%s/%s/%s", releaseDir, ctx.Version, template.Name)
		archiveCopy := fmt.Sprintf("%s/%s/%s", archiveDir, ctx.Version, template.Name)
		if err := mergeTemplate(workingCopy, releasedCopy, ctx); err != nil {
			return err
		}
		if err := os.Remove(workingCopy); err != nil {
			// Bug fix: print the template name -- the original passed the
			// whole struct to a %s verb, producing unreadable output.
			toolbox.PrintError("Could not remove template %s: %v", template.Name, err)
			return err
		}
		if err := toolbox.CopyFile(fmt.Sprintf("%s/%s", templateDir, template.Name), workingCopy); err != nil {
			toolbox.PrintError("Could not copy %s to release directory: %v", workingCopy, err)
			return err
		}
		if template.TemplateAction == ConcatenateAction {
			if err := concatenateTemplate(template.Name, archiveCopy); err != nil {
				return err
			}
			tempFiles = append(tempFiles, archiveCopy)
			checksumFiles = append(checksumFiles, archiveCopy)
		} else {
			checksumFiles = append(checksumFiles, releasedCopy)
		}
	}
	for _, target := range ctx.Config.Targets {
		if err := buildRelease(ctx, target, archivePath, checksumFiles); err != nil {
			return err
		}
	}
	for _, file := range ctx.Config.Files {
		checksumFiles = append(checksumFiles, file.Name)
	}
	if err := generateChecksumFile(ctx, checksumFiles); err != nil {
		return err
	}
	// Remove the generated files in the archive folder
	for _, v := range tempFiles {
		if err := os.Remove(v); err != nil {
			toolbox.PrintError("Could not remove temporary file at %s: %v", v, err)
			return err
		}
	}
	// Tag the commit with the new version
	if tagVersion {
		if err := gitutil.TagVersion(
			ctx.Config.SourceRoot,
			fmt.Sprintf("v%s", ctx.Version),
			fmt.Sprintf("Release v%s (%s)", ctx.Version, ctx.Name)); err != nil {
			return err
		}
	}
	if commitNewRelease {
		commitMessage := fmt.Sprintf(
			`Release %s
Released version %s (%s)
`, ctx.Version, ctx.Version, ctx.Name)
		var filesToCommit []string
		for _, v := range ctx.Config.Templates {
			filesToCommit = append(filesToCommit, fmt.Sprintf("%s/%s/%s", releaseDir, ctx.Version, filepath.Base(v.Name)))
			filesToCommit = append(filesToCommit, fmt.Sprintf("release/%s", filepath.Base(v.Name)))
		}
		hash, err := gitutil.CreateCommit(
			ctx.Config.SourceRoot,
			commitMessage,
			filesToCommit...)
		if err != nil {
			toolbox.PrintError("Could not commit the new release files: %v", err)
			return err
		}
		fmt.Printf("New change log is committed as %s\n", hash[:6])
		// A failed auto-bump is reported but deliberately does not fail
		// the build -- the release itself already succeeded.
		newCtx, err := BumpVersion(false, false, true)
		if err != nil {
			toolbox.PrintError("Could not autobump version: %v", err)
			return nil
		}
		fmt.Printf("auto-bumped new version to %s\n", newCtx.Version)
	}
	return nil
}
// writeZipped adds a single deflate-compressed entry with the given
// header and contents to the zip archive.
func writeZipped(z *zip.Writer, header *zip.FileHeader, buf []byte) error {
	header.Method = zip.Deflate
	entry, err := z.CreateHeader(header)
	if err != nil {
		toolbox.PrintError("Could not create zip entry %s: %v", header.Name, err)
		return err
	}
	_, err = entry.Write(buf)
	return err
}
// buildRelease builds a release archive for a particular target.
//
// The archive contains the (already merged) template files in tempFiles
// plus every configured file whose target matches, and carries a
// descriptive zip comment.
func buildRelease(ctx *Context, target, archivePath string, tempFiles []string) error {
	archive := fmt.Sprintf("%s/%s-%s_%s.zip", archivePath, ctx.Version, ctx.Config.Name, target)
	fmt.Printf("Building release archive %s \n", archive)
	f, err := os.Create(archive)
	if err != nil {
		toolbox.PrintError("Unable to create archive file %s: %v", archive, err)
		return err
	}
	// Bug fix: zip.Writer.Close does not close the underlying file, so
	// close it explicitly -- the original leaked the file handle.
	defer f.Close()
	zipWriter := zip.NewWriter(f)
	comment := fmt.Sprintf("This archive contains an %s build for %s v%s (%s). Please see changelog for details.", ctx.Config.Name, target, ctx.Version, ctx.Name)
	// Bug fix: SetComment returns an error (comment too long) that the
	// original silently dropped.
	if err := zipWriter.SetComment(comment); err != nil {
		return err
	}
	defer zipWriter.Close()
	for _, tempFile := range tempFiles {
		buf, err := ioutil.ReadFile(tempFile)
		if err != nil {
			toolbox.PrintError("Could not read temp file %s: %v", tempFile, err)
			return err
		}
		fmt.Printf(" - [template] %s\n", filepath.Base(tempFile))
		tempHeader := &zip.FileHeader{
			Name:               filepath.Base(tempFile),
			Method:             zip.Deflate,
			UncompressedSize64: uint64(len(buf)),
			Modified:           time.Now(),
		}
		if err := writeZipped(zipWriter, tempHeader, buf); err != nil {
			return err
		}
	}
	for _, v := range ctx.Config.Files {
		if v.Target == anyTarget || v.Target == target {
			fmt.Printf(" - [%s] %s\n", v.ID, v.Name)
			buf, err := ioutil.ReadFile(v.Name)
			if err != nil {
				toolbox.PrintError("Unable to read file %s: %v", v.Name, err)
				return err
			}
			fi, err := os.Stat(v.Name)
			if err != nil {
				toolbox.PrintError("Could not stat %s: %v", v.Name, err)
				return err
			}
			header, err := zip.FileInfoHeader(fi)
			if err != nil {
				toolbox.PrintError("Could not create file info header for %s: %v", v.Name, err)
				return err
			}
			// Bug fix: this error was ignored, silently producing a
			// broken archive entry on failure.
			if err := writeZipped(zipWriter, header, buf); err != nil {
				return err
			}
		}
	}
	return nil
}
|
package middlewares
import (
"devbook-api/src/authentication"
"devbook-api/src/responses"
"log"
"net/http"
)
// Logger wraps next and logs the method, request URI and host of every
// incoming request before delegating to it.
func Logger(next http.HandlerFunc) http.HandlerFunc {
	return func(rw http.ResponseWriter, req *http.Request) {
		log.Printf("\n %s %s %s", req.Method, req.RequestURI, req.Host)
		next(rw, req)
	}
}
// Authenticate wraps next and rejects the request with 401 Unauthorized
// unless it carries a valid token.
func Authenticate(next http.HandlerFunc) http.HandlerFunc {
	return func(rw http.ResponseWriter, req *http.Request) {
		err := authentication.ValidToken(req)
		if err != nil {
			responses.Error(rw, http.StatusUnauthorized, err)
			return
		}
		next(rw, req)
	}
}
|
package animal
import (
"encoding/json"
"errors"
"github.com/game-explorer/animal-chess-server/model"
"github.com/game-explorer/animal-chess-server/repository"
)
// MessageRsp pairs a message with the id of the player it should be
// delivered to.
type MessageRsp struct {
	ToPlayerId int64         // recipient player id
	Msg        model.Message // payload to deliver
}
// buildJson marshals i to JSON, deliberately ignoring marshal errors
// (best effort -- a failure yields nil).
func buildJson(i interface{}) []byte {
	data, _ := json.Marshal(i)
	return data
}
// buildRsp fans a single message out into one MessageRsp per listed
// player id. Returns nil for an empty id list.
func buildRsp(toPlayerIds []int64, msg model.Message) (r []MessageRsp) {
	for _, id := range toPlayerIds {
		r = append(r, MessageRsp{ToPlayerId: id, Msg: msg})
	}
	return r
}
// getPlayerIdsInRoom returns the ids of all players currently in roomId.
func getPlayerIdsInRoom(r repository.Interface, roomId int64) ([]int64, error) {
	players, err := r.GetPlayerByRoomId(roomId)
	if err != nil {
		return nil, err
	}
	ids := make([]int64, len(players))
	for i, p := range players {
		ids[i] = p.Id
	}
	return ids, nil
}
// getRoomByPlayer looks up the room the given player currently occupies.
// exist is false when the player's room id does not resolve to a room.
func getRoomByPlayer(r repository.Interface, playerId int64) (model.Room, bool, error) {
	p, _, err := r.GetPlayer(playerId)
	if err != nil {
		return model.Room{}, false, err
	}
	return r.GetRoom(p.InRoomId)
}
// getRoomByPlayerMust is like getRoomByPlayer but turns a missing room
// into an error.
func getRoomByPlayerMust(r repository.Interface, playerId int64) (model.Room, error) {
	room, exist, err := getRoomByPlayer(r, playerId)
	if err != nil {
		return room, err
	}
	if !exist {
		return room, errors.New("not found room")
	}
	return room, nil
}
// getPlayerIdsByPlayer returns the ids of every player sharing a room
// with playerId.
//
// Consistency fix: reuses getRoomByPlayerMust instead of duplicating the
// player -> room lookup and the "not found room" handling inline; the
// error paths and messages are unchanged.
func getPlayerIdsByPlayer(r repository.Interface, playerId int64) ([]int64, error) {
	room, err := getRoomByPlayerMust(r, playerId)
	if err != nil {
		return nil, err
	}
	return getPlayerIdsInRoom(r, room.Id)
}
|
package main
import (
"os"
"fmt"
"errors"
"strings"
)
// Config holds the runtime configuration, populated from environment
// variables on top of built-in defaults (see loadDefault/populate).
type Config struct {
	LogLevel    int    // verbosity, 1..3 (see validate)
	ListenHost  string // HTTP listen host
	ListenPort  int    // HTTP listen port
	GitLabUrl   string // GitLab base URL, without trailing slash
	GitLabToken string // GitLab API token (required)
	LabelPrefix string // prefix for managed labels
	LabelColor  string // color for created labels, e.g. "#DDDDDD"
	IgnoreUser  string // user name to ignore events from
}
// loadDefault resets the config to its built-in defaults.
// Note: GitLabToken deliberately has no default -- it must be provided.
func (config *Config) loadDefault() {
	config.LogLevel = LOG_DEBUG
	config.ListenHost = "localhost"
	config.ListenPort = 8081
	config.GitLabUrl = "https://gitlab.com/"
	config.LabelPrefix = "-"
	config.LabelColor = "#DDDDDD"
	config.IgnoreUser = "~"
}
// populate fills the config from environment variables on top of the
// built-in defaults, normalizes the GitLab URL (no trailing slash) and
// validates the result.
func (config *Config) populate() error {
	config.loadDefault()
	config.LogLevel = getEnvInt("LOG_LEVEL", config.LogLevel)
	config.ListenHost = getEnvString("LISTEN_HOST", config.ListenHost)
	config.ListenPort = getEnvInt("LISTEN_PORT", config.ListenPort)
	config.GitLabUrl = strings.TrimRight(getEnvString("GITLAB_URL", config.GitLabUrl), "/")
	config.GitLabToken = getEnvString("GITLAB_TOKEN", config.GitLabToken)
	config.LabelPrefix = getEnvString("LABEL_PREFIX", config.LabelPrefix)
	config.LabelColor = getEnvString("LABEL_COLOR", config.LabelColor)
	config.IgnoreUser = getEnvString("IGNORE_USER", config.IgnoreUser)
	return config.validate()
}
// validate checks the populated configuration and returns a descriptive
// error for the first invalid value found.
func (config *Config) validate() error {
	if config.LogLevel < 1 || config.LogLevel > 3 {
		return errors.New("invalid LOG_LEVEL. Should be a number between 1 and 3")
	}
	// Bug fix: the original only rejected 0, letting negative and
	// out-of-range port numbers through.
	if config.ListenPort < 1 || config.ListenPort > 65535 {
		return errors.New("invalid LISTEN_PORT. Should be a number between 1 and 65535")
	}
	if config.GitLabUrl == "" || (!strings.HasPrefix(config.GitLabUrl, "http://") && !strings.HasPrefix(config.GitLabUrl, "https://")) {
		return errors.New("invalid GITLAB_URL. Should be a URL starting with http:// or https://")
	}
	if config.GitLabToken == "" {
		return errors.New("empty GITLAB_TOKEN")
	}
	return nil
}
// getEnvString returns the value of the named environment variable,
// falling back to def when it is unset or empty.
func getEnvString(name string, def string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return def
}
// getEnvInt returns the named environment variable parsed as an int,
// falling back to def when the variable is unset or empty.
//
// Bug fix: the original ignored the fmt.Sscan error, so a non-numeric
// value silently became 0 instead of the default; a parse failure now
// yields def.
func getEnvInt(name string, def int) int {
	envVal := os.Getenv(name)
	if envVal == "" {
		return def
	}
	var result int
	if _, err := fmt.Sscan(envVal, &result); err != nil {
		return def
	}
	return result
}
|
package main
import (
"github.com/gorilla/mux"
"github.com/hellofresh/health-go"
"time"
)
// status registers a health check named "server" and exposes it on the
// /status route of the given router.
func status(r *mux.Router) {
	check := func() error {
		// rabbitmq health check implementation goes here
		return nil
	}
	health.Register(health.Config{
		Name:      "server",
		Timeout:   time.Second * 5,
		SkipOnErr: false,
		Check:     check,
	})
	r.Handle("/status", health.Handler())
}
|
package module
import (
"fmt"
"buddin.us/eolian/dsp"
"github.com/mitchellh/mapstructure"
)
// init registers the PanMix patcher. The optional "Size" config value
// selects the number of input channels (default 4).
func init() {
	Register("PanMix", func(c Config) (Patcher, error) {
		var config struct {
			Size int
		}
		if err := mapstructure.Decode(c, &config); err != nil {
			return nil, err
		}
		size := config.Size
		if size == 0 {
			size = 4
		}
		return newPanMix(size)
	})
}
// panMix is a mixer with per-channel level and pan controls, a master
// level, and two output frames (a and b).
type panMix struct {
	multiOutIO
	master                *In   // master output level
	sources, levels, pans []*In // parallel per-channel inputs
	a, b                  dsp.Frame // the two output frames
}
// newPanMix builds a pan mixer with size channels, each exposing
// "<i>/input", "<i>/level" (default 1) and "<i>/pan" (default 0)
// inputs, plus a "master" level and outputs "a" and "b".
func newPanMix(size int) (*panMix, error) {
	m := &panMix{
		master: NewInBuffer("master", dsp.Float64(1)),
		a:      dsp.NewFrame(),
		b:      dsp.NewFrame(),
	}
	inputs := make([]*In, 0, 1+3*size)
	inputs = append(inputs, m.master)
	for i := 0; i < size; i++ {
		source := NewInBuffer(fmt.Sprintf("%d/input", i), dsp.Float64(0))
		level := NewInBuffer(fmt.Sprintf("%d/level", i), dsp.Float64(1))
		pan := NewInBuffer(fmt.Sprintf("%d/pan", i), dsp.Float64(0))
		m.sources = append(m.sources, source)
		m.levels = append(m.levels, level)
		m.pans = append(m.pans, pan)
		inputs = append(inputs, source, level, pan)
	}
	return m, m.Expose("PanMix", inputs, []*Out{
		{Name: "a", Provider: provideCopyOut(m, &m.a)},
		{Name: "b", Provider: provideCopyOut(m, &m.b)},
	})
}
// Process fills the two output frames (a/b) with the pan-weighted sum
// of all sources, each scaled by its level, with the totals scaled by
// the master level.
func (m *panMix) Process(out dsp.Frame) {
	m.incrRead(func() {
		master := m.master.ProcessFrame()
		// Pull one frame from every source, level and pan control first.
		for i := 0; i < len(m.sources); i++ {
			m.sources[i].ProcessFrame()
			m.levels[i].ProcessFrame()
			m.pans[i].ProcessFrame()
		}
		for i := range out {
			var aSum, bSum dsp.Float64
			for j := 0; j < len(m.sources); j++ {
				signal := m.sources[j].LastFrame()[i] * m.levels[j].LastFrame()[i]
				bias := m.pans[j].LastFrame()[i]
				// Linear pan: positive bias attenuates output a,
				// negative bias attenuates output b, zero is centered.
				// (Assumes bias stays within [-1, 1] -- TODO confirm.)
				if bias > 0 {
					aSum += (1 - bias) * signal
					bSum += signal
				} else if bias < 0 {
					aSum += signal
					bSum += (1 + bias) * signal
				} else {
					aSum += signal
					bSum += signal
				}
			}
			m.a[i] = aSum * master[i]
			m.b[i] = bSum * master[i]
		}
	})
}
|
package couchdb
import (
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// CouchDB is a minimal client for a single CouchDB database.
type CouchDB struct {
	client *http.Client // HTTP client with request timeout applied
	url    *url.URL     // base URL including credentials and database path
}

// CouchDBConfig configures NewCouchDB.
type CouchDBConfig struct {
	Host     string // server URL, e.g. "http://localhost:5984"
	Database string // database name, appended to the URL path
	Username string // basic-auth user
	Password string // basic-auth password
	Timeout  int    // request timeout in seconds; 0 means 10
}
// NewCouchDB creates a new CouchDB handle from the given config.
// A Timeout of 0 defaults to 10 seconds; an unparsable Host yields
// ErrInitFailed.
func NewCouchDB(cfg CouchDBConfig) (*CouchDB, error) {
	timeout := cfg.Timeout
	if timeout == 0 {
		timeout = 10
	}
	u, err := url.Parse(cfg.Host)
	if err != nil {
		return nil, ErrInitFailed
	}
	u.User = url.UserPassword(cfg.Username, cfg.Password)
	u.Path = "/" + cfg.Database
	return &CouchDB{
		client: &http.Client{Timeout: time.Duration(timeout) * time.Second},
		url:    u,
	}, nil
}
// DBResp is the shape of a CouchDB view/_all_docs response: a list of
// rows, each carrying the document id, key, value and (when
// include_docs is set) the document itself.
type DBResp struct {
	Rows []struct {
		Id    string      `json:"id"`
		Key   string      `json:"key"`
		Value interface{} `json:"value"`
		Doc   interface{} `json:"doc"`
	} `json:"rows"`
}
// requestDB issues a GET against the database at the given path and
// returns the raw response body. Failures are collapsed into the
// package sentinel errors (errNotFound for HTTP 404, errRequestFailed
// otherwise).
func (c *CouchDB) requestDB(path string) ([]byte, error) {
	if path == "" {
		return nil, errRequestFailed
	}
	resp, err := c.client.Get(c.url.String() + path)
	if err != nil {
		return nil, errRequestFailed
	}
	defer resp.Body.Close()
	switch {
	case resp.StatusCode == 404:
		return nil, errNotFound
	case resp.StatusCode != 200:
		return nil, errRequestFailed
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errRequestFailed
	}
	return body, nil
}
// GetAllDocs fetches every document in the database (include_docs=true)
// and returns just the documents.
func (c *CouchDB) GetAllDocs() ([]interface{}, error) {
	raw, err := c.requestDB("/_all_docs?include_docs=true")
	if err != nil {
		return nil, ErrCannotGetDocs
	}
	var parsed DBResp
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return nil, ErrCannotGetDocs
	}
	docs := make([]interface{}, len(parsed.Rows))
	for i, row := range parsed.Rows {
		docs[i] = row.Doc
	}
	return docs, nil
}
// GetDoc fetches a single document by id. A missing document maps to
// ErrDocNotFound; any other failure maps to ErrCannotGetDocs.
func (c *CouchDB) GetDoc(docID string) (interface{}, error) {
	raw, err := c.requestDB("/" + docID)
	if err != nil {
		if err == errNotFound {
			return nil, ErrDocNotFound
		}
		return nil, ErrCannotGetDocs
	}
	var doc interface{}
	if err := json.Unmarshal(raw, &doc); err != nil {
		return nil, ErrCannotGetDocs
	}
	return doc, nil
}
|
package fakes
// FakeTemplateDeleter is a test double that records the last deleted
// template name and returns a configurable error.
type FakeTemplateDeleter struct {
	DeleteArgument string // last templateName passed to Delete
	DeleteError    error  // error Delete should return
}

// NewFakeTemplateDeleter returns a zero-valued fake.
func NewFakeTemplateDeleter() *FakeTemplateDeleter {
	fake := FakeTemplateDeleter{}
	return &fake
}

// Delete records templateName and returns the configured DeleteError.
func (fake *FakeTemplateDeleter) Delete(templateName string) error {
	fake.DeleteArgument = templateName
	return fake.DeleteError
}
|
// Copyright 2016 The G3N Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package constraint
import (
"github.com/hecate-tech/engine/experimental/physics/equation"
)
// Distance is a distance constraint.
// Constrains two bodies to be at a constant distance from each others center of mass.
// Distance is a distance constraint.
// Constrains two bodies to be at a constant distance from each others center of mass.
type Distance struct {
	Constraint
	distance float32 // Distance between the two bodies' centers of mass
	equation *equation.Contact // the single contact equation driving the constraint
}
// NewDistance creates and returns a pointer to a new Distance constraint object
// that keeps bodyA and bodyB at the given distance, with the constraint
// force clamped to +-maxForce.
func NewDistance(bodyA, bodyB IBody, distance, maxForce float32) *Distance {
	dc := new(Distance)
	dc.initialize(bodyA, bodyB, true, true)
	// Default distance should be: bodyA.position.distanceTo(bodyB.position)
	// Default maxForce should be: 1e6
	dc.distance = distance
	dc.equation = equation.NewContact(bodyA, bodyB, -maxForce, maxForce) // Make it bidirectional
	dc.AddEquation(dc.equation)
	return dc
}
// Update recomputes the equation's contact points from the current body
// positions: each point sits half the target distance from its body
// along the normalized A->B direction.
func (dc *Distance) Update() {
	posA := dc.bodyA.Position()
	posB := dc.bodyB.Position()
	normal := posB.Sub(&posA)
	normal.Normalize()
	half := dc.distance * 0.5
	dc.equation.SetRA(normal.Clone().MultiplyScalar(half))
	dc.equation.SetRB(normal.Clone().MultiplyScalar(-half))
}
|
package main
import (
"app/base/core"
"app/listener"
"app/manager"
"log"
"os"
)
// main configures the application and dispatches to the subcommand
// named by the first argument ("listener" or "manager"); anything else
// is fatal.
func main() {
	core.ConfigureApp()
	if len(os.Args) < 2 {
		log.Fatal("You need to provide a command")
	}
	switch os.Args[1] {
	case "listener":
		listener.RunListener()
	case "manager":
		manager.RunManager()
	default:
		log.Fatal("You need to provide a command")
	}
}
|
package kubectl
import (
"context"
"io"
"net"
"net/http"
"net/url"
"sort"
"time"
"github.com/devspace-cloud/devspace/pkg/devspace/config/generated"
"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/util"
"github.com/devspace-cloud/devspace/pkg/devspace/kubectl/portforward"
"github.com/devspace-cloud/devspace/pkg/util/kubeconfig"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/survey"
"github.com/mgutz/ansi"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// Client holds all kubectl functions: accessors for the resolved
// context/namespace/config, pod exec/copy/log helpers, port forwarding
// and a handful of cluster utilities.
type Client interface {
	// Accessors for the resolved connection state.
	CurrentContext() string
	KubeClient() kubernetes.Interface
	Namespace() string
	RestConfig() *rest.Config
	KubeConfigLoader() kubeconfig.Loader
	PrintWarning(generatedConfig *generated.Config, noWarning, shouldWait bool, log log.Logger) error
	// File transfer into pods.
	CopyFromReader(pod *k8sv1.Pod, container, containerPath string, reader io.Reader) error
	Copy(pod *k8sv1.Pod, container, containerPath, localPath string, exclude []string) error
	// Command execution inside containers.
	ExecStreamWithTransport(options *ExecStreamWithTransportOptions) error
	ExecStream(options *ExecStreamOptions) error
	ExecBuffered(pod *k8sv1.Pod, container string, command []string, input io.Reader) ([]byte, []byte, error)
	GenericRequest(options *GenericRequestOptions) (string, error)
	// Container log retrieval.
	ReadLogs(namespace, podName, containerName string, lastContainerLog bool, tail *int64) (string, error)
	LogMultipleTimeout(imageSelector []string, interrupt chan error, tail *int64, writer io.Writer, timeout time.Duration, log log.Logger) error
	LogMultiple(imageSelector []string, interrupt chan error, tail *int64, writer io.Writer, log log.Logger) error
	Logs(ctx context.Context, namespace, podName, containerName string, lastContainerLog bool, tail *int64, follow bool) (io.ReadCloser, error)
	GetUpgraderWrapper() (http.RoundTripper, UpgraderWrapper, error)
	// Cluster utilities.
	EnsureDefaultNamespace(log log.Logger) error
	EnsureGoogleCloudClusterRoleBinding(log log.Logger) error
	GetRunningPodsWithImage(imageNames []string, namespace string, maxWaiting time.Duration) ([]*k8sv1.Pod, error)
	GetNewestRunningPod(labelSelector string, imageSelector []string, namespace string, maxWaiting time.Duration) (*k8sv1.Pod, error)
	NewPortForwarder(pod *k8sv1.Pod, ports []string, addresses []string, stopChan chan struct{}, readyChan chan struct{}, errorChan chan error) (*portforward.PortForwarder, error)
	IsLocalKubernetes() bool
}
// client is the default Client implementation, built from a resolved
// kube context.
type client struct {
	Client         kubernetes.Interface    // the underlying clientset
	ClientConfig   clientcmd.ClientConfig  // the client config the clientset was built from
	restConfig     *rest.Config            // REST config derived from ClientConfig
	kubeLoader     kubeconfig.Loader       // loader used to read/save the kube config
	currentContext string                  // resolved context name
	namespace      string                  // resolved namespace
}
// NewDefaultClient creates the new default kube client from the active context @Factory
// (no context/namespace override, no context switching).
func NewDefaultClient() (Client, error) {
	return NewClientFromContext("", "", false, kubeconfig.NewLoader())
}
// NewClientFromContext creates a new kubernetes client from given context @Factory
//
// context and namespace override the kube config's current values when
// non-empty; when switchContext is set those overrides are also written
// back to the kube config on disk.
func NewClientFromContext(context, namespace string, switchContext bool, kubeLoader kubeconfig.Loader) (Client, error) {
	// Load new raw config
	kubeConfigOriginal, err := kubeLoader.LoadRawConfig()
	if err != nil {
		return nil, err
	}
	// We clone the config here to avoid changing the single loaded config
	kubeConfig := clientcmdapi.Config{}
	err = util.Convert(&kubeConfigOriginal, &kubeConfig)
	if err != nil {
		return nil, err
	}
	if len(kubeConfig.Clusters) == 0 {
		return nil, errors.Errorf("kube config is invalid: please make sure you have an existing valid kube config")
	}
	// If we should use a certain kube context use that
	var (
		activeContext   = kubeConfig.CurrentContext
		activeNamespace = metav1.NamespaceDefault
		saveConfig      = false
	)
	// Set active context
	if context != "" && activeContext != context {
		activeContext = context
		if switchContext {
			kubeConfig.CurrentContext = activeContext
			saveConfig = true
		}
	}
	// Set active namespace (only when the context actually exists in the
	// config; a missing context is reported further down).
	if kubeConfig.Contexts[activeContext] != nil {
		if kubeConfig.Contexts[activeContext].Namespace != "" {
			activeNamespace = kubeConfig.Contexts[activeContext].Namespace
		}
		if namespace != "" && activeNamespace != namespace {
			activeNamespace = namespace
			kubeConfig.Contexts[activeContext].Namespace = activeNamespace
			if switchContext {
				saveConfig = true
			}
		}
	}
	// Should we save the kube config?
	if saveConfig {
		err = kubeLoader.SaveConfig(&kubeConfig)
		if err != nil {
			return nil, errors.Errorf("Error saving kube config: %v", err)
		}
	}
	clientConfig := clientcmd.NewNonInteractiveClientConfig(kubeConfig, activeContext, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules())
	// Reject contexts that do not exist in the (possibly overridden) config.
	if kubeConfig.Contexts[activeContext] == nil {
		return nil, errors.Errorf("Error loading kube config, context '%s' doesn't exist", activeContext)
	}
	restConfig, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, err
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return nil, errors.Wrap(err, "new client")
	}
	return &client{
		Client:         kubeClient,
		ClientConfig:   clientConfig,
		restConfig:     restConfig,
		kubeLoader:     kubeLoader,
		namespace:      activeNamespace,
		currentContext: activeContext,
	}, nil
}
// NewClientBySelect creates a new kubernetes client by user select @Factory
//
// The user is prompted to pick one of the contexts from the kube
// config. Unless allowPrivate is set, contexts pointing at private IP
// addresses are rejected and the question is asked again.
//
// Idiom fixes: `for true` -> `for`, `allowPrivate == false` ->
// `!allowPrivate`, the unreachable trailing return is gone, and the
// local `url` no longer shadows the net/url package.
func NewClientBySelect(allowPrivate bool, switchContext bool, kubeLoader kubeconfig.Loader, log log.Logger) (Client, error) {
	kubeConfig, err := kubeLoader.LoadRawConfig()
	if err != nil {
		return nil, err
	}
	// Get all kube contexts
	options := make([]string, 0, len(kubeConfig.Contexts))
	for context := range kubeConfig.Contexts {
		options = append(options, context)
	}
	if len(options) == 0 {
		return nil, errors.New("No kubectl context found. Make sure kubectl is installed and you have a working kubernetes context configured")
	}
	sort.Strings(options)
	for {
		kubeContext, err := log.Question(&survey.QuestionOptions{
			Question:     "Which kube context do you want to use",
			DefaultValue: kubeConfig.CurrentContext,
			Options:      options,
		})
		if err != nil {
			return nil, err
		}
		// Check if cluster is in private network
		if !allowPrivate {
			context := kubeConfig.Contexts[kubeContext]
			cluster := kubeConfig.Clusters[context.Cluster]
			server, err := url.Parse(cluster.Server)
			if err != nil {
				return nil, errors.Wrap(err, "url parse")
			}
			ip := net.ParseIP(server.Hostname())
			if ip != nil && IsPrivateIP(ip) {
				log.Infof("Clusters with private ips (%s) cannot be used", server.Hostname())
				continue
			}
		}
		return NewClientFromContext(kubeContext, "", switchContext, kubeLoader)
	}
}
// PrintWarning prints a warning if the last kube context is different than this one
// (or the namespace changed, or the target is the 'default' namespace),
// then logs the context/namespace in use. When shouldWait is set the
// warnings pause for a few seconds so the user can react.
func (client *client) PrintWarning(generatedConfig *generated.Config, noWarning, shouldWait bool, log log.Logger) error {
	if generatedConfig != nil && log.GetLevel() >= logrus.InfoLevel && noWarning == false {
		// print warning if context or namespace has changed since last deployment process (expect if explicitly provided as flags)
		if generatedConfig.GetActive().LastContext != nil {
			wait := false
			if generatedConfig.GetActive().LastContext.Context != "" && generatedConfig.GetActive().LastContext.Context != client.currentContext {
				// Context changed since the last deployment.
				log.WriteString("\n")
				log.Warnf(ansi.Color("Are you using the correct kube context?", "white+b"))
				log.Warnf("Current kube context: '%s'", ansi.Color(client.currentContext, "white+b"))
				log.Warnf("Last kube context: '%s'", ansi.Color(generatedConfig.GetActive().LastContext.Context, "white+b"))
				log.WriteString("\n")
				log.Infof("Use the '%s' flag to switch to the context and namespace previously used to deploy this project", ansi.Color("-s / --switch-context", "white+b"))
				log.Infof("Or use the '%s' flag to ignore this warning", ansi.Color("--no-warn", "white+b"))
				wait = true
			} else if generatedConfig.GetActive().LastContext.Namespace != "" && generatedConfig.GetActive().LastContext.Namespace != client.namespace {
				// Same context, but the namespace changed.
				log.WriteString("\n")
				log.Warnf(ansi.Color("Are you using the correct namespace?", "white+b"))
				log.Warnf("Current namespace: '%s'", ansi.Color(client.namespace, "white+b"))
				log.Warnf("Last namespace: '%s'", ansi.Color(generatedConfig.GetActive().LastContext.Namespace, "white+b"))
				log.WriteString("\n")
				log.Infof("Use the '%s' flag to switch to the context and namespace previously used to deploy this project", ansi.Color("-s / --switch-context", "white+b"))
				log.Infof("Or use the '%s' flag to ignore this warning", ansi.Color("--no-warn", "white+b"))
				wait = true
			}
			if wait && shouldWait {
				log.StartWait("Will continue in 10 seconds...")
				time.Sleep(10 * time.Second)
				log.StopWait()
				log.WriteString("\n")
			}
		}
		// Warn if using default namespace unless previous deployment was also to default namespace
		if shouldWait && client.namespace == metav1.NamespaceDefault && (generatedConfig.GetActive().LastContext == nil || generatedConfig.GetActive().LastContext.Namespace != metav1.NamespaceDefault) {
			log.Warn("Deploying into the 'default' namespace is usually not a good idea as this namespace cannot be deleted\n")
			log.StartWait("Will continue in 5 seconds...")
			time.Sleep(5 * time.Second)
			log.StopWait()
		}
	}
	// Info messages
	log.Infof("Using kube context '%s'", ansi.Color(client.currentContext, "white+b"))
	log.Infof("Using namespace '%s'", ansi.Color(client.namespace, "white+b"))
	return nil
}
// CurrentContext returns the resolved kube context name.
func (client *client) CurrentContext() string {
	return client.currentContext
}

// KubeClient returns the underlying kubernetes clientset.
func (client *client) KubeClient() kubernetes.Interface {
	return client.Client
}

// Namespace returns the resolved namespace.
func (client *client) Namespace() string {
	return client.namespace
}

// RestConfig returns the REST config the client was built from.
func (client *client) RestConfig() *rest.Config {
	return client.restConfig
}

// KubeConfigLoader returns the loader used to read the kube config.
func (client *client) KubeConfigLoader() kubeconfig.Loader {
	return client.kubeLoader
}
|
package launchpad
import (
"gitlab.com/gomidi/midi"
)
// scrollingTextBuilderS accumulates a Launchpad S scrolling-text SysEx
// message and the MIDI output it will be written to.
type scrollingTextBuilderS struct {
	Seq          []byte   // SysEx bytes built so far
	outputStream midi.Out // destination MIDI output
}
// Text starts a one-shot scrolling text message in the given color.
func (l *LaunchpadS) Text(color Color) ScrollingTextBuilder {
	return l.text(color, false)
}

// TextLoop starts a looping scrolling text message in the given color.
func (l *LaunchpadS) TextLoop(color Color) ScrollingTextBuilder {
	return l.text(color, true)
}
// text begins a scrolling-text SysEx message for the given color; when
// loop is set the color byte is offset by 64, which marks the message
// as looping.
func (l *LaunchpadS) text(color Color, loop bool) ScrollingTextBuilder {
	colorByte := color.AsBytes()[0]
	if loop {
		colorByte += 64
	}
	seq := []byte{0xF0, 0x00, 0x20, 0x29, 0x09, colorByte}
	return &scrollingTextBuilderS{
		Seq:          seq,
		outputStream: l.outputStream,
	}
}
// Add appends a text segment with the given scroll speed, clamped to
// the valid range 1..7.
func (s *scrollingTextBuilderS) Add(speed byte, text string) ScrollingTextBuilder {
	switch {
	case speed > 7:
		speed = 7
	case speed < 1:
		speed = 1
	}
	s.Seq = append(s.Seq, speed)
	s.Seq = append(s.Seq, text...)
	return s
}
// Perform terminates the SysEx message and writes it to the device.
func (s *scrollingTextBuilderS) Perform() error {
	s.Seq = append(s.Seq, 0xF7)
	// the syntax of the scrolling text message:
	// F0 00 20 29 09 <colour> <text inclusive speed ...> F7
	_, err := s.outputStream.Write(s.Seq)
	return err
}
|
package main
import(
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/gob"
"encoding/pem"
"fmt"
"os"
)
// Function main generates an RSA key with a size of 512 bits and checks for errors.
// Prints the generated private key primes and exponent.
// Prints the generated public key modulus and exponent.
// Saves the private and public key to files using the saveGobKey function,
// and the private key additionally in PEM form.
//
// NOTE(review): 512-bit RSA is far too small for any real use and is
// trivially factorable -- presumably this is a teaching demo; confirm
// before reusing.
func main() {
	reader := rand.Reader
	bitSize := 512
	key, err := rsa.GenerateKey(reader, bitSize)
	checkError(err)
	fmt.Println("Private key primes: ", key.Primes[0].String(), key.Primes[1].String())
	fmt.Println("Private key exponent: ", key.D.String())
	publicKey := key.PublicKey
	fmt.Println("Public key mod: ", publicKey.N.String())
	fmt.Println("Public key exponent: ", publicKey.E)
	saveGobKey("private.key", key)
	saveGobKey("public.key", publicKey)
	savePemKey("private.pem", key)
}
// saveGobKey creates fileName and writes key to it gob-encoded,
// exiting via checkError on any failure.
func saveGobKey(fileName string, key interface{}) {
	outFile, err := os.Create(fileName)
	checkError(err)
	defer outFile.Close()
	checkError(gob.NewEncoder(outFile).Encode(key))
}
// savePemKey creates fileName and writes the private key to it as a
// PKCS#1 PEM block, exiting via checkError on any failure.
//
// Bug fixes: the PEM type must be the standard all-caps label
// "RSA PRIVATE KEY" (the original "RSA Private Key" is not recognized
// by openssl/crypto tooling), and the pem.Encode error is now checked.
func savePemKey(fileName string, key *rsa.PrivateKey) {
	outFile, err := os.Create(fileName)
	checkError(err)
	privateKey := &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	}
	checkError(pem.Encode(outFile, privateKey))
	outFile.Close()
}
// checkError prints err and exits with status 1 when err is non-nil;
// a nil err is a no-op.
func checkError(err error) {
	if err == nil {
		return
	}
	fmt.Println("Error", err.Error())
	os.Exit(1)
}
|
package main
import (
"bytes"
"crypto/cipher"
"testing"
)
// TestXORCipher checks that XOR-ing a plaintext with repeating keys of
// length 2, 3 and 4 yields the expected ciphertexts.
func TestXORCipher(t *testing.T) {
	cases := []struct {
		stream    cipher.Stream
		src, want []byte
	}{
		{
			NewXORCipher([]byte{1, 2}),
			[]byte{1, 2, 3, 4, 5, 6},
			[]byte{0, 0, 2, 6, 4, 4},
		},
		{
			NewXORCipher([]byte{1, 2, 3}),
			[]byte{1, 2, 3, 4, 5, 6},
			[]byte{0, 0, 0, 5, 7, 5},
		},
		{
			NewXORCipher([]byte{1, 2, 3, 4}),
			[]byte{1, 2, 3, 4, 5, 6},
			[]byte{0, 0, 0, 0, 4, 4},
		},
	}
	for _, c := range cases {
		dst := make([]byte, len(c.src))
		c.stream.XORKeyStream(dst, c.src)
		if !bytes.Equal(dst, c.want) {
			t.Errorf("got %v, want %v", dst, c.want)
		}
	}
}
|
package inc
import (
"bufio"
"os"
"regexp"
)
func FGrepBool(file string, reg *regexp.Regexp) bool {
if f, err := os.Open(file); err == nil {
buf := bufio.NewReader(f)
for {
line, err := buf.ReadBytes('\n')
if err != nil {
return false
}
if reg.Match(line) {
return true
}
}
}
return false
}
func FGrepLine(file string, reg *regexp.Regexp) []byte {
if f, err := os.Open(file); err == nil {
buf := bufio.NewReader(f)
for {
line, err := buf.ReadBytes('\n')
if err != nil {
return []byte("")
}
if reg.Match(line) {
return line
}
}
}
return []byte("")
}
|
package checksum
import (
"bufio"
"math"
"strconv"
"strings"
)
// Checksum returns a spreadsheet checksum: for each row, the difference
// between the largest and smallest value; the checksum is the sum of
// all of these differences. (Assumes non-negative values -- the running
// maximum starts at 0.)
func Checksum(spreadsheet string) int {
	sum := 0
	hi, lo := 0, math.MaxInt32
	track := func(n int) {
		if n > hi {
			hi = n
		}
		if n < lo {
			lo = n
		}
	}
	endRow := func() {
		sum += hi - lo
		hi, lo = 0, math.MaxInt32
	}
	calculate(spreadsheet, track, endRow)
	return sum
}
// EvenlyDivisibleValues finds the only two numbers in each row
// where one evenly divides the other - that is,
// where the result of the division operation is a whole number -
// divides them, and adds up each line's result.
//
// For a found pair (a, b), exactly one of a/b and b/a is the whole
// quotient and the other integer-divides to 0, so adding both yields
// the quotient. (Assumes a row never contains two equal values -- TODO
// confirm against the input.)
//
// Bug fix: nums is now reset after every row. Previously it was only
// cleared when a dividing pair was found, so a row without one leaked
// its values into the next row's search.
func EvenlyDivisibleValues(spreadsheet string) int {
	var checksum = 0
	var nums []int
	calculate(spreadsheet, func(n int) { nums = append(nums, n) }, func() {
		defer func() { nums = nil }()
		for i, a := range nums {
			for _, b := range nums[i+1:] {
				if a%b == 0 || b%a == 0 {
					checksum += a/b + b/a
					return
				}
			}
		}
	})
	return checksum
}
func calculate(spreadsheet string, f func(n int), accumulator func()) {
scanner := bufio.NewScanner(strings.NewReader(spreadsheet))
for scanner.Scan() {
nums := bufio.NewScanner(strings.NewReader(scanner.Text()))
nums.Split(bufio.ScanWords)
for nums.Scan() {
n, _ := strconv.Atoi(nums.Text())
f(n)
}
accumulator()
}
}
|
package cmd
import (
"bufio"
"bytes"
"io"
"time"
)
type Reader struct {
reader io.Reader
BytesRead int
}
func newReader(r io.Reader) *Reader {
return &Reader{reader: r}
}
func (r *Reader) Read(p []byte) (n int, err error) {
n, err = r.reader.Read(p)
r.BytesRead += n
return n, err
}
// Message is one captured output line with its arrival time, running
// line number and originating stream.
type Message struct {
	Received time.Time // when the line was captured
	Line     int       // line number within the stream
	Stderr   bool      // true when the line came from stderr
	Message  string    // the line text, without the newline
}

// Writer turns written bytes into line Messages on a shared channel.
type Writer struct {
	line   int          // next line number
	stderr bool         // which stream this writer represents
	ch     chan Message // shared output channel
}

// NewWriter returns a stdout writer and a stderr writer that publish
// onto the same buffered channel.
func NewWriter() (*Writer, *Writer) {
	ch := make(chan Message, 1024)
	stdout := &Writer{line: 0, stderr: false, ch: ch}
	stderr := &Writer{line: 0, stderr: true, ch: ch}
	return stdout, stderr
}

// Chan exposes the message channel for reading.
func (w *Writer) Chan() <-chan Message {
	return w.ch
}
// Write splits p into lines and publishes each as a timestamped Message
// on the shared channel, tagged with the stream (stdout/stderr) and a
// running line number. The returned count is the number of bytes the
// scanner consumed from p.
func (w *Writer) Write(p []byte) (int, error) {
	reader := newReader(bytes.NewReader(p))
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		w.ch <- Message{
			Received: time.Now(),
			Line:     w.line,
			Stderr:   w.stderr,
			Message:  scanner.Text(),
		}
		w.line++
	}
	// Cleanup: bufio.Scanner never reports io.EOF (it maps EOF to a nil
	// Err), so the original's `scanner.Err() != io.EOF` guard was dead
	// code; a plain nil check is equivalent.
	if err := scanner.Err(); err != nil {
		return reader.BytesRead, err
	}
	return reader.BytesRead, nil
}
// Close closes the shared message channel, signalling readers that no
// further messages will arrive. Closing both writers of a pair would
// panic (double close) -- close only one.
func (w *Writer) Close() error {
	close(w.ch)
	return nil
}
|
package clubs
import (
"os"
"github.com/anihouse/bot/app"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
var (
	log  *logrus.Logger // module logger, set via SetLogger
	conf cfg            // module configuration, loaded via LoadConfig
)

// module is the clubs bot module: a wrapper around an app.Module with
// an enabled flag.
type module struct {
	app     *app.Module
	enabled bool
}
// ID returns the module identifier.
func (module) ID() string {
	return "clubs"
}

// IsEnabled reports whether the module is currently enabled.
func (m module) IsEnabled() bool {
	return m.enabled
}
// LoadConfig reads the module configuration from the YAML file at path
// into the package-level conf.
//
// Bug fix: the opened file is now closed; the original leaked the file
// handle on every call.
func (module) LoadConfig(path string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()
	return yaml.NewDecoder(file).Decode(&conf)
}
// SetLogger installs the package-level logger used by the module.
func (module) SetLogger(logger *logrus.Logger) {
	log = logger
}
// Init creates the underlying app module and wires up the command
// handlers.
//
// NOTE(review): the module id is taken from a package-level `_module`
// value not visible here rather than from m.ID() -- confirm that is
// intentional.
func (m *module) Init(prefix string) error {
	m.app = app.NewModule(_module.ID(), prefix)
	m.app.On("clubcreate").Handle(onClubCreeate)
	m.app.On("clubapply").Handle(onClubApply)
	return nil
}
// Enable marks the module enabled and enables the underlying app module.
func (m *module) Enable() {
	m.enabled = true
	m.app.Enable()
}

// Disable marks the module disabled and disables the underlying app module.
func (m *module) Disable() {
	m.enabled = false
	m.app.Disable()
}
|
package bytestrings
import (
"testing"
)
// TestWorkWithBuffer verifies that WorkWithBuffer completes without error.
func TestWorkWithBuffer(t *testing.T) {
	err := WorkWithBuffer()
	if err != nil {
		// Include the error value so failures are diagnosable; the
		// previous message dropped it.
		t.Errorf("unexpected error: %v", err)
	}
}
|
package main
import "fmt"
// main builds a one-element slice of names and prints it.
//
// Bug fix: the original assigned names[0] on a zero-length slice, which
// panics with "index out of range"; append grows the slice instead.
func main() {
	names := []string{}
	names = append(names, "Goku")
	fmt.Println(names)
}
|
package main
import (
"encoding/json"
"fmt"
)
// Serialization: Go struct variable -> JSON string.
// Deserialization: JSON string -> a struct variable Go can work with.
// person demonstrates marshaling; fields must be exported (start with an
// uppercase letter) so encoding/json can access them via reflection.
type person struct {
	Name string // exported so json.Marshal can read it
	Age  int
}
//下面的方法可以使变量名为小写字母开头
type person2 struct{
Name string `json:"name",db:"name",ini:"name"` //表示在json 数据库 ini配置文件以小写的
Age int `json:"age"`
}
func main(){
p1:=person{
Name: "hello",
Age: 18,
}
//序列化
b,err:=json.Marshal(p1)
if err!=nil{
fmt.Printf("Mashal fail,err:%v",err)
return
}
fmt.Printf("%v\n",string(b))
fmt.Printf("%#v\n",string(b))
//反序列化
str:=`{"name":"hello","age":18}`
var p2 person2
json.Unmarshal([]byte(str),&p2) //传指针是为了可以在json.Unmarshal函数内部修改p2的值
fmt.Printf("%v\n",p2)
fmt.Printf("%#v\n",p2)
} |
package worker
import (
"fmt"
"testing"
"time"
"github.com/brunoga/context"
)
// TestWorker_New_NilWorkerFunc verifies that New rejects a nil worker
// function with ErrNilWorkerFunc and returns no Worker.
func TestWorker_New_NilWorkerFunc(t *testing.T) {
	w, err := New(nil)
	if w != nil {
		t.Errorf("Expected nil Worker.")
	}
	if err != ErrNilWorkerFunc {
		t.Errorf("Expected ErrNilWorkerFunc error. Got %q.", err)
	}
}
// TestWorker_New_Success verifies that New accepts a non-nil worker
// function and returns a Worker with no error.
func TestWorker_New_Success(t *testing.T) {
	w, err := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	if w == nil {
		t.Errorf("Expected non-nil Worker.")
	}
}
// TestWorker_GetInputChannel_AllocChannel_Success verifies that the
// first GetInputChannel call allocates and returns a channel.
func TestWorker_GetInputChannel_AllocChannel_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	ic, err := w.GetInputChannel()
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	if ic == nil {
		t.Errorf("Expected non-nil input channel.")
	}
}
// TestWorker_GetInputChannel_ExistingChannel_Success verifies that
// repeated GetInputChannel calls return the same channel.
func TestWorker_GetInputChannel_ExistingChannel_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	ic1, _ := w.GetInputChannel()
	ic2, err := w.GetInputChannel()
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	if ic2 == nil {
		t.Errorf("Expected non-nil input channel.")
	}
	if ic2 != ic1 {
		t.Errorf(
			"First and second returned input channels do not match")
	}
}
// TestWorker_SetInputChannel_Success verifies that a caller-supplied
// input channel is stored and returned by GetInputChannel.
func TestWorker_SetInputChannel_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	ic1 := make(chan interface{})
	err := w.SetInputChannel(ic1)
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	ic2, _ := w.GetInputChannel()
	if ic1 != ic2 {
		t.Errorf("Internal and set input channels do not match")
	}
}
// TestWorker_GetOutputChannel_AllocChannel_Success verifies that the
// first GetOutputChannel call allocates and returns a channel.
func TestWorker_GetOutputChannel_AllocChannel_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	oc, err := w.GetOutputChannel()
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	if oc == nil {
		t.Errorf("Expected non-nil output channel.")
	}
}
// TestWorker_GetOutputChannel_ExistingChannel_Success verifies that
// repeated GetOutputChannel calls return the same channel.
func TestWorker_GetOutputChannel_ExistingChannel_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	oc1, _ := w.GetOutputChannel()
	oc2, err := w.GetOutputChannel()
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	if oc2 == nil {
		t.Errorf("Expected non-nil output channel.")
	}
	if oc2 != oc1 {
		t.Errorf("First and second returned output channels do not " +
			"match")
	}
}
// TestWorker_SetOutputChannel_Success verifies that a caller-supplied
// output channel is stored and returned by GetOutputChannel.
func TestWorker_SetOutputChannel_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	oc1 := make(chan interface{})
	err := w.SetOutputChannel(oc1)
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	oc2, _ := w.GetOutputChannel()
	if oc1 != oc2 {
		t.Errorf("Internal and set output channels do not match")
	}
}
// TestWorker_Start_NilContext verifies that Start rejects a nil context
// with ErrNilContext.
func TestWorker_Start_NilContext(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	err := w.Start(nil)
	if err != ErrNilContext {
		t.Errorf("Expected ErrNilContext error. Got %q.", err)
	}
}
// TestWorker_Start_NilOutputChannel verifies that Start fails with
// ErrNilOutputChannel when only an input channel was set up.
func TestWorker_Start_NilOutputChannel(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	_, _ = w.GetInputChannel()
	err := w.Start(context.Background())
	if err != ErrNilOutputChannel {
		t.Errorf("Expected ErrNilOutputChannel error. Got %q.", err)
	}
}
// TestWorker_Start_NilInputChannel verifies that Start fails with
// ErrNilInputChannel when only an output channel was set up.
func TestWorker_Start_NilInputChannel(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	_, _ = w.GetOutputChannel()
	err := w.Start(context.Background())
	if err != ErrNilInputChannel {
		t.Errorf("Expected ErrNilInputChannel error. Got %q.", err)
	}
}
// TestWorker_Start_Success verifies that Start succeeds once both
// channels are in place.
func TestWorker_Start_Success(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	oc := make(chan interface{})
	_ = w.SetOutputChannel(oc)
	_, _ = w.GetInputChannel()
	err := w.Start(context.Background())
	if err != nil {
		t.Errorf("Expected nil error. Got %q.", err)
	}
	close(oc)
}
// TestWorker_Start_AlreadyStarted verifies that a second Start call on
// a running worker fails with ErrAlreadyStarted.
func TestWorker_Start_AlreadyStarted(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	_, _ = w.GetOutputChannel()
	ic := make(chan interface{})
	_ = w.SetInputChannel(ic)
	_ = w.Start(context.Background())
	err := w.Start(context.Background())
	if err != ErrAlreadyStarted {
		t.Errorf("Expected ErrAlreadyStarted error. Got %q.", err)
	}
	close(ic)
}
// TestWorker_GetInputChannel_AlreadyStarted verifies that the input
// channel can no longer be fetched once the worker is running.
func TestWorker_GetInputChannel_AlreadyStarted(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	_, _ = w.GetOutputChannel()
	ic1 := make(chan interface{})
	_ = w.SetInputChannel(ic1)
	_ = w.Start(context.Background())
	ic2, err := w.GetInputChannel()
	if err != ErrAlreadyStarted {
		t.Errorf("Expected ErrAlreadyStarted error. Got %q.", err)
	}
	if ic2 != nil {
		t.Errorf("Expected nil input channel")
	}
	close(ic1)
}
// TestWorker_SetInputChannel_AlreadyStarted verifies that the input
// channel cannot be replaced once the worker is running.
func TestWorker_SetInputChannel_AlreadyStarted(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	_, _ = w.GetOutputChannel()
	ic := make(chan interface{})
	_ = w.SetInputChannel(ic)
	_ = w.Start(context.Background())
	err := w.SetInputChannel(ic)
	if err != ErrAlreadyStarted {
		t.Errorf("Expected ErrAlreadyStarted error. Got %q.", err)
	}
	close(ic)
}
// TestWorker_GetOutputChannel_AlreadyStarted verifies that the output
// channel can no longer be fetched once the worker is running.
func TestWorker_GetOutputChannel_AlreadyStarted(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	oc1 := make(chan interface{})
	_ = w.SetOutputChannel(oc1)
	ic := make(chan interface{})
	_ = w.SetInputChannel(ic)
	_ = w.Start(context.Background())
	oc2, err := w.GetOutputChannel()
	if err != ErrAlreadyStarted {
		t.Errorf("Expected ErrAlreadyStarted error. Got %q.", err)
	}
	if oc2 != nil {
		// Message fixed: this test checks the OUTPUT channel; the
		// original said "input" (copy-paste error).
		t.Errorf("Expected nil output channel")
	}
	close(oc1)
	close(ic)
}
// TestWorker_SetOutputChannel_AlreadyStarted verifies that the output
// channel cannot be replaced once the worker is running.
func TestWorker_SetOutputChannel_AlreadyStarted(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, nil
		})
	oc := make(chan interface{})
	_ = w.SetOutputChannel(oc)
	ic := make(chan interface{})
	_ = w.SetInputChannel(ic)
	_ = w.Start(context.Background())
	err := w.SetOutputChannel(oc)
	if err != ErrAlreadyStarted {
		t.Errorf("Expected ErrAlreadyStarted error. Got %q.", err)
	}
	close(oc)
	close(ic)
}
// TestWorker_WorkerFuncError verifies that a failing worker function
// surfaces its error on the output channel wrapped in a WorkerError.
func TestWorker_WorkerFuncError(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			return nil, fmt.Errorf("error test")
		})
	ic := make(chan interface{})
	_ = w.SetInputChannel(ic)
	oc, _ := w.GetOutputChannel()
	ctx := context.Background()
	_ = w.Start(ctx)
	go func() {
		result := <-oc
		we, ok := result.(WorkerError)
		if !ok {
			// %T prints the dynamic type; the original used %t, the
			// boolean verb, which garbles this diagnostic.
			t.Errorf("Expected WorkerError. Got %T.", result)
		}
		if we.Error.Error() != "error test" {
			t.Errorf("Expected error \"error test\". Got %q.",
				we.Error.Error())
		}
	}()
	ic <- struct{}{}
	// Clean shutdown.
	close(ic)
	ctx.WaitForChildren()
}
// TestWorker_WorkerFuncSuccess pushes ten items through one worker and
// checks every result, then waits for a clean shutdown.
func TestWorker_WorkerFuncSuccess(t *testing.T) {
	w, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			time.Sleep(1 * time.Millisecond)
			return "test result", nil
		})
	ic := make(chan interface{})
	_ = w.SetInputChannel(ic)
	oc, _ := w.GetOutputChannel()
	ctx := context.Background()
	_ = w.Start(ctx)
	go func() {
		for i := 0; i < 10; i++ {
			result := <-oc
			r, ok := result.(string)
			if !ok {
				// %T prints the dynamic type; %t (bool verb) was a bug.
				t.Errorf("Expected string. Got %T.", result)
			}
			if r != "test result" {
				t.Errorf("Expected result \"test result\". "+
					"Got %q.", r)
			}
		}
	}()
	for i := 0; i < 10; i++ {
		ic <- struct{}{}
	}
	// Clean shutdown.
	close(ic)
	ctx.WaitForChildren()
	err := ctx.Err()
	if err != nil {
		t.Errorf("Expected nil error. Got %v.", err)
	}
}
// TestWorker_WorkerFuncSuccess_MultipleWorkers runs two workers that
// share one input and one output channel and checks all ten results.
func TestWorker_WorkerFuncSuccess_MultipleWorkers(t *testing.T) {
	w1, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			time.Sleep(1 * time.Millisecond)
			return "test result", nil
		})
	w2, _ := New(
		func(interface{}, context.Context) (interface{}, error) {
			time.Sleep(1 * time.Millisecond)
			return "test result", nil
		})
	ic := make(chan interface{})
	_ = w1.SetInputChannel(ic)
	_ = w2.SetInputChannel(ic)
	oc := make(chan interface{})
	_ = w1.SetOutputChannel(oc)
	_ = w2.SetOutputChannel(oc)
	ctx := context.Background()
	_ = w1.Start(ctx)
	_ = w2.Start(ctx)
	go func() {
		for i := 0; i < 10; i++ {
			result := <-oc
			r, ok := result.(string)
			if !ok {
				// %T prints the dynamic type; %t (bool verb) was a bug.
				t.Errorf("Expected string. Got %T.", result)
			}
			if r != "test result" {
				t.Errorf("Expected result \"test result\". "+
					"Got %q.", r)
			}
		}
	}()
	for i := 0; i < 10; i++ {
		ic <- struct{}{}
	}
	go func() {
		time.Sleep(1 * time.Millisecond)
		// Clean shutdown.
		close(ic)
	}()
	ctx.WaitForChildren()
	// At this point workers finished and cleaned up. Wait will say that
	// workers are not started.
	err := ctx.Err()
	if err != nil {
		t.Errorf("Expected nil error. Got %v.", err)
	}
}
|
package main
import "secure/app"
// main boots the application by delegating to app.Start.
func main() {
	app.Start()
}
|
package util
import (
"context"
"fmt"
"path/filepath"
"strings"
"github.com/werf/werf/pkg/docker"
)
// RemoveHostDirsWithLinuxContainer deletes the given host directories by
// bind-mounting mountDir into a throwaway alpine container and running
// `rm -rf` on the container-side equivalents of each path.
func RemoveHostDirsWithLinuxContainer(ctx context.Context, mountDir string, dirs []string) error {
	args := []string{
		"--rm",
		"--volume", fmt.Sprintf("%s:%s", mountDir, ToLinuxContainerPath(mountDir)),
		"alpine",
		"rm", "-rf",
	}
	for _, dir := range dirs {
		args = append(args, ToLinuxContainerPath(dir))
	}
	return docker.CliRun(ctx, args...)
}
// ToLinuxContainerPath converts a host path to the form used inside a
// Linux container: any Windows volume name (e.g. "C:") is stripped and
// separators are normalized to forward slashes.
func ToLinuxContainerPath(path string) string {
	withoutVolume := strings.TrimPrefix(path, filepath.VolumeName(path))
	return filepath.ToSlash(withoutVolume)
}
|
package goo_mq
import (
"fmt"
"github.com/Shopify/sarama"
"github.com/liqiongtao/goo"
"time"
)
// KafkaProducer couples the shared Kafka settings with an asynchronous
// sarama producer.
type KafkaProducer struct {
	*Kafka
	producer sarama.AsyncProducer // created by Init
}
// config builds the producer configuration: wait for all in-sync
// replicas, random partitioning, success/error feedback channels
// enabled, a 5s producer timeout, and protocol version 0.10.2.0.
func (*KafkaProducer) config() *sarama.Config {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Producer.Timeout = 5 * time.Second
	config.Version = sarama.V0_10_2_0
	return config
}
// Init creates the async producer and starts a goroutine that drains
// the Successes and Errors feedback channels (enabled in config) until
// p.Context is done. It panics when the producer cannot be created.
func (p *KafkaProducer) Init() {
	producer, err := sarama.NewAsyncProducer(p.Addrs, p.config())
	if err != nil {
		goo.Log.Error("[kafka-producer]", err.Error())
		// NOTE(review): panicking here takes down the process on a
		// connection failure — confirm this is the intended policy.
		panic(err.Error())
	}
	go func() {
		for {
			select {
			case suc := <-producer.Successes():
				goo.Log.Debug("[kafka-producer]",
					fmt.Sprintf("partitions=%d topic=%s offset=%d value=%s",
						suc.Partition, suc.Topic, suc.Offset, suc.Value))
			case err := <-producer.Errors():
				goo.Log.Error("[kafka-producer]", err.Error())
			case <-p.Context.Done():
				// Stop draining when the owning context is cancelled.
				return
			}
		}
	}()
	p.producer = producer
}
// SendMessage enqueues message on topic asynchronously, keyed by the
// current nanosecond timestamp. Delivery results arrive on the
// producer's feedback channels (drained in Init), so this always
// returns nil.
func (p *KafkaProducer) SendMessage(topic string, message []byte) error {
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(message),
		Key:   sarama.StringEncoder(fmt.Sprintf("%d", time.Now().UnixNano())),
	}
	p.producer.Input() <- msg
	return nil
}
|
package leetcode
// buddyStrings reports whether swapping exactly two characters of A can
// make it equal to B (LeetCode 859). When A already equals B, the swap
// must be a no-op, which requires some character to appear twice.
func buddyStrings(A string, B string) bool {
	if A == B {
		seen := make(map[rune]struct{})
		for _, r := range A {
			if _, dup := seen[r]; dup {
				return true
			}
			seen[r] = struct{}{}
		}
		return false
	}
	if len(A) != len(B) {
		return false
	}
	var firstA, firstB byte
	swapped := false
	for i := 0; i < len(A); i++ {
		a, b := A[i], B[i]
		if a == b {
			continue
		}
		switch {
		case swapped:
			// A third mismatch can never be fixed by one swap.
			return false
		case firstA == 0:
			// Remember the first mismatching pair.
			firstA, firstB = a, b
		case firstB == a && firstA == b:
			// Second mismatch mirrors the first: a valid swap.
			swapped = true
		default:
			return false
		}
	}
	return swapped
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"time"
)
// main serves static favicon assets and routes all other requests to
// handleJob on port 3000.
func main() {
	fs := http.FileServer(http.Dir("static"))
	http.Handle("/favicon.ico", http.StripPrefix("/favicon.ico", fs))
	http.HandleFunc("/", handleJob)
	log.Println("Server started on port 3000")
	// ListenAndServe only returns on failure; the original dropped the
	// error, hiding problems such as a busy port.
	log.Fatal(http.ListenAndServe(":3000", nil))
}
// Payload is the data carried by a Job; the three fields hold synthetic
// text generated per job in handleJob.
type Payload struct {
	Text1 string
	Text2 string
	Text3 string
}
// Job wraps a Payload to be processed by the worker pool.
type Job struct {
	Payload Payload
}
// doRequest simulates a remote API call by sleeping one second, then
// returns 1 as the job's result value.
func doRequest(job Job) int {
	// api call
	time.Sleep(1 * time.Second)
	return 1
}
// handleJob builds 10000 synthetic jobs, processes them on a pool of
// 1000 workers, and writes the summed result as JSON.
func handleJob(w http.ResponseWriter, r *http.Request) {
	jobs := []Job{}
	for j := 1; j <= 10000; j++ {
		jobs = append(jobs, Job{
			Payload{
				Text1: fmt.Sprintf("%d-1-test", j),
				Text2: fmt.Sprintf("%d-2-test", j),
				Text3: fmt.Sprintf("%d-3-test", j),
			},
		})
	}
	pool := newWorkerPool(1000)
	result := pool.Run(jobs, func(job Job) int {
		return doRequest(job)
	})
	// The original discarded the marshal error; surface it instead of
	// silently writing an empty body.
	js, err := json.Marshal(result)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(js)
}
// workerPool fans Jobs out to a fixed number of goroutines and collects
// their int results.
type workerPool struct {
	Jobs    chan Job      // work queue; closed by Run once all jobs are queued
	Results chan int      // per-job results; its capacity doubles as the worker count
	Func    func(Job) int // work function applied to each Job
}
// newWorkerPool returns a pool whose Results channel capacity encodes
// the number of workers Run will spawn (Run reads cap(wp.Results)).
func newWorkerPool(workerCount int) *workerPool {
	return &workerPool{
		Results: make(chan int, workerCount),
	}
}
// Run processes jobs with fn across cap(wp.Results) worker goroutines
// and returns the sum of all per-job results. It blocks until every
// result has been received.
func (wp *workerPool) Run(jobs []Job, fn func(Job) int) int {
	wp.Jobs = make(chan Job, len(jobs))
	wp.Func = fn
	// One goroutine per slot of the Results buffer.
	for w := 1; w <= cap(wp.Results); w++ {
		go wp.worker()
	}
	for _, job := range jobs {
		wp.Jobs <- job
	}
	// Closing lets workers exit their range loop once the queue drains.
	close(wp.Jobs)
	result := 0
	for range jobs {
		result += <-wp.Results
	}
	return result
}
// worker consumes Jobs until the queue is closed, pushing each result
// onto the Results channel.
func (wp *workerPool) worker() {
	for job := range wp.Jobs {
		wp.Results <- wp.Func(job)
	}
}
|
// Copyright (c) 2020 Xiaozhe Yao & AICAMP.CO.,LTD
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
package entities
import (
"reflect"
"testing"
)
// TestPrivateEnvironment_TableName is a table-driven check of
// PrivateEnvironment.TableName; the case table is still empty (TODO).
func TestPrivateEnvironment_TableName(t *testing.T) {
	type fields struct {
		ID   string
		Name string
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &PrivateEnvironment{
				ID:   tt.fields.ID,
				Name: tt.fields.Name,
			}
			if got := p.TableName(); got != tt.want {
				t.Errorf("PrivateEnvironment.TableName() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestEnvironmentVariable_TableName is a table-driven check of
// EnvironmentVariable.TableName; the case table is still empty (TODO).
func TestEnvironmentVariable_TableName(t *testing.T) {
	type fields struct {
		ID          string
		Key         string
		Value       string
		Environment string
		PackageID   string
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ev := &EnvironmentVariable{
				ID:          tt.fields.ID,
				Key:         tt.fields.Key,
				Value:       tt.fields.Value,
				Environment: tt.fields.Environment,
				PackageID:   tt.fields.PackageID,
			}
			if got := ev.TableName(); got != tt.want {
				t.Errorf("EnvironmentVariable.TableName() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestEnvironmentVariable_PK is a table-driven check of
// EnvironmentVariable.PK; the case table is still empty (TODO).
func TestEnvironmentVariable_PK(t *testing.T) {
	type fields struct {
		ID          string
		Key         string
		Value       string
		Environment string
		PackageID   string
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ev := &EnvironmentVariable{
				ID:          tt.fields.ID,
				Key:         tt.fields.Key,
				Value:       tt.fields.Value,
				Environment: tt.fields.Environment,
				PackageID:   tt.fields.PackageID,
			}
			if got := ev.PK(); got != tt.want {
				t.Errorf("EnvironmentVariable.PK() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestPrivateEnvironment_PK is a table-driven check of
// PrivateEnvironment.PK; the case table is still empty (TODO).
func TestPrivateEnvironment_PK(t *testing.T) {
	type fields struct {
		ID   string
		Name string
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &PrivateEnvironment{
				ID:   tt.fields.ID,
				Name: tt.fields.Name,
			}
			if got := p.PK(); got != tt.want {
				t.Errorf("PrivateEnvironment.PK() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestEnvironmentVariable_Save is a table-driven check of
// EnvironmentVariable.Save error behavior; the case table is still
// empty (TODO).
func TestEnvironmentVariable_Save(t *testing.T) {
	type fields struct {
		ID          string
		Key         string
		Value       string
		Environment string
		PackageID   string
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ev := &EnvironmentVariable{
				ID:          tt.fields.ID,
				Key:         tt.fields.Key,
				Value:       tt.fields.Value,
				Environment: tt.fields.Environment,
				PackageID:   tt.fields.PackageID,
			}
			if err := ev.Save(); (err != nil) != tt.wantErr {
				t.Errorf("EnvironmentVariable.Save() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestPrivateEnvironment_Save is a table-driven check of
// PrivateEnvironment.Save error behavior; the case table is still
// empty (TODO).
func TestPrivateEnvironment_Save(t *testing.T) {
	type fields struct {
		ID   string
		Name string
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &PrivateEnvironment{
				ID:   tt.fields.ID,
				Name: tt.fields.Name,
			}
			if err := p.Save(); (err != nil) != tt.wantErr {
				t.Errorf("PrivateEnvironment.Save() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestGetEnvironmentVariablesbyPackageID is a table-driven check of the
// package/environment variable lookup; the case table is still empty
// (TODO).
func TestGetEnvironmentVariablesbyPackageID(t *testing.T) {
	type args struct {
		packageID   string
		environment string
	}
	tests := []struct {
		name string
		args args
		want []EnvironmentVariable
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := GetEnvironmentVariablesbyPackageID(tt.args.packageID, tt.args.environment); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("GetEnvironmentVariablesbyPackageID() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestMergeEnvironmentVariables is a table-driven check of
// MergeEnvironmentVariables; the case table is still empty (TODO).
func TestMergeEnvironmentVariables(t *testing.T) {
	type args struct {
		envs []EnvironmentVariable
	}
	tests := []struct {
		name string
		args args
		want []string
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := MergeEnvironmentVariables(tt.args.envs); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("MergeEnvironmentVariables() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package utils
import (
"bytes"
"encoding/gob"
"log"
)
// GetBytes gob-encodes data and returns the resulting bytes, or the
// encoder's error.
func GetBytes(data interface{}) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(data); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// MLogger logs a message, status code, and error in a brace-delimited
// format. The original called err.Error() unconditionally and panicked
// on a nil error; %v renders nil as "<nil>" and a non-nil error
// identically to err.Error().
func MLogger(message string, statusCode int, err error) {
	log.Printf("{message: %s, code: %d, error: %v}\n", message, statusCode, err)
}
|
package models
import (
"gopkg.in/mgo.v2/bson"
"github.com/astaxie/beego"
)
// Task is the MongoDB document for a to-do item. The `form:"-"` tag on
// Id keeps it out of form binding; bson/json tags map the storage and
// API field names.
type Task struct {
	Id   bson.ObjectId `bson:"_id" json:"id" form:"-"`
	Name string        `bson:"name" json:"name" form:"name"`
	Done bool          `bson:"done" json:"done" form:"done"`
}
// Database and collection names, read once from the beego app
// configuration at package init.
var (
	db         = beego.AppConfig.String("db")
	collection = beego.AppConfig.String("collection")
)
// InsertTask stores a new Task document in the configured collection.
func InsertTask(task Task) error {
	return Insert(db, collection, task)
}
// FindAllTasks returns every Task in the collection (nil query, nil
// projection).
func FindAllTasks() ([]Task, error) {
	var result []Task
	err := FindAll(db, collection, nil, nil, &result)
	return result, err
}
// UpdateTask replaces the stored document whose _id matches task.Id.
func UpdateTask(task Task) error {
	return Update(db, collection, bson.M{"_id": task.Id}, task)
}
// RemoveTask deletes the task with the given hex object id.
// NOTE(review): bson.ObjectIdHex panics on an invalid hex id — consider
// validating with bson.IsObjectIdHex before converting.
func RemoveTask(id string) error {
	return Remove(db, collection, bson.M{"_id": bson.ObjectIdHex(id)})
}
package reflection
import (
"fmt"
"log"
"math"
"reflect"
"testing"
)
// ExampleStruct exposes one field per kind exercised (or rejected) by
// PopulateFromString, so tests can grab fields by name via reflection.
type ExampleStruct struct {
	Int8    int8
	Int16   int16
	Int32   int32
	Int64   int64
	Uint8   uint8
	Uint16  uint16
	Uint32  uint32
	Uint64  uint64
	Float32 float32
	Float64 float64
	Bool    bool
	String  string
	Map     map[string]int
	Func    func() // unsupported kind; used by the failure test
	Ptr     *string
	Array   []int
}
// TestPopulateFromStringWithInt8 checks the int8 boundaries: 128 and
// -129 are rejected, -128 and 28 parse and land in the struct field.
func TestPopulateFromStringWithInt8(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Int8")
	err := PopulateFromString(field, "128", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-129", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-128", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int8 != -128 {
		t.Fail()
	}
	err = PopulateFromString(field, "28", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int8 != 28 {
		t.Fail()
	}
}
// TestPopulateFromStringWithInt16 checks the int16 boundaries: 32768
// and -32769 are rejected, -32768 and 512 parse into the field.
func TestPopulateFromStringWithInt16(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Int16")
	err := PopulateFromString(field, "32768", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-32769", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-32768", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int16 != -32768 {
		t.Fail()
	}
	err = PopulateFromString(field, "512", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int16 != 512 {
		t.Fail()
	}
}
// TestPopulateFromStringWithInt32 checks the int32 boundaries:
// 2147483648 and -2147483649 are rejected, -2147483648 and 214740000
// parse into the field.
func TestPopulateFromStringWithInt32(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Int32")
	err := PopulateFromString(field, "2147483648", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-2147483649", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-2147483648", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int32 != -2147483648 {
		t.Fail()
	}
	err = PopulateFromString(field, "214740000", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int32 != 214740000 {
		t.Fail()
	}
}
// TestPopulateFromStringWithInt64 checks the int64 boundaries: values
// one past MaxInt64/MinInt64 are rejected, in-range values parse.
func TestPopulateFromStringWithInt64(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Int64")
	err := PopulateFromString(field, "9223372036854775808", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-9223372036854775809", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-9223372036854775808", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int64 != -9223372036854775808 {
		t.Fail()
	}
	err = PopulateFromString(field, "9223372036854775800", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Int64 != 9223372036854775800 {
		t.Fail()
	}
}
// TestPopulateFromStringWithUint8 checks uint8 parsing: 256 and -1 are
// rejected, 28 parses into the field.
func TestPopulateFromStringWithUint8(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Uint8")
	err := PopulateFromString(field, "256", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-1", false)
	if err == nil {
		log.Print("failed on the negative uint test")
		t.Fail()
	}
	err = PopulateFromString(field, "28", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Uint8 != 28 {
		t.Fail()
	}
}
// TestPopulateFromStringWithUint16 checks uint16 parsing: 65536 and -1
// are rejected, 512 parses into the field.
func TestPopulateFromStringWithUint16(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Uint16")
	err := PopulateFromString(field, "65536", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-1", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "512", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Uint16 != 512 {
		t.Fail()
	}
}
// TestPopulateFromStringWithUint32 checks uint32 parsing: 4294967296
// and -1 are rejected, 214740000 parses into the field.
func TestPopulateFromStringWithUint32(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Uint32")
	err := PopulateFromString(field, "4294967296", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-1", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "214740000", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Uint32 != 214740000 {
		t.Fail()
	}
}
// TestPopulateFromStringWithUint64 checks uint64 parsing: 2^64 and -1
// are rejected, a large in-range value parses into the field.
func TestPopulateFromStringWithUint64(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Uint64")
	err := PopulateFromString(field, "18446744073709551616", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "-1", false)
	if err == nil {
		log.Print(err)
		t.Fail()
	}
	err = PopulateFromString(field, "9223372036854775800", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Uint64 != 9223372036854775800 {
		t.Fail()
	}
}
// TestPopulateFromStringWithFloat32 checks that an in-range value
// parses into the float32 field and that a value around math.MaxFloat64
// (far beyond float32 range) is rejected.
func TestPopulateFromStringWithFloat32(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Float32")
	err := PopulateFromString(field, "214740000.23781278321", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Float32 != 214740000.23781278321 {
		t.Fail()
	}
	err = PopulateFromString(field, fmt.Sprintf("%f", math.MaxFloat64), false)
	if err == nil {
		t.Fail()
	}
}
// TestPopulateFromStringWithFloat64 checks that an in-range value
// parses into the float64 field and that a literal just beyond
// math.MaxFloat64 (~1.797e+308) is rejected.
func TestPopulateFromStringWithFloat64(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Float64")
	err := PopulateFromString(field, "9223372036854775800.12382136172", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Float64 != 9223372036854775800.12382136172 {
		t.Fail()
	}
	// fmt.Sprintf around a constant string was a no-op; pass the
	// overflow literal directly.
	err = PopulateFromString(field, "1.797693134862315708145274237317043567981e+309", false)
	if err == nil {
		t.Fail()
	}
}
// TestPopulateFromStringWithBool checks that a non-boolean string is
// rejected and that "true"/"false" parse into the bool field.
func TestPopulateFromStringWithBool(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Bool")
	err := PopulateFromString(field, "nonbool-lolkek", false)
	if err == nil {
		t.Fail()
	}
	err = PopulateFromString(field, "true", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if !initializedStruct.Bool {
		t.Fail()
	}
	err = PopulateFromString(field, "false", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.Bool {
		t.Fail()
	}
}
// TestPopulateFromStringWithString checks that a string value is stored
// verbatim in the string field.
func TestPopulateFromStringWithString(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("String")
	err := PopulateFromString(field, "Hello World!", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if initializedStruct.String != "Hello World!" {
		t.Fail()
	}
}
// TestPopulateFromStringWithUnsupportedTypeFails checks that a func
// field is rejected with the exact "unsupported field type" message.
func TestPopulateFromStringWithUnsupportedTypeFails(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Func")
	err := PopulateFromString(field, "Hello World!", false)
	if err == nil {
		t.Fail()
	}
	if err.Error() != "unsupported field type, got: func" {
		t.Fail()
	}
}
// Order matters with this test.
// TestPopulateFromStringWithPointer checks pointer handling: an empty
// value with the flag set leaves Ptr nil, with the flag clear allocates
// it, and a non-empty value is stored through the pointer.
// NOTE(review): the third argument presumably means "treat empty as
// nil" — confirm against PopulateFromString's signature.
func TestPopulateFromStringWithPointer(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Ptr")
	err := PopulateFromString(field, "", true)
	if err != nil {
		t.Fail()
	}
	if initializedStruct.Ptr != nil {
		t.Fail()
	}
	err = PopulateFromString(field, "", false)
	if err != nil {
		t.Fail()
	}
	if initializedStruct.Ptr == nil {
		t.Fail()
	}
	err = PopulateFromString(field, "Hello World!", false)
	if err != nil {
		t.Fail()
	}
	if *initializedStruct.Ptr != "Hello World!" {
		t.Fail()
	}
}
// TestPopulateFromStringWithArray checks that a "[1,2,3]" list literal
// populates the int slice field with three elements.
func TestPopulateFromStringWithArray(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Array")
	err := PopulateFromString(field, "[1,2,3]", false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if len(initializedStruct.Array) != 3 {
		t.Fail()
	}
	if initializedStruct.Array[0] != 1 && initializedStruct.Array[1] != 2 && initializedStruct.Array[2] != 3 {
		t.Fail()
	}
}
// TestPopulateFromStringWithMap checks that a JSON-style object literal
// populates the map[string]int field with three entries.
func TestPopulateFromStringWithMap(t *testing.T) {
	initializedStruct := &ExampleStruct{}
	reflectedStruct := reflect.ValueOf(initializedStruct)
	field := reflectedStruct.Elem().FieldByName("Map")
	err := PopulateFromString(field, `{"a":1,"b":2,"c":3}`, false)
	if err != nil {
		log.Print(err)
		t.Fail()
	}
	if len(initializedStruct.Map) != 3 {
		t.Fail()
	}
	if initializedStruct.Map["a"] != 1 && initializedStruct.Map["b"] != 2 && initializedStruct.Map["c"] != 3 {
		t.Fail()
	}
}
|
package setup
import (
"fmt"
"github.com/gardener/test-infra/integration-tests/e2e/config"
"github.com/gardener/test-infra/integration-tests/e2e/kubetest"
"github.com/gardener/test-infra/integration-tests/e2e/util"
tmutil "github.com/gardener/test-infra/pkg/util"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
)
// Setup ensures kubetest and the kubernetes test utilities are present,
// installing them when the readiness probe fails, then re-probing to
// verify the installation.
func Setup() error {
	cleanUpPreviousRuns()
	if err := areTestUtilitiesReady(); err == nil {
		log.Info("all test utilities were already ready")
		// Typo fixed in the log message ("successfuly").
		log.Info("setup finished successfully. Testutilities ready. Kubetest is ready for usage.")
		return nil
	}
	log.Info("test utilities are not ready. Install...")
	if err := getKubetestAndUtilities(); err != nil {
		return errors.Wrap(err, "unable to setup kubetest and utilities")
	}
	if err := areTestUtilitiesReady(); err != nil {
		return err
	}
	log.Info("setup finished successfully. Testutilities ready. Kubetest is ready for usage.")
	return nil
}
// getKubetestAndUtilities installs kubetest via `go get` (with
// GO111MODULE forced on) and extracts the kubernetes release tree into
// config.K8sRoot.
func getKubetestAndUtilities() error {
	goModuleOriginValue := os.Getenv("GO111MODULE")
	_ = os.Setenv("GO111MODULE", "on")
	if _, err := util.RunCmd(fmt.Sprintf("go get k8s.io/test-infra/kubetest@%s", config.TestInfraVersion), "/"); err != nil {
		// Restore the caller's module setting even on failure; the
		// original leaked GO111MODULE=on on this early return.
		_ = os.Setenv("GO111MODULE", goModuleOriginValue)
		return err
	}
	_ = os.Setenv("GO111MODULE", goModuleOriginValue)
	if err := os.MkdirAll(config.K8sRoot, os.ModePerm); err != nil {
		return errors.Wrapf(err, "unable to create directories %s", config.K8sRoot)
	}
	if _, err := util.RunCmd(fmt.Sprintf("kubetest --provider=skeleton --extract=v%s", config.K8sRelease), config.K8sRoot); err != nil {
		return err
	}
	return nil
}
// cleanUpPreviousRuns removes logs, exported test-result JSON files,
// the generated run description, and kubetest marker files left by
// earlier runs. Failures are logged (or deliberately ignored for the
// optional marker files) rather than aborting setup.
func cleanUpPreviousRuns() {
	if err := os.RemoveAll(config.LogDir); err != nil {
		log.Error(err)
	}
	testResultFiles := util.GetFilesByPattern(config.ExportPath, `test.*\.json$`)
	for _, file := range testResultFiles {
		if err := os.Remove(file); err != nil {
			log.Error(err)
		}
	}
	if err := os.Remove(kubetest.GeneratedRunDescPath); err != nil {
		log.Error(err)
	}
	// Best effort: these may legitimately not exist.
	_ = os.Remove(filepath.Join(config.ExportPath, "started.json"))
	_ = os.Remove(filepath.Join(config.ExportPath, "finished.json"))
	_ = os.Remove(filepath.Join(config.ExportPath, "e2e.log"))
	_ = os.Remove(filepath.Join(config.ExportPath, "junit_01.xml"))
}
// PostRunCleanFiles removes artifacts produced during a run: the log
// directory, the kubernetes tree, and the downloads directory.
func PostRunCleanFiles() error {
	// remove log dir
	if err := os.RemoveAll(config.LogDir); err != nil {
		return err
	}
	// remove kubernetes folder
	// NOTE(review): this deletes the ENTIRE GOPATH, not just the
	// kubernetes checkout — confirm that is intended in this CI image.
	if err := os.RemoveAll(os.Getenv("GOPATH")); err != nil {
		return err
	}
	//remove downloads dir
	if err := os.RemoveAll(config.DownloadsDir); err != nil {
		return err
	}
	return nil
}
// areTestUtilitiesReady probes the kubetest binary, the expected
// kubernetes directory layout, and the extracted kubernetes version,
// accumulating every failure into one multierror.
func areTestUtilitiesReady() error {
	log.Info("checking whether any test utility is not ready")
	var res *multierror.Error
	if !util.CommandExists("kubetest") {
		res = multierror.Append(res, errors.New("kubetest not installed"))
	} else {
		log.Info("kubetest binary available")
	}
	// check if required directories exist
	requiredPaths := [...]string{
		path.Join(config.K8sRoot, "kubernetes/hack"),
		path.Join(config.K8sRoot, "kubernetes/cluster"),
		path.Join(config.K8sRoot, "kubernetes/test"),
		path.Join(config.K8sRoot, "kubernetes/client"),
		path.Join(config.K8sRoot, "kubernetes/server")}
	for _, requiredPath := range requiredPaths {
		if _, err := os.Stat(requiredPath); err != nil {
			res = multierror.Append(res, errors.Wrapf(err, "dir %s does not exist: ", requiredPath))
		} else {
			log.Info(fmt.Sprintf("%s dir exists", requiredPath))
		}
	}
	kubernetesVersionFile := path.Join(config.K8sRoot, "kubernetes/version")
	currentKubernetesVersionByte, err := ioutil.ReadFile(kubernetesVersionFile)
	if err != nil || len(currentKubernetesVersionByte) == 0 {
		res = multierror.Append(res, fmt.Errorf("Required file %s does not exist or is empty: ", kubernetesVersionFile))
	// [1:] drops the first byte — presumably the leading "v" of the
	// version string; TODO confirm the file format.
	} else if currentKubernetesVersion := strings.TrimSpace(string(currentKubernetesVersionByte[1:])); currentKubernetesVersion != config.K8sRelease {
		res = multierror.Append(res, fmt.Errorf("found kubernetes version %s, required version %s: ", currentKubernetesVersion, config.K8sRelease))
	}
	return tmutil.ReturnMultiError(res)
}
|
package database
import (
"testing"
)
// TestMysql verifies that a freshly opened connection answers a ping.
func TestMysql(t *testing.T) {
	DB := Connect()
	err := DB.Ping()
	if err != nil {
		// The original used the %d verb with a string argument; report
		// the actual ping error instead.
		t.Errorf("DB connection ping error: %v", err)
	}
}
|
package bundler
import (
"log"
"os"
"github.com/streadway/amqp"
)
var (
	// amqpURI is built from the RABBITMQ_HOST/RABBITMQ_PORT environment
	// variables at package init time; changes to those variables after
	// startup are not picked up.
	amqpURI = "amqp://" + os.Getenv("RABBITMQ_HOST") + ":" +
		os.Getenv("RABBITMQ_PORT")
	amqpExchange     = "siphon.apps.notifications" // fanout exchange for app notifications
	amqpExchangeType = "fanout"
	amqpConsumerTag  = "siphon-bundler"
)
// PostAppUpdated sends an app_updated notification on the app notification
// exchange so that the Siphon Sandbox, simulator or developer device can
// refresh itself accordingly.
//
// Delivery is best-effort: all failures are logged and ignored so the
// caller is never broken by notification problems.
func PostAppUpdated(appID string, userID string) {
	log.Printf("Dialing %s", amqpURI)
	conn, err := amqp.Dial(amqpURI)
	if err != nil {
		log.Printf("(ignored) Error opening RMQ connection: %v", err)
		return
	}
	defer conn.Close()
	c, err := conn.Channel()
	if err != nil {
		log.Printf("(ignored) Error opening RMQ channel: %v", err)
		return
	}
	// Bug fix: the channel was never closed before.
	defer c.Close()
	log.Printf("Declaring exchange (%q)", amqpExchange)
	err = c.ExchangeDeclare(
		amqpExchange,     // name of the exchange
		amqpExchangeType, // type
		true,             // durable
		false,            // delete when complete
		false,            // internal
		false,            // noWait
		nil,              // arguments
	)
	if err != nil {
		// Bug fix: this error was silently dropped; publishing to a
		// missing exchange would then fail or the message would vanish.
		log.Printf("(ignored) Error declaring RMQ exchange: %v", err)
		return
	}
	// Declare the JSON inline rather than mucking around with a struct
	// NOTE(review): appID/userID are not JSON-escaped here — confirm
	// upstream guarantees they contain no quotes or backslashes.
	payload := `{"type": "app_updated", "app_id": "` + appID +
		`", "user_id": "` + userID + `"}`
	log.Printf("Posting payload: %s", payload)
	err = c.Publish(amqpExchange, "", false, false, amqp.Publishing{
		ContentType: "application/json",
		Body:        []byte(payload),
	})
	if err != nil {
		log.Printf("Error posting notification to RabbitMQ: %v", err)
		return
	}
}
|
package main
import "fmt"
// main demonstrates defer ordering: deferred calls run LIFO after the
// function body finishes, so the output is Hello, Hye, Bye1, Bye.
func main() {
	defer func() {
		// Deferred last in the original, so "Bye1" prints before "Bye".
		fmt.Println("Bye1")
		fmt.Println("Bye")
	}()
	fmt.Println("Hello")
	fmt.Println("Hye")
}
|
package aws
import (
"context"
"encoding/base64"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
awssession "github.com/openshift/installer/pkg/asset/installconfig/aws"
"github.com/openshift/installer/pkg/gather"
"github.com/openshift/installer/pkg/gather/providers"
"github.com/openshift/installer/pkg/types"
"github.com/openshift/installer/pkg/version"
)
// Filter holds the key/value pairs for the tags we will be matching against.
//
// A resource matches the filter if all of the key/value pairs are in its tags.
type Filter map[string]string

// Gather holds options for resources we want to gather.
type Gather struct {
	logger          logrus.FieldLogger // destination for progress and debug output
	filters         []Filter           // tag filters identifying the cluster's instances
	region          string             // AWS region to search
	bootstrap       string             // bootstrap host; not referenced by the methods in this file — confirm use elsewhere
	masters         []string           // master hosts; not referenced by the methods in this file — confirm use elsewhere
	directory       string             // parent directory of serialLogBundle (staging area)
	serialLogBundle string             // path of the .tar.gz archive to produce
	// Session is the AWS session to be used for gathering. If nil, a new
	// session will be created based on the usual credential configuration
	// (AWS_PROFILE, AWS_ACCESS_KEY_ID, etc.).
	session *session.Session
}
// New returns an AWS Gather from ClusterMetadata.
func New(logger logrus.FieldLogger, serialLogBundle string, bootstrap string, masters []string, metadata *types.ClusterMetadata) (providers.Gather, error) {
	awsMeta := metadata.ClusterPlatformMetadata.AWS
	filters := make([]Filter, 0, len(awsMeta.Identifier))
	for _, identifier := range awsMeta.Identifier {
		filters = append(filters, identifier)
	}
	// "ssn" rather than "session" to avoid shadowing the imported
	// aws-sdk session package.
	ssn, err := awssession.GetSessionWithOptions(
		awssession.WithRegion(awsMeta.Region),
		awssession.WithServiceEndpoints(awsMeta.Region, awsMeta.ServiceEndpoints),
	)
	if err != nil {
		return nil, err
	}
	return &Gather{
		logger:          logger,
		region:          awsMeta.Region,
		filters:         filters,
		session:         ssn,
		serialLogBundle: serialLogBundle,
		bootstrap:       bootstrap,
		masters:         masters,
		directory:       filepath.Dir(serialLogBundle),
	}, nil
}
// Run is the entrypoint to start the gather process.
// It locates the cluster's EC2 instances by tag, downloads each
// instance's serial console output into a staging directory, bundles the
// results into the serial log archive, and best-effort removes the
// staging directory. Per-instance failures are aggregated, not fatal.
func (g *Gather) Run() error {
	// Bound the whole gather (discovery plus downloads) to 10 minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	awsSession := g.session
	if awsSession == nil {
		var err error
		// Relying on appropriate AWS ENV vars (eg AWS_PROFILE, AWS_ACCESS_KEY_ID, etc)
		awsSession, err = session.NewSession(aws.NewConfig().WithRegion(g.region))
		if err != nil {
			return err
		}
	}
	// Tag outgoing API calls with the installer's user agent.
	awsSession.Handlers.Build.PushBackNamed(request.NamedHandler{
		Name: "openshiftInstaller.OpenshiftInstallerUserAgentHandler",
		Fn:   request.MakeAddToUserAgentHandler("OpenShift/4.x Gather", version.Raw),
	})
	ec2Client := ec2.New(awsSession)
	instances, err := g.findEC2Instances(ctx, ec2Client)
	if err != nil {
		return err
	}
	if len(instances) == 0 {
		g.logger.Infoln("Skipping console log gathering: no instances found")
		return nil
	}
	// Stage files in a directory named after the bundle (minus .tar.gz).
	serialLogBundleDir := strings.TrimSuffix(filepath.Base(g.serialLogBundle), ".tar.gz")
	filePathDir := filepath.Join(g.directory, serialLogBundleDir)
	err = os.MkdirAll(filePathDir, 0755)
	// NOTE(review): os.MkdirAll already succeeds when the directory
	// exists, so the os.ErrExist exemption looks redundant — confirm.
	if err != nil && !errors.Is(err, os.ErrExist) {
		return err
	}
	// Download per-instance console output; collect errors but keep going.
	var errs []error
	var files []string
	for _, instance := range instances {
		filePath, err := g.downloadConsoleOutput(ctx, ec2Client, instance, filePathDir)
		if err != nil {
			errs = append(errs, err)
		} else {
			files = append(files, filePath)
		}
	}
	if len(files) > 0 {
		err := gather.CreateArchive(files, g.serialLogBundle)
		if err != nil {
			errs = append(errs, errors.Wrap(err, "failed to create archive"))
		}
	}
	if err := gather.DeleteArchiveDirectory(filePathDir); err != nil {
		// Note: cleanup is best effort, it shouldn't fail the gather
		g.logger.Debugf("Failed to remove archive directory: %v", err)
	}
	return utilerrors.NewAggregate(errs)
}
// findEC2Instances returns the EC2 instances with tags that satisfy the filters.
//
// Each Filter is a set of tag key/value pairs that must all match an
// instance; results across the filters are concatenated. Instances
// accumulated before an API error are returned alongside that error.
func (g *Gather) findEC2Instances(ctx context.Context, ec2Client *ec2.EC2) ([]*ec2.Instance, error) {
	if ec2Client.Config.Region == nil {
		return nil, errors.New("EC2 client does not have region configured")
	}
	var instances []*ec2.Instance
	for _, filter := range g.filters {
		g.logger.Debugf("Search for matching instances by tag in %s matching %#+v", *ec2Client.Config.Region, filter)
		// Bug fix: capacity was sized by len(g.filters) (the number of
		// filters) instead of len(filter) (the number of tag pairs in
		// this filter).
		instanceFilters := make([]*ec2.Filter, 0, len(filter))
		for key, value := range filter {
			instanceFilters = append(instanceFilters, &ec2.Filter{
				Name:   aws.String("tag:" + key),
				Values: []*string{aws.String(value)},
			})
		}
		err := ec2Client.DescribeInstancesPagesWithContext(
			ctx,
			&ec2.DescribeInstancesInput{Filters: instanceFilters},
			func(results *ec2.DescribeInstancesOutput, lastPage bool) bool {
				for _, reservation := range results.Reservations {
					// Skip reservations with no owner recorded.
					if reservation.OwnerId == nil {
						continue
					}
					for _, instance := range reservation.Instances {
						if instance.InstanceId != nil {
							instances = append(instances, instance)
						}
					}
				}
				return !lastPage
			},
		)
		if err != nil {
			return instances, errors.Wrap(err, "get ec2 instances")
		}
	}
	return instances, nil
}
// downloadConsoleOutput fetches the serial console output for a single
// EC2 instance and writes it to a file under filePathDir, returning the
// file's path. The file is named after the instance's "Name" tag when
// present, otherwise after its instance ID.
func (g *Gather) downloadConsoleOutput(ctx context.Context, ec2Client *ec2.EC2, instance *ec2.Instance, filePathDir string) (string, error) {
	logger := g.logger.WithField("Instance", aws.StringValue(instance.InstanceId))
	input := &ec2.GetConsoleOutputInput{
		InstanceId: instance.InstanceId,
	}
	result, err := ec2Client.GetConsoleOutputWithContext(ctx, input)
	if err != nil {
		// Cast err to awserr.Error to get the Message from an error.
		if aerr, ok := err.(awserr.Error); ok {
			logger.Errorln(aerr.Error())
		}
		return "", err
	}
	// Prefer the "Name" tag (case-insensitive) over the raw instance ID.
	instanceName := aws.StringValue(result.InstanceId)
	for _, tags := range instance.Tags {
		if strings.EqualFold(aws.StringValue(tags.Key), "Name") {
			instanceName = aws.StringValue(tags.Value)
		}
	}
	// Typo fix: "Attemping" -> "Attempting".
	logger.Debugf("Attempting to download console logs for %s", instanceName)
	filePath, err := g.saveToFile(instanceName, aws.StringValue(result.Output), filePathDir)
	if err != nil {
		return "", err
	}
	logger.Debug("Download complete")
	return filePath, nil
}
// saveToFile base64-decodes the console output and writes it to
// <filePathDir>/<instanceName>-serial.log, returning the file's path.
func (g *Gather) saveToFile(instanceName, content, filePathDir string) (string, error) {
	data, err := base64.StdEncoding.DecodeString(content)
	if err != nil {
		return "", errors.Wrap(err, "failed to decode console output")
	}
	filename := filepath.Join(filePathDir, fmt.Sprintf("%s-serial.log", instanceName))
	// os.WriteFile replaces the create/write/close dance and cannot leak
	// the file handle on the error path. Mode 0666 matches what
	// os.Create used (both are subject to the process umask).
	if err := os.WriteFile(filename, data, 0666); err != nil {
		return "", errors.Wrap(err, "failed to write file")
	}
	return filename, nil
}
|
package assertions
import (
"encoding/json"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestShouldEqual exercises ShouldEqual with string, multi-value,
// int, float and cross-type inputs; a case errs iff wantErr is set.
func TestShouldEqual(t *testing.T) {
	type args struct {
		actual   interface{}
		expected []interface{}
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "with string",
			args: args{
				actual:   `a`,
				expected: []interface{}{`a`},
			},
		},
		{
			name: "with string and multiple value",
			args: args{
				actual:   `foo bar goo`,
				expected: []interface{}{`foo`, `bar`, `goo`},
			},
		},
		{
			name: "with int",
			args: args{
				actual:   1,
				expected: []interface{}{1},
			},
		},
		{
			name: "with float",
			args: args{
				actual:   1.0,
				expected: []interface{}{1.0},
			},
		},
		{
			// NOTE(review): wantErr is unset here, implying ShouldEqual
			// treats 42 and "42" as equal across types — confirm intended.
			name: "different types",
			args: args{
				actual:   42,
				expected: []interface{}{"42"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ShouldEqual(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
				t.Errorf("ShouldEqual() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
func TestShouldNotEqual(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
expected: []interface{}{`b`},
},
},
{
name: "with int",
args: args{
actual: 1,
expected: []interface{}{2},
},
},
{
name: "with float",
args: args{
actual: 1.0,
expected: []interface{}{2.0},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotEqual(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotEqual() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
// TestShouldAlmostEqual exercises ShouldAlmostEqual with string, int and
// float inputs; the trailing expected value is the tolerance.
func TestShouldAlmostEqual(t *testing.T) {
	type args struct {
		actual   interface{}
		expected []interface{}
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "with string",
			args: args{
				actual:   `a`,
				expected: []interface{}{`b`},
			},
			wantErr: true,
		},
		{
			name: "with int",
			args: args{
				actual:   10,
				expected: []interface{}{9, 2},
			},
		},
		{
			name: "with float",
			args: args{
				actual:   1.1,
				expected: []interface{}{1.2, 0.1},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ShouldAlmostEqual(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
				// Consistency fix: the message previously said
				// "TestShouldAlmostEqual()" while every sibling test
				// reports the assertion name, not the test name.
				t.Errorf("ShouldAlmostEqual() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
func TestShouldNotAlmostEqual(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
expected: []interface{}{`b`},
},
wantErr: true,
},
{
name: "with int",
args: args{
actual: 10,
expected: []interface{}{5, 2},
},
},
{
name: "with float",
args: args{
actual: 1.1,
expected: []interface{}{1.5, 0.1},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotAlmostEqual(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotAlmostEqual() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeTrue(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
},
wantErr: true,
},
{
name: "with args",
args: args{
actual: 1,
expected: []interface{}{1},
},
wantErr: true,
},
{
name: "with bool",
args: args{
actual: true,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeTrue(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeTrue() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeFalse(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
},
wantErr: true,
},
{
name: "with args",
args: args{
actual: 1,
expected: []interface{}{1},
},
wantErr: true,
},
{
name: "with bool",
args: args{
actual: false,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeFalse(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeFalse() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeNil(t *testing.T) {
var m map[string]string
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
},
wantErr: true,
},
{
name: "with int",
args: args{
actual: 1,
},
wantErr: true,
},
{
name: "with nothing",
},
{
name: "with a nil map",
args: args{
actual: m,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeNil(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeNil() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotBeNil(t *testing.T) {
var m map[string]string
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
},
},
{
name: "with int",
args: args{
actual: 1,
},
},
{
name: "with nothing",
wantErr: true,
},
{
name: "with a nil map",
args: args{
actual: m,
},
wantErr: true,
},
{
name: "with an empty slice",
args: args{
actual: []string{},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotBeNil(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotBeNil() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeZeroValue(t *testing.T) {
var m map[string]string
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: ``,
},
},
{
name: "with int",
args: args{
actual: 0,
},
},
{
name: "with nothing",
},
{
name: "with a nil map",
args: args{
actual: m,
},
},
{
name: "with an empty slice",
args: args{
actual: []string{},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeZeroValue(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeZeroValue() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeGreaterThan(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `b`,
expected: []interface{}{"a"},
},
},
{
name: "with int",
args: args{
actual: 2,
expected: []interface{}{1},
},
},
{
name: "with float",
args: args{
actual: 2.0,
expected: []interface{}{1.0},
},
},
{
name: "with wrong types",
args: args{
actual: 2.0,
expected: []interface{}{"a"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeGreaterThan(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeGreaterThan() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeGreaterThanOrEqualTo(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
expected: []interface{}{"a"},
},
},
{
name: "with int",
args: args{
actual: 2,
expected: []interface{}{2},
},
},
{
name: "with float",
args: args{
actual: 2.0,
expected: []interface{}{2.0},
},
},
{
name: "with string",
args: args{
actual: `b`,
expected: []interface{}{"a"},
},
},
{
name: "with int",
args: args{
actual: 2,
expected: []interface{}{1},
},
},
{
name: "with float",
args: args{
actual: 2.0,
expected: []interface{}{1.0},
},
},
{
name: "with wrong types",
args: args{
actual: 2.0,
expected: []interface{}{"a"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeGreaterThanOrEqualTo(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeGreaterThanOrEqualTo() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeBetween(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `b`,
expected: []interface{}{"a", "c"},
},
},
{
name: "with int",
args: args{
actual: 2,
expected: []interface{}{1, 3},
},
},
{
name: "with float",
args: args{
actual: 2.0,
expected: []interface{}{1.0, 3.0},
},
},
{
name: "with wrong types",
args: args{
actual: 2.0,
expected: []interface{}{"a", 3},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeBetween(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeBetween() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotBeBetween(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
expected: []interface{}{"b", "c"},
},
},
{
name: "with int",
args: args{
actual: 1,
expected: []interface{}{2, 3},
},
},
{
name: "with float",
args: args{
actual: 1.0,
expected: []interface{}{2.0, 3.0},
},
},
{
name: "with wrong types",
args: args{
actual: 2.0,
expected: []interface{}{"a", 3},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotBeBetween(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotBeBetween() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldContain(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: []interface{}{"a", "c"},
expected: []interface{}{`a`},
},
},
{
name: "with int",
args: args{
actual: []interface{}{1, 2},
expected: []interface{}{1},
},
},
{
name: "with float",
args: args{
actual: []interface{}{1.0, 2.0},
expected: []interface{}{1.0},
},
},
{
name: "raise error",
args: args{
actual: []interface{}{1.0, 2.0},
expected: []interface{}{3.0},
},
wantErr: true,
},
{
name: "raise error with nothing",
args: args{
expected: []interface{}{"something"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldContain(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldContain() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotContain(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: []interface{}{"a", "c"},
expected: []interface{}{`b`},
},
},
{
name: "with int",
args: args{
actual: []interface{}{1, 2},
expected: []interface{}{3},
},
},
{
name: "with float",
args: args{
actual: []interface{}{1.0, 2.0},
expected: []interface{}{1.1},
},
},
{
name: "raise error",
args: args{
actual: []interface{}{1.0, 2.0},
expected: []interface{}{1.0},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotContain(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotContain() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldContainKey(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: map[string]interface{}{"a": "", "c": ""},
expected: []interface{}{`a`},
},
},
{
name: "raise error",
args: args{
actual: map[string]interface{}{"a": "", "c": ""},
expected: []interface{}{`b`},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldContainKey(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldContainKey() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotContainKey(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: map[string]interface{}{"a": "", "c": ""},
expected: []interface{}{`b`},
},
},
{
name: "raise error",
args: args{
actual: map[string]interface{}{"a": "", "c": ""},
expected: []interface{}{`a`},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotContainKey(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotContainKey() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldBeEmpty(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: map[string]interface{}{},
},
},
{
name: "ko",
args: args{
actual: map[string]interface{}{"a": "", "c": ""},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldBeEmpty(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldBeEmpty() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotBeEmpty(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ko",
args: args{
actual: map[string]interface{}{},
},
wantErr: true,
},
{
name: "ok",
args: args{
actual: map[string]interface{}{"a": "", "c": ""},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotBeEmpty(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotBeEmpty() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldHaveLength(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok with slice",
args: args{
actual: []interface{}{"a"},
expected: []interface{}{1},
},
},
{
name: "ok with map",
args: args{
actual: map[string]interface{}{"a": "a"},
expected: []interface{}{1},
},
},
{
name: "ok with string",
args: args{
actual: "a",
expected: []interface{}{1},
},
},
{
name: "ko",
args: args{
actual: []interface{}{"a"},
expected: []interface{}{2},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldHaveLength(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldHaveLength() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldStartWith(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: "aaa",
expected: []interface{}{"a"},
},
},
{
name: "ko",
args: args{
actual: "aaa",
expected: []interface{}{"b"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldStartWith(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldStartWith() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotStartWith(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: "aaa",
expected: []interface{}{"b"},
},
},
{
name: "ko",
args: args{
actual: "aaa",
expected: []interface{}{"a"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotStartWith(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotStartWith() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldEndWith(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: "aaa-",
expected: []interface{}{"a-"},
},
},
{
name: "ko",
args: args{
actual: "aaa-",
expected: []interface{}{"b"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldEndWith(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldEndWith() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotEndWith(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: "aaa-",
expected: []interface{}{"b"},
},
},
{
name: "ko",
args: args{
actual: "aaa-",
expected: []interface{}{"a-"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotEndWith(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotEndWith() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldContainSubstring(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: "aaa-x",
expected: []interface{}{"a-"},
},
},
{
name: "ko",
args: args{
actual: "aaa-x",
expected: []interface{}{"b-"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldContainSubstring(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldContainSubstring() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldNotContainSubstring(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: "aaa-x",
expected: []interface{}{"b-"},
},
},
{
name: "ko",
args: args{
actual: "aaa-x",
expected: []interface{}{"a-"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldNotContainSubstring(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldNotContainSubstring() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldEqualTrimSpace(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok with string",
args: args{
actual: ` a`,
expected: []interface{}{`a`},
},
},
{
name: "ko",
args: args{
actual: ` ba`,
expected: []interface{}{`a`},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldEqualTrimSpace(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldEqualTrimSpace() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldHappenBefore(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(1 * time.Second)},
},
},
{
name: "ko",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(-1 * time.Second)},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldHappenBefore(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldHappenBefore() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldHappenOnOrBefore(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(1 * time.Second)},
},
},
{
name: "ko",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(-1 * time.Second)},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldHappenOnOrBefore(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldHappenOnOrBefore() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldHappenAfter(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(-1 * time.Second)},
},
},
{
name: "ko",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(1 * time.Second)},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldHappenAfter(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldHappenAfter() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldHappenOnOrAfter(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(-1 * time.Second)},
},
},
{
name: "ko",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(1 * time.Second)},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldHappenOnOrAfter(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldHappenOnOrAfter() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldHappenBetween(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(-1 * time.Second), time.Now().Add(1 * time.Second)},
},
},
{
name: "ko",
args: args{
actual: time.Now(),
expected: []interface{}{time.Now().Add(1 * time.Second), time.Now().Add(2 * time.Second)},
},
wantErr: true,
},
{
name: "ok",
args: args{
actual: "2006-01-02T15:04:05+07:00",
expected: []interface{}{"2006-01-02T15:04:00+07:00", "2006-01-02T15:04:10+07:00"},
},
},
{
name: "ko",
args: args{
actual: "2006-01-02T15:04:00+07:00",
expected: []interface{}{"2006-01-02T15:04:05+07:00", "2006-01-02T15:04:10+07:00"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldHappenBetween(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldHappenBetween() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldTimeEqual(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
Parisloc, _ := time.LoadLocation("Europe/Paris")
now := time.Now()
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "ok",
args: args{
actual: now,
expected: []interface{}{now.In(Parisloc)},
},
},
{
name: "ko",
args: args{
actual: now,
expected: []interface{}{now.Add(1 * time.Second)},
},
wantErr: true,
},
{
name: "ok",
args: args{
actual: "2006-01-02T15:04:00+02:00",
expected: []interface{}{"2006-01-02T13:04:00Z"},
},
},
{
name: "ko",
args: args{
actual: "2006-01-02T15:04:00+07:00",
expected: []interface{}{"2006-01-02T15:04:05Z"},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldTimeEqual(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
t.Errorf("ShouldTimeEqual() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestShouldMatchRegex(t *testing.T) {
type args struct {
actual interface{}
expected []interface{}
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "with string",
args: args{
actual: `a`,
expected: []interface{}{`a`},
},
},
{
name: "with string regex",
args: args{
actual: `abc`,
expected: []interface{}{`a.*c$`},
},
},
{
name: "with number regex",
args: args{
actual: `abc-123`,
expected: []interface{}{`.*[0-9]{3}$`},
},
},
{
name: "with regex throwing error",
args: args{
actual: `abc-123`,
expected: []interface{}{`.*[0-9]{6}$`},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := ShouldMatchRegex(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
msg := fmt.Sprintf("value %v not matching pattern : %v", tt.args.actual, tt.args.expected[0])
assert.ErrorContainsf(t, err, msg, "Contains message")
t.Errorf("ShouldMatchRegex() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
// TestShouldJSONEqual exercises ShouldJSONEqual across objects, arrays and
// primitive JSON values, including whitespace/key-order insensitivity and the
// expected mismatch errors.
func TestShouldJSONEqual(t *testing.T) {
	type args struct {
		actual   interface{}
		expected []interface{}
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		// Objects and arrays
		{
			name: "object",
			args: args{
				actual:   map[string]interface{}{"a": 1, "b": 2, "c": map[string]interface{}{"x": 1, "y": 2}},
				expected: []interface{}{`{"a":1,"b":2,"c":{"x":1,"y":2}}`},
			},
		},
		{
			// Spaces, newlines, tabs and key order (including in nested objects) don't matter
			name: "object",
			args: args{
				actual:   map[string]interface{}{"a": 1, "b": 2, "c": map[string]interface{}{"x": 1, "y": 2}},
				expected: []interface{}{` { "c" : { "y" : 2 , "x" : 1 }, "b" : 2 ,` + "\n\t" + ` "a" : 1 } `},
			},
		},
		{
			name: "array",
			args: args{
				actual:   []interface{}{1, 2},
				expected: []interface{}{`[1,2]`},
			},
		},
		{
			// Spaces, newlines and tabs don't matter
			name: "array",
			args: args{
				actual:   []interface{}{1, 2},
				expected: []interface{}{` [ 1 ,` + "\n\t" + ` 2 ] `},
			},
		},
		// Object and array errors
		{
			name: "bad value",
			args: args{
				actual:   map[string]interface{}{"a": 1},
				expected: []interface{}{`{"a":2}`},
			},
			wantErr: true,
		},
		{
			name: "bad type",
			args: args{
				actual:   map[string]interface{}{"a": 1},
				expected: []interface{}{`{"a":"1"}`},
			},
			wantErr: true,
		},
		{
			name: "missing key",
			args: args{
				actual:   map[string]interface{}{"a": 1, "b": 2},
				expected: []interface{}{`{"a":1}`},
			},
			wantErr: true,
		},
		{
			name: "bad array order",
			args: args{
				actual:   map[string]interface{}{"a": []float64{1, 2}},
				expected: []interface{}{`{"a":[2,1]}`},
			},
			wantErr: true,
		},
		{
			name: "object instead of array",
			args: args{
				actual:   map[string]interface{}{"a": 1},
				expected: []interface{}{`[1]`},
			},
			wantErr: true,
		},
		{
			// BUG fix: expected used to be `{"a":1}}` (trailing extra brace),
			// which made this case fail on a JSON parse error rather than on
			// the array-vs-object type mismatch it is meant to test.
			name: "array instead of object",
			args: args{
				actual:   []interface{}{1},
				expected: []interface{}{`{"a":1}`},
			},
			wantErr: true,
		},
		// Primitive values
		{
			name: "string",
			args: args{
				actual:   "a",
				expected: []interface{}{"a"},
			},
		},
		{
			name: "empty string",
			args: args{
				actual:   "",
				expected: []interface{}{""},
			},
		},
		{
			name: "number",
			args: args{
				actual:   json.Number("1"),
				expected: []interface{}{`1`},
			},
		},
		{
			name: "number",
			args: args{
				actual:   json.Number("1.2"),
				expected: []interface{}{`1.2`},
			},
		},
		{
			name: "boolean",
			args: args{
				actual:   true,
				expected: []interface{}{`true`},
			},
		},
		{
			// TODO: Shouldn't be valid, but Venom currently passes an empty string to the assertion function when the JSON value is `null`.
			name: "null",
			args: args{
				actual:   "",
				expected: []interface{}{`null`},
			},
		},
		// Primitive value errors
		{
			name: "bad value",
			args: args{
				actual:   "a",
				expected: []interface{}{"b"},
			},
			wantErr: true,
		},
		{
			name: "bad type",
			args: args{
				actual:   float64(1),
				expected: []interface{}{"1"},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := ShouldJSONEqual(tt.args.actual, tt.args.expected...); (err != nil) != tt.wantErr {
				t.Errorf("ShouldJSONEqual() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
package main
import (
"cloud.google.com/go/logging"
cplogging "commentparser/logging"
"commentparser/models"
"commentparser/server"
"commentparser/services"
"encoding/json"
"fmt"
"golang.org/x/net/context"
"google.golang.org/api/option"
"io/ioutil"
"log"
"os"
"strings"
"time"
)
// entry point for the application, see readme.md for instructions.
//
// Usage:
//	<package_name> <comma-separated search_terms>  – one-shot extraction to stdout
//	server [config_path]                           – run the HTTP server
func main() {
	fmt.Printf("Starting Comment Parser %v\n\n", time.Now())
	// look for server mode
	if len(os.Args) < 2 {
		os.Stderr.WriteString("Two parameters required: package_name and (comma-separated) search_terms or " +
			"'server' optionally followed by configuration path location")
	} else if os.Args[1] == "server" {
		ctx := context.Background()
		var config server.Configuration
		// this is the default dir to look at the configuration
		configFilePath := os.Getenv("HOME") + "/configuration/development.json"
		if len(os.Args) > 2 && len(os.Args[2]) > 0 {
			configFilePath = os.Args[2]
		}
		configFile, err := ioutil.ReadFile(configFilePath)
		if err != nil {
			panic(err)
		}
		err = json.Unmarshal(configFile, &config)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Could not parse the configuration file at %s", configFilePath)
			panic(err)
		}
		fmt.Printf("Will start server at %s", config.Address)
		// Expand a leading "~/" in the credentials path to $HOME.
		credFilePath := config.GoogleCloudCredFile
		if strings.Index(credFilePath, "~/") == 0 {
			credFilePath = credFilePath[1:]
			credFilePath = os.Getenv("HOME") + credFilePath
		}
		client, err := logging.NewClient(
			ctx,
			config.GoogleCloudProjectID,
			option.WithCredentialsFile(
				credFilePath))
		if err != nil {
			log.Fatalf("Failed to create client: %v", err)
		}
		loggerGC := client.Logger(config.LogName)
		loggingGC := cplogging.NewStackdriverLogger(loggerGC)
		loggingGC.Debug("Starting server")
		// measurement
		measurementGC := server.NewMeasurementStackdriver(loggerGC)
		server.CommentParserHttpServer(config, loggingGC, measurementGC)
		if err := client.Close(); err != nil {
			log.Fatalf("Failed to close client: %v", err)
		}
	} else if len(os.Args) < 3 {
		// BUG fix: CLI mode used to index os.Args[2] unconditionally and
		// panicked when the search terms were omitted.
		os.Stderr.WriteString("Two parameters required: package_name and (comma-separated) search_terms")
	} else {
		searchTerms := strings.Split(os.Args[2], ",")
		request := models.CommentParsingRequest{
			PackageName: os.Args[1],
			Tokens:      searchTerms,
		}
		res, err := services.ExtractRelevantComments(request, cplogging.NewConsoleLogging())
		// BUG fix: check the error before reading res; on failure the result
		// may be zero-valued.
		if err != nil {
			panic(err)
		}
		for _, matches := range res.Matches {
			for _, match := range matches {
				fmt.Fprintf(
					os.Stdout,
					"%s:%v:\n%s\n",
					match.FileName,
					match.LineNumber,
					match.LineContent)
			}
		}
	}
	os.Exit(0)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package guestdrivers
import (
"context"
"fmt"
"strings"
"yunion.io/x/pkg/utils"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudcommon/db/taskman"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/compute/models"
"yunion.io/x/onecloud/pkg/mcclient"
"yunion.io/x/onecloud/pkg/util/billing"
)
// SAwsGuestDriver implements the guest (VM) driver for AWS by embedding the
// generic managed-virtualization driver and overriding AWS-specific behavior.
type SAwsGuestDriver struct {
	SManagedVirtualizedGuestDriver
}
// init registers the AWS guest driver with the global driver registry at
// package load time.
func init() {
	driver := SAwsGuestDriver{}
	models.RegisterGuestDriver(&driver)
}
// fetchAwsUserName returns the default login account for an AWS image,
// derived from the OS distribution of official public-cloud images.
// Non-official images default to "root"; unrecognized distributions fall
// back to "ec2-user".
func fetchAwsUserName(desc cloudprovider.SManagedVMCreateConfig) string {
	// Images not published as official cloud images use the root account.
	if desc.ImageType != "system" {
		return "root"
	}
	// Official cloud images: pick the account by distribution name.
	dist := strings.ToLower(desc.OsDistribution)
	switch {
	case strings.Contains(dist, "centos"):
		return "centos"
	case strings.Contains(dist, "ubuntu"):
		return "ubuntu"
	case strings.Contains(dist, "windows"):
		return "Administrator"
	case strings.Contains(dist, "debian"):
		return "admin"
	default:
		// suse, fedora, rhel/redhat, amazon linux and anything unknown all
		// use the standard EC2 account.
		return "ec2-user"
	}
}
// GetLinuxDefaultAccount returns the default login account for AWS guests.
// Per-distribution lookup (fetchAwsUserName) is intentionally disabled in
// favor of a single configured default.
func (self *SAwsGuestDriver) GetLinuxDefaultAccount(desc cloudprovider.SManagedVMCreateConfig) string {
	// return fetchAwsUserName(desc)
	return models.VM_AWS_DEFAULT_LOGIN_USER
}
// GetHypervisor returns the hypervisor identifier for this driver.
func (self *SAwsGuestDriver) GetHypervisor() string {
	return models.HYPERVISOR_AWS
}

// GetDefaultSysDiskBackend returns the storage type used for system disks
// when none is specified (AWS gp2 SSD).
func (self *SAwsGuestDriver) GetDefaultSysDiskBackend() string {
	return models.STORAGE_GP2_SSD
}

// GetMinimalSysDiskSizeGb returns the minimum allowed system disk size in GB.
func (self *SAwsGuestDriver) GetMinimalSysDiskSizeGb() int {
	return 10
}

// GetStorageTypes lists the EBS volume types supported by this driver, in
// preference order (used as the fallback order by ChooseHostStorage).
func (self *SAwsGuestDriver) GetStorageTypes() []string {
	return []string{
		models.STORAGE_GP2_SSD,
		models.STORAGE_IO1_SSD,
		models.STORAGE_ST1_HDD,
		models.STORAGE_SC1_HDD,
		models.STORAGE_STANDARD_HDD,
	}
}
// ChooseHostStorage picks a storage on the host whose type matches the
// requested backend; if none matches, it falls back to the first attached
// storage whose type appears in GetStorageTypes, in preference order.
// Returns nil when no attached storage qualifies.
func (self *SAwsGuestDriver) ChooseHostStorage(host *models.SHost, backend string) *models.SStorage {
	storages := host.GetAttachedStorages("")
	// First pass: exact backend match.
	for i := range storages {
		if storages[i].StorageType == backend {
			return &storages[i]
		}
	}
	// Fallback: walk the supported types in preference order.
	for _, stype := range self.GetStorageTypes() {
		for i := range storages {
			if storages[i].StorageType == stype {
				return &storages[i]
			}
		}
	}
	return nil
}
// GetDetachDiskStatus lists guest statuses in which a disk may be detached.
func (self *SAwsGuestDriver) GetDetachDiskStatus() ([]string, error) {
	return []string{models.VM_READY, models.VM_RUNNING}, nil
}

// GetAttachDiskStatus lists guest statuses in which a disk may be attached.
func (self *SAwsGuestDriver) GetAttachDiskStatus() ([]string, error) {
	return []string{models.VM_READY, models.VM_RUNNING}, nil
}

// GetRebuildRootStatus lists guest statuses in which the root disk may be
// rebuilt.
func (self *SAwsGuestDriver) GetRebuildRootStatus() ([]string, error) {
	return []string{models.VM_READY, models.VM_RUNNING}, nil
}

// GetChangeConfigStatus lists guest statuses in which the instance
// configuration may be changed (AWS requires a stopped instance).
func (self *SAwsGuestDriver) GetChangeConfigStatus() ([]string, error) {
	return []string{models.VM_READY}, nil
}

// GetDeployStatus lists guest statuses in which deployment is allowed.
func (self *SAwsGuestDriver) GetDeployStatus() ([]string, error) {
	return []string{models.VM_READY, models.VM_RUNNING}, nil
}
// RequestDetachDisk kicks off a guest sync task; the actual detach is
// reconciled against the cloud provider by the sync.
func (self *SAwsGuestDriver) RequestDetachDisk(ctx context.Context, guest *models.SGuest, task taskman.ITask) error {
	return guest.StartSyncTask(ctx, task.GetUserCred(), false, task.GetTaskId())
}
// ValidateCreateData delegates VM-creation validation to the generic managed
// virtualization driver; no AWS-specific checks are added here.
func (self *SAwsGuestDriver) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, input *api.ServerCreateInput) (*api.ServerCreateInput, error) {
	return self.SManagedVirtualizedGuestDriver.ValidateCreateData(ctx, userCred, input)
}
// ValidateResizeDisk checks whether the given disk, on the given storage, may
// be resized while the guest is in its current status.
// See https://docs.amazonaws.cn/AWSEC2/latest/UserGuide/stop-start.html
func (self *SAwsGuestDriver) ValidateResizeDisk(guest *models.SGuest, disk *models.SDisk, storage *models.SStorage) error {
	// Resizing is only allowed while the guest is running or ready.
	allowedStates := []string{models.VM_RUNNING, models.VM_READY}
	if !utils.IsInStringArray(guest.Status, allowedStates) {
		return fmt.Errorf("Cannot resize disk when guest in status %s", guest.Status)
	}
	// System disks can only be resized on a subset of volume types.
	sysResizable := []string{models.STORAGE_IO1_SSD, models.STORAGE_STANDARD_HDD, models.STORAGE_GP2_SSD}
	if disk.DiskType == models.DISK_TYPE_SYS && !utils.IsInStringArray(storage.StorageType, sysResizable) {
		return fmt.Errorf("Cannot resize system disk with unsupported volumes type %s", storage.StorageType)
	}
	// Any other disk must live on one of the supported EBS volume types.
	resizable := []string{models.STORAGE_GP2_SSD, models.STORAGE_IO1_SSD, models.STORAGE_ST1_HDD, models.STORAGE_SC1_HDD, models.STORAGE_STANDARD_HDD}
	if !utils.IsInStringArray(storage.StorageType, resizable) {
		return fmt.Errorf("Cannot resize %s disk", storage.StorageType)
	}
	return nil
}
// GetGuestInitialStateAfterCreate returns the guest status expected right
// after creation (AWS instances boot into RUNNING).
func (self *SAwsGuestDriver) GetGuestInitialStateAfterCreate() string {
	return models.VM_RUNNING
}

// GetGuestInitialStateAfterRebuild returns the guest status expected after a
// root-disk rebuild.
func (self *SAwsGuestDriver) GetGuestInitialStateAfterRebuild() string {
	return models.VM_READY
}
/*func (self *SAwsGuestDriver) RequestDeployGuestOnHost(ctx context.Context, guest *models.SGuest, host *models.SHost, task taskman.ITask) error {
config, err := guest.GetDeployConfigOnHost(ctx, task.GetUserCred(), host, task.GetParams())
if err != nil {
log.Errorf("GetDeployConfigOnHost error: %v", err)
return err
}
log.Debugf("RequestDeployGuestOnHost: %s", config)
desc := cloudprovider.SManagedVMCreateConfig{}
if err := desc.GetConfig(config); err != nil {
return err
}
action, err := config.GetString("action")
if err != nil {
return err
}
ihost, err := host.GetIHost()
if err != nil {
return err
}
username := fetchAwsUserName(desc)
switch action {
case "create":
taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) {
iVM, createErr := ihost.CreateVM(&desc)
if createErr != nil {
return nil, createErr
}
guest.SetExternalId(task.GetUserCred(), iVM.GetGlobalId())
log.Debugf("VMcreated %s, wait status running ...", iVM.GetGlobalId())
err = cloudprovider.WaitStatus(iVM, models.VM_RUNNING, time.Second*5, time.Second*1800)
if err != nil {
return nil, err
}
log.Debugf("VMcreated %s, and status is ready", iVM.GetGlobalId())
iVM, err = ihost.GetIVMById(iVM.GetGlobalId())
if err != nil {
log.Errorf("cannot find vm %s", err)
return nil, err
}
data := fetchIVMinfo(desc, iVM, guest.Id, username, desc.Password, action)
return data, nil
})
case "deploy":
iVM, err := ihost.GetIVMById(guest.GetExternalId())
if err != nil || iVM == nil {
log.Errorf("cannot find vm %s", err)
return fmt.Errorf("cannot find vm")
}
params := task.GetParams()
log.Debugf("Deploy VM params %s", params.String())
deleteKeypair := jsonutils.QueryBoolean(params, "__delete_keypair__", false)
taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) {
err := iVM.DeployVM(ctx, desc.Name, desc.Password, desc.PublicKey, deleteKeypair, desc.Description)
if err != nil {
return nil, err
}
data := fetchIVMinfo(desc, iVM, guest.Id, username, desc.Password, action)
return data, nil
})
case "rebuild":
iVM, err := ihost.GetIVMById(guest.GetExternalId())
if err != nil || iVM == nil {
log.Errorf("cannot find vm %s", err)
return fmt.Errorf("cannot find vm")
}
taskman.LocalTaskRun(task, func() (jsonutils.JSONObject, error) {
diskId, err := iVM.RebuildRoot(ctx, desc.ExternalImageId, desc.Password, desc.PublicKey, desc.SysDisk.SizeGB)
if err != nil {
return nil, err
}
log.Debugf("VMrebuildRoot %s new diskID %s, wait status ready ...", iVM.GetGlobalId(), diskId)
err = cloudprovider.WaitStatus(iVM, models.VM_READY, time.Second*5, time.Second*1800)
if err != nil {
return nil, err
}
log.Debugf("VMrebuildRoot %s, and status is ready", iVM.GetGlobalId())
maxWaitSecs := 300
waited := 0
for {
// hack, wait disk number consistent
idisks, err := iVM.GetIDisks()
if err != nil {
log.Errorf("fail to find VM idisks %s", err)
return nil, err
}
if len(idisks) < len(desc.DataDisks)+1 {
if waited > maxWaitSecs {
log.Errorf("inconsistent disk number, wait timeout, must be something wrong on remote")
return nil, cloudprovider.ErrTimeout
}
log.Debugf("inconsistent disk number???? %d != %d", len(idisks), len(desc.DataDisks)+1)
time.Sleep(time.Second * 5)
waited += 5
} else {
if idisks[0].GetGlobalId() != diskId {
log.Errorf("system disk id inconsistent %s != %s", idisks[0].GetGlobalId(), diskId)
return nil, fmt.Errorf("inconsistent sys disk id after rebuild root")
}
break
}
}
data := fetchIVMinfo(desc, iVM, guest.Id, username, desc.Password, action)
return data, nil
})
default:
log.Errorf("RequestDeployGuestOnHost: Action %s not supported", action)
return fmt.Errorf("Action %s not supported", action)
}
return nil
}*/
// IsSupportedBillingCycle reports whether prepaid billing cycles are
// supported; this driver only handles postpaid AWS guests.
func (self *SAwsGuestDriver) IsSupportedBillingCycle(bc billing.SBillingCycle) bool {
	return false
}
|
package redis
import (
"context"
"time"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
)
//go:generate confions config Config

// Config contains configuration options for a connection pool to a redis
// database.
type Config struct {
	Network           string        // e.g. "tcp" or "unix"
	Address           string        // host:port or socket path
	Database          int           // redis database index selected on dial
	Username          string        // optional ACL username
	Password          string        // optional password
	MaxIdle           int           // max idle connections kept in the pool
	IdleTimeout       time.Duration // close connections idle longer than this
	InternalMaxActive int
}

// DefaultConfig provides sensible pool defaults; callers typically copy and
// override individual fields.
var DefaultConfig = &Config{
	MaxIdle:     10,
	IdleTimeout: 300 * time.Second,
}
// CreatePool creates and returns a new redis.Pool from the passed in Config.
// The returned error is currently always nil; the signature leaves room for
// future validation of invalid or conflicting values. The Pool is not
// "tested" (no connection is dialed here).
// NOTE(review): Config.InternalMaxActive is not applied (MaxActive is left at
// its zero value, i.e. unlimited) — confirm whether that is intentional.
func CreatePool(config *Config) (*redis.Pool, error) {
	return &redis.Pool{
		Dial: func() (redis.Conn, error) {
			return redis.Dial(
				config.Network,
				config.Address,
				redis.DialDatabase(config.Database),
				redis.DialUsername(config.Username),
				redis.DialPassword(config.Password),
			)
		},
		MaxIdle:     config.MaxIdle,
		IdleTimeout: config.IdleTimeout,
	}, nil
}
// TestPool tries to perform a PING command on a connection retrieved from
// pool. Any error is wrapped and returned; nil means the pool can hand out a
// working connection.
func TestPool(ctx context.Context, pool *redis.Pool) error {
	// Derive a cancellable context solely for the checkout and release it
	// immediately afterwards.
	ctx, cancel := context.WithCancel(ctx)
	conn, err := pool.GetContext(ctx)
	cancel()
	if err != nil {
		return errors.Wrap(err, "redis.TestPool: getting connection from pool failed")
	}
	_, pingErr := conn.Do("PING")
	// Return the connection to the pool regardless of the PING outcome.
	_ = conn.Close()
	if pingErr != nil {
		return errors.Wrap(pingErr, "redis.TestPool: performing PING failed")
	}
	return nil
}
|
package evaluation
// #cgo CFLAGS: -I${SRCDIR}/../rust
// #cgo LDFLAGS: -L${SRCDIR}/../rust/expr_tree/target/release -lexpr_tree
// #include "expr_tree/src/expr_tree.h"
// #include <stdlib.h>
import "C"
import (
"reflect"
"unsafe"
)
// EvalFromBytesInRust passes the given flatbuffer to Rust for evaluation,
// and returns the result. The error result is currently always nil.
func EvalFromBytesInRust(bs []byte) (float64, error) {
	// View the slice header directly so the underlying data pointer and
	// length can be handed to C without copying.
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&bs))
	data := (*C.char)(unsafe.Pointer(sh.Data))
	ln := C.int(sh.Len)
	// NOTE(review): assumes eval_from_c only reads the buffer for the
	// duration of the call and does not retain the pointer — confirm against
	// the Rust side.
	ans := C.eval_from_c(data, ln)
	return float64(ans), nil
}
}
// GetBytesFromRust() returns a byte slice that contains a
// simple expression tree in a flatbuffer.
// The returned function must be called to free the memory
// when it is no longer needed; using the slice after calling it is a
// use-after-free, since the slice aliases Rust-owned memory (no copy).
func GetBytesFromRust() ([]byte, func(), error) {
	var ln C.int
	var offset C.int
	// Rust allocates the buffer and reports its total length plus the offset
	// at which the flatbuffer payload begins.
	ptr := C.get_expr_tree(&ln, &offset)
	// Build a Go slice header that aliases the C allocation.
	sh := new(reflect.SliceHeader)
	sh.Data = uintptr(unsafe.Pointer(ptr))
	sh.Len = int(ln)
	sh.Cap = int(ln)
	bs := *(*[]byte)(unsafe.Pointer(sh))
	// Skip the leading bytes before the flatbuffer payload.
	bs = bs[offset:]
	// freeFn releases the Rust-side allocation.
	freeFn := func() {
		C.free_expr_tree(ptr, ln)
	}
	return bs, freeFn, nil
}
|
package 数组
import (
"bytes"
"strings"
)
// numUniqueEmails counts the distinct delivery addresses in emails after
// normalizing each one with getFormattedEmail.
func numUniqueEmails(emails []string) int {
	seen := make(map[string]bool, len(emails))
	for _, e := range emails {
		seen[getFormattedEmail(e)] = true
	}
	return len(seen)
}
// getFormattedEmail normalizes an address: the local part (before '@') is
// canonicalized, the domain part is kept verbatim.
func getFormattedEmail(email string) string {
	pieces := strings.Split(email, "@")
	local, domain := pieces[0], pieces[1]
	return getFormattedLocalName(local) + "@" + domain
}
// getFormattedLocalName canonicalizes the local part of an address: dots are
// dropped and everything from the first '+' onward is ignored.
func getFormattedLocalName(sourceLocalName string) string {
	var canon bytes.Buffer
	for i := range sourceLocalName {
		switch sourceLocalName[i] {
		case '+':
			// '+' starts a sub-address tag; ignore the rest.
			return canon.String()
		case '.':
			// Dots are insignificant in the local part.
		default:
			canon.WriteByte(sourceLocalName[i])
		}
	}
	return canon.String()
}
/*
题目链接: https://leetcode-cn.com/problems/unique-email-addresses/
*/
|
package main
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// buildTree reconstructs a binary tree from its preorder and inorder
// traversals (LeetCode 105). Node values are assumed to be distinct.
func buildTree(preorder []int, inorder []int) *TreeNode {
	return buildfTreeRe(preorder, inorder)
}
// buildfTreeRe builds the tree recursively: the first preorder element is the
// root; its position in the inorder sequence splits the remaining values into
// the left and right subtrees.
func buildfTreeRe(preorder []int, inorder []int) *TreeNode {
	if len(preorder) == 0 && len(inorder) == 0 {
		return nil
	}
	root := &TreeNode{
		Val: preorder[0],
	}
	split := findK(preorder[0], inorder)
	// The left subtree occupies the next `split` preorder values and the
	// inorder prefix; everything after belongs to the right subtree.
	root.Left = buildfTreeRe(preorder[1:split+1], inorder[:split])
	root.Right = buildfTreeRe(preorder[split+1:], inorder[split+1:])
	return root
}
// findK returns the index of find within inorder, or -1 when it is absent.
func findK(find int, inorder []int) int {
	for idx, val := range inorder {
		if val == find {
			return idx
		}
	}
	return -1
}
|
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package main
import (
"fmt"
"log"
"strings"
"app/context"
"fidl/bindings"
"netstack/link/eth"
"syscall/zx"
"syscall/zx/mxerror"
"garnet/public/lib/netstack/fidl/net_address"
nsfidl "garnet/public/lib/netstack/fidl/netstack"
"github.com/google/netstack/tcpip"
"github.com/google/netstack/tcpip/transport/tcp"
"github.com/google/netstack/tcpip/transport/udp"
)
// netstackImpl serves one FIDL Netstack client connection: stub dispatches
// incoming requests, listener (if registered) receives change notifications.
type netstackImpl struct {
	listener *nsfidl.NotificationListener_Proxy
	stub     *bindings.Stub
}
// toNetAddress converts a tcpip.Address (raw 4- or 16-byte value) into a FIDL
// NetAddress. Any other length yields an Unspecified-family address.
func toNetAddress(addr tcpip.Address) net_address.NetAddress {
	out := net_address.NetAddress{Family: net_address.NetAddressFamily_Unspecified}
	switch len(addr) {
	case 4:
		var v4 [4]uint8
		copy(v4[:], addr[:])
		out.Family = net_address.NetAddressFamily_Ipv4
		out.Ipv4 = &v4
	case 16:
		var v6 [16]uint8
		copy(v6[:], addr[:])
		out.Family = net_address.NetAddressFamily_Ipv6
		out.Ipv6 = &v6
	}
	return out
}
// toSubnets wraps each address in a FIDL Subnet with a hard-coded /64 prefix.
func toSubnets(addrs []tcpip.Address) []net_address.Subnet {
	out := make([]net_address.Subnet, len(addrs))
	for i, a := range addrs {
		// TODO: prefix len?
		out[i] = net_address.Subnet{Addr: toNetAddress(a), PrefixLen: 64}
	}
	return out
}
// getInterfaces snapshots the netstack's interfaces as FIDL NetInterface
// records, holding ns.mu for the duration of the walk.
func getInterfaces() (out []nsfidl.NetInterface) {
	ns.mu.Lock()
	defer ns.mu.Unlock()
	// Cleanup: the original kept an `index` counter that was incremented but
	// never read; it has been removed.
	for nicid, ifs := range ns.ifStates {
		// Long-hand for: broadaddr = ifs.nic.Addr | ^ifs.nic.Netmask
		broadaddr := []byte(ifs.nic.Addr)
		for i := range broadaddr {
			broadaddr[i] |= ^ifs.nic.Netmask[i]
		}
		var flags uint32
		if ifs.state == eth.StateStarted {
			flags |= nsfidl.NetInterfaceFlagUp
		}
		outif := nsfidl.NetInterface{
			Id:        uint32(nicid),
			Flags:     flags,
			Name:      fmt.Sprintf("en%d", nicid),
			Addr:      toNetAddress(ifs.nic.Addr),
			Netmask:   toNetAddress(tcpip.Address(ifs.nic.Netmask)),
			Broadaddr: toNetAddress(tcpip.Address(broadaddr)),
			Hwaddr:    []uint8(ifs.nic.Mac[:]),
			Ipv6addrs: toSubnets(ifs.nic.Ipv6addrs),
		}
		out = append(out, outif)
	}
	return out
}
// RegisterListener stores a proxy to the client's NotificationListener so
// interface-change events can be pushed to it. A nil listener is ignored.
// NOTE(review): a second registration silently replaces the first — confirm
// that is the intended contract.
func (ni *netstackImpl) RegisterListener(listener *nsfidl.NotificationListener_Pointer) (err error) {
	if listener != nil {
		lp := nsfidl.NewProxyForNotificationListener(*listener, bindings.GetAsyncWaiter())
		ni.listener = lp
	}
	return nil
}
// GetPortForService resolves a service name (e.g. "http") to its well-known
// port for the given protocol. When the protocol is unspecified, TCP is
// tried first and UDP is used as a fallback.
func (ni *netstackImpl) GetPortForService(service string, protocol nsfidl.Protocol) (port uint16, err error) {
	switch protocol {
	case nsfidl.Protocol_Udp:
		port, err = serviceLookup(service, udp.ProtocolNumber)
	case nsfidl.Protocol_Tcp:
		port, err = serviceLookup(service, tcp.ProtocolNumber)
	default:
		port, err = serviceLookup(service, tcp.ProtocolNumber)
		if err != nil {
			port, err = serviceLookup(service, udp.ProtocolNumber)
		}
	}
	return port, err
}
// GetAddress resolves name via the stack's DNS client and returns one socket
// address per record, each carrying the given port. DNS failures are reported
// in-band through netErr; the trailing Go error is always nil.
func (ni *netstackImpl) GetAddress(name string, port uint16) (out []net_address.SocketAddress, netErr nsfidl.NetErr, retErr error) {
	// TODO: This should handle IP address strings, empty strings, "localhost", etc. Pull the logic from
	// fdio's getaddrinfo into here.
	addrs, err := ns.dispatcher.dnsClient.LookupIP(name)
	if err == nil {
		out = make([]net_address.SocketAddress, len(addrs))
		netErr = nsfidl.NetErr{Status: nsfidl.Status_Ok}
		for i, addr := range addrs {
			// Only well-formed IPv4/IPv6 addresses are converted; other
			// lengths leave a zero-valued entry in the output slice.
			switch len(addr) {
			case 4, 16:
				out[i].Addr = toNetAddress(addr)
				out[i].Port = port
			}
		}
	} else {
		netErr = nsfidl.NetErr{Status: nsfidl.Status_DnsError, Message: err.Error()}
	}
	return out, netErr, nil
}
// GetInterfaces returns a snapshot of all known network interfaces.
func (ni *netstackImpl) GetInterfaces() (out []nsfidl.NetInterface, err error) {
	return getInterfaces(), nil
}

// GetNodeName returns the device's node name, read under the netstack lock.
func (ni *netstackImpl) GetNodeName() (out string, err error) {
	ns.mu.Lock()
	nodename := ns.nodename
	ns.mu.Unlock()
	return nodename, nil
}
// GetRouteTable returns the stack's route table as FIDL entries. Empty
// destination/mask/gateway fields are padded to the address length of the
// first non-empty field so they carry the appropriate NetAddressFamily.
func (ni *netstackImpl) GetRouteTable() (out []nsfidl.RouteTableEntry, err error) {
	ns.mu.Lock()
	table := ns.stack.GetRouteTable()
	ns.mu.Unlock()
	for _, route := range table {
		// Ensure that if any of the returned addresses are "empty",
		// they still have the appropriate NetAddressFamily.
		l := 0
		if len(route.Destination) > 0 {
			l = len(route.Destination)
		} else if len(route.Mask) > 0 {
			// BUG fix: this branch previously read len(route.Destination),
			// which is zero here, leaving l at 0 for mask-only routes.
			l = len(route.Mask)
		} else if len(route.Gateway) > 0 {
			l = len(route.Gateway)
		}
		dest := route.Destination
		mask := route.Mask
		gateway := route.Gateway
		if len(dest) == 0 {
			dest = tcpip.Address(strings.Repeat("\x00", l))
		}
		if len(mask) == 0 {
			mask = tcpip.Address(strings.Repeat("\x00", l))
		}
		if len(gateway) == 0 {
			gateway = tcpip.Address(strings.Repeat("\x00", l))
		}
		out = append(out, nsfidl.RouteTableEntry{
			Destination: toNetAddress(dest),
			Netmask:     toNetAddress(mask),
			Gateway:     toNetAddress(gateway),
			Nicid:       uint32(route.NIC),
		})
	}
	return out, nil
}
// GetAggregateStats converts the stack-wide packet and TCP counters into the
// FIDL AggregateStats structure.
func (ni *netstackImpl) GetAggregateStats() (stats nsfidl.AggregateStats, err error) {
	s := ns.stack.Stats()
	return nsfidl.AggregateStats{
		UnknownProtocolReceivedPackets:        s.UnknownProtocolRcvdPackets,
		UnknownNetworkEndpointReceivedPackets: s.UnknownNetworkEndpointRcvdPackets,
		MalformedReceivedPackets:              s.MalformedRcvdPackets,
		DroppedPackets:                        s.DroppedPackets,
		TcpStats: nsfidl.TcpStats{
			ActiveConnectionOpenings:  s.TCP.ActiveConnectionOpenings,
			PassiveConnectionOpenings: s.TCP.PassiveConnectionOpenings,
			FailedConnectionAttempts:  s.TCP.FailedConnectionAttempts,
			ValidSegmentsReceived:     s.TCP.ValidSegmentsReceived,
			InvalidSegmentsReceived:   s.TCP.InvalidSegmentsReceived,
			SegmentsSent:              s.TCP.SegmentsSent,
			ResetsSent:                s.TCP.ResetsSent,
		},
	}, nil
}
// GetStats returns the per-interface statistics for the given NIC id, or an
// error when the interface does not exist.
func (ni *netstackImpl) GetStats(nicid uint32) (stats nsfidl.NetInterfaceStats, err error) {
	// Pure reading of statistics. No critical section. No lock is needed.
	ifState, ok := ns.ifStates[tcpip.NICID(nicid)]
	if !ok {
		return nsfidl.NetInterfaceStats{}, fmt.Errorf("no such interface id: %d", nicid)
	}
	return ifState.statsEP.Stats, nil
}
// SetInterfaceStatus brings the given NIC's ethernet device up or down.
// Returns an error when the interface id is unknown.
func (ni *netstackImpl) SetInterfaceStatus(nicid uint32, enabled bool) (err error) {
	ifState, ok := ns.ifStates[tcpip.NICID(nicid)]
	if !ok {
		// TODO(mpcomplete): This will close the FIDL channel. Should fail more gracefully.
		return fmt.Errorf("no such interface id: %d", nicid)
	}
	if enabled {
		ifState.eth.Start()
	} else {
		ifState.eth.Down()
	}
	return nil
}
// onInterfacesChanged forwards the updated interface list to this client's
// registered listener, if any.
func (ni *netstackImpl) onInterfacesChanged(interfaces []nsfidl.NetInterface) {
	if ni.listener != nil {
		ni.listener.OnInterfacesChanged(interfaces)
	}
}
// netstackDelegate tracks all currently connected Netstack FIDL clients.
type netstackDelegate struct {
	clients []*netstackImpl
}
// remove deletes client from clients (order is not preserved) and returns the
// shortened slice. When the client is not present the slice is returned
// unchanged.
func remove(clients []*netstackImpl, client *netstackImpl) []*netstackImpl {
	for i, s := range clients {
		if s == client {
			// BUG fix: the original swapped the client to the end but never
			// truncated the slice, so the client was never actually removed.
			last := len(clients) - 1
			clients[i] = clients[last]
			clients[last] = nil // drop the reference so it can be collected
			return clients[:last]
		}
	}
	return clients
}
// Bind services a new Netstack FIDL client: it creates a stub for the
// request, tracks the client, and serves its requests on a dedicated
// goroutine until the peer closes the channel.
func (delegate *netstackDelegate) Bind(request nsfidl.Netstack_Request) {
	client := &netstackImpl{}
	client.stub = request.NewStub(client, bindings.GetAsyncWaiter())
	delegate.clients = append(delegate.clients, client)
	go func() {
		for {
			if err := client.stub.ServeRequest(); err != nil {
				// Peer-closed is the normal shutdown path; anything else is
				// worth logging.
				if mxerror.Status(err) != zx.ErrPeerClosed {
					log.Println(err)
				}
				break
			}
		}
		// NOTE(review): delegate.clients is appended to above and mutated
		// here from the serving goroutine without synchronization — confirm
		// all access happens on a single dispatcher.
		delegate.clients = remove(delegate.clients, client)
	}()
}
// Quit closes the stubs of all connected clients, terminating their serving
// goroutines.
func (delegate *netstackDelegate) Quit() {
	for _, client := range delegate.clients {
		client.stub.Close()
	}
}
// netstackService is the process-wide delegate; non-nil once
// AddNetstackService has been called.
var netstackService *netstackDelegate

// AddNetstackService registers the NetstackService with the application context,
// allowing it to respond to FIDL queries. It must be called at most once.
func AddNetstackService(ctx *context.Context) error {
	if netstackService != nil {
		// BUG fix: the message previously named the wrong function
		// ("AddNetworkService").
		return fmt.Errorf("AddNetstackService must be called only once")
	}
	netstackService = &netstackDelegate{}
	ctx.OutgoingService.AddService(&nsfidl.Netstack_ServiceBinder{netstackService})
	return nil
}
// OnInterfacesChanged broadcasts the current interface list to every
// connected client's listener. It is a no-op before AddNetstackService runs
// or while no clients are connected.
func OnInterfacesChanged() {
	if netstackService != nil && netstackService.clients != nil {
		interfaces := getInterfaces()
		for _, client := range netstackService.clients {
			client.onInterfacesChanged(interfaces)
		}
	}
}
|
// convertToTitle converts a positive integer to its Excel-style column title
// (1 -> "A", 26 -> "Z", 27 -> "AA", ...). This is bijective base-26: each
// digit is shifted down by one before taking the remainder.
func convertToTitle(n int) string {
	letters := []rune{}
	for n != 0 {
		n--
		digit := rune(n%26) + 'A'
		// Prepend: remainders come out least-significant first.
		letters = append([]rune{digit}, letters...)
		n /= 26
	}
	return string(letters)
}
|
package main
import (
"html/template"
"os"
"log"
"strings"
)
// tpl holds the parsed template set; it is populated in init.
var tpl *template.Template

// structure is the data model rendered by the template.
type structure struct {
	Name string
	Age  int
}

// fm maps template function names to Go implementations; templates call
// "trim" to obtain the first three characters of a trimmed string.
var fm = template.FuncMap{
	"trim": first_3,
}
// first_3 returns the first three bytes of str after trimming surrounding
// whitespace. Strings shorter than three bytes are returned whole.
// BUG fix: the original sliced str[:3] unconditionally and panicked on
// shorter inputs.
func first_3(str string) string {
	str = strings.TrimSpace(str)
	if len(str) < 3 {
		return str
	}
	return str[:3]
}
// init parses the template files up front; template.Must panics on parse
// failure, aborting the program at startup.
func init() {
	tpl = template.Must(template.New("").Funcs(fm).ParseFiles("passfn.gohtml"))
	//tpl = template.Must(template.ParseFiles("fns.gohtml"))
}
// main renders passfn.gohtml to stdout with a two-element slice of structure
// values, exiting fatally on template errors.
func main() {
	people := []structure{
		{Name: "Aman", Age: 21},
		{Name: "Ambuj", Age: 23},
	}
	if err := tpl.ExecuteTemplate(os.Stdout, "passfn.gohtml", people); err != nil {
		log.Fatalln(err)
	}
}
|
package listener
import (
"app/base/database"
"app/base/structures"
"github.com/bmizerany/assert"
"testing"
"app/base/core"
)
// TestStorageInit checks that a fresh storage starts empty and reports the
// requested capacity.
func TestStorageInit(t *testing.T) {
	s := InitStorage(3, false)
	assert.Equal(t, 0, s.StoredItems())
	assert.Equal(t, 3, s.Capacity())
}
// TestStorageFlush verifies that buffered items are written to the database
// on an explicit Flush.
func TestStorageFlush(t *testing.T) {
	core.SetupTestEnvironment()
	storage := InitStorage(3, false)
	for _, item := range []structures.HostDAO{{ID: 1}, {ID: 2}} {
		// BUG fix: shadow the loop variable so &item does not alias the
		// single shared iteration variable (pre-Go-1.22 semantics), which
		// would make every stored pointer refer to the last item.
		item := item
		err := storage.Add(&item)
		assert.Equal(t, nil, err)
	}
	assert.Equal(t, 2, storage.StoredItems())
	assert.Equal(t, 3, storage.Capacity())
	err := storage.Flush() // write items to database
	assert.Equal(t, nil, err)
	// ensure items in database
	cnt := 0
	database.Db.Model(&structures.HostDAO{}).Count(&cnt)
	assert.Equal(t, 2, cnt)
}
// TestStorageBuffer verifies that the buffer auto-flushes once capacity is
// reached, leaving only the overflow item in memory.
func TestStorageBuffer(t *testing.T) {
	core.SetupTestEnvironment()
	storage := InitStorage(2, false)
	for _, item := range []structures.HostDAO{{ID: 1}, {ID: 2}, {ID: 3}} {
		// BUG fix: shadow the loop variable so &item does not alias the
		// single shared iteration variable (pre-Go-1.22 semantics).
		item := item
		err := storage.Add(&item)
		assert.Equal(t, nil, err)
	}
	assert.Equal(t, 1, storage.StoredItems())
	assert.Equal(t, 2, storage.Capacity())
	// ensure items in database
	cnt := 0
	database.Db.Model(&structures.HostDAO{}).Count(&cnt)
	assert.Equal(t, 2, cnt)
}
|
package transactions
import (
"encoding/json"
"net/http"
"github.com/garyburd/redigo/redis"
"github.com/felipeguilhermefs/restis/router"
)
// MultiRoute wires the MULTI handler to POST /multi on the shared connection.
func MultiRoute(conn redis.Conn) router.Route {
	return router.Route{
		"/multi",
		"POST",
		MultiHandler(conn),
	}
}
// MultiHandler starts a redis MULTI transaction on the shared connection and
// writes the reply (normally "OK") as JSON.
// NOTE(review): on a redis error this responds 200 with JSON null rather than
// an error status, and an encode failure panics — confirm both match the
// project's conventions before changing.
func MultiHandler(conn redis.Conn) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		value, err := redis.String(conn.Do("MULTI"))
		if err != nil {
			json.NewEncoder(w).Encode(nil)
			return
		}
		if err := json.NewEncoder(w).Encode(value); err != nil {
			panic(err)
		}
	}
}
|
package main
import "fmt"
// Vehicle is anything that can move.
type Vehicle interface {
	Move()
}

// Car is a Vehicle whose movement is described by MovementType.
type Car struct {
	MovementType string
}

// This method means type Car implements the interface Vehicle,
// but we don't need to explicitly declare that it does so.
func (c Car) Move() {
	fmt.Println(c.MovementType)
}
func main() {
var v Vehicle = Car{"Accelerate"}
v.Move()
}
|
package command
import (
"fmt"
"github.com/urfave/cli"
"mix/core/logger"
"mix/core/plugin"
"mix/plugins/mysql"
"os"
)
// Configuration keys and identifiers used by the mysql plugin commands.
const (
	NAME       = "mysql"
	DATABASE   = "database"
	CONNECTION = "connection"
	USERS      = "users"
	USERNAME   = "username"
	PASSWORD   = "password"
	PRIVILEGES = "privileges"
	HOST       = "host"
	CHARSET    = "charset"
	COLLATE    = "collate"
	DAEMON     = "daemon"
)
// Handler implements the mysql command plugin: it carries the plugin
// configuration, the known table entities, and the working directory.
type Handler struct {
	config   Config
	tables   map[string]*mysql.Entity
	rootPath string
}
// NewHandler constructs a Handler rooted at the current working directory,
// exiting the process when the directory cannot be determined.
func NewHandler() *Handler {
	handler := new(Handler)
	var err error
	handler.rootPath, err = os.Getwd()
	if err != nil {
		logger.Error("New Handler error", err)
		os.Exit(1)
	}
	return handler
}
// Name returns the plugin identifier used for registration and config lookup.
func (p *Handler) Name() string {
	return NAME
}
// loadConfig unmarshals the plugin configuration into p.config, exiting the
// process on failure.
func (p *Handler) loadConfig() {
	err := plugin.NewConfig(NAME).UnmarshalExact(&p.config)
	if err != nil {
		// BUG fix: previously logged "New Handler error", copy-pasted from
		// NewHandler, which made failures here look like constructor errors.
		logger.Error("load config error", err)
		os.Exit(1)
	}
}
// Commands builds the urfave/cli command set exposed by the mysql plugin:
// diff, sql, sync and monitor.
// NOTE(review): fmt.Sprintf("%s", "database") and fmt.Sprintf("%s", DAEMON)
// are no-op wrappers; they are the only uses of fmt in this file, so
// simplifying them requires also dropping the import.
func (p *Handler) Commands() cli.Commands {
	// Required flag naming the migration database, shared by several commands.
	migrationDatabaseFlag := cli.StringFlag{
		Name:     fmt.Sprintf("%s", "database"),
		Required: true,
		Usage:    "Migration database name",
	}
	commands := []cli.Command{
		{
			Name:   "mysql:diff",
			Usage:  "-",
			Action: p.DiffCommand,
			Flags: []cli.Flag{
				migrationDatabaseFlag,
				cli.StringFlag{
					Required: true,
					Name:     "connections",
					Usage:    "Select connections, example: conn1:conn2",
				},
				cli.StringFlag{
					Required: true,
					Name:     "databases",
					Usage:    "Select connection1, example: db1:db2",
				},
			},
		},
		{
			Name:   "mysql:sql",
			Usage:  "-",
			Action: p.SqlCommand,
			Flags: []cli.Flag{
				migrationDatabaseFlag,
			},
		},
		{
			Name:        "mysql:sync",
			Usage:       "mix mysql:sync --connection=con1 --database=db1",
			Description: "Generate mysql table entities.",
			Action:      p.SyncCommand,
			Flags: []cli.Flag{
				cli.StringFlag{
					Required: true,
					Name:     "connection",
					Usage:    "Select connection",
				},
				migrationDatabaseFlag,
			},
		},
		{
			Name:   "mysql:monitor",
			Usage:  "-",
			Action: p.MonitorCommand,
			Flags: []cli.Flag{
				cli.StringFlag{
					Required: true,
					Name:     "connection",
					Usage:    "Select connection",
				},
				cli.BoolFlag{
					Name:  fmt.Sprintf("%s", DAEMON),
					Usage: "Run as daemon",
				},
			},
		},
	}
	return commands
}
|
package api
import (
"fmt"
"net/http"
"net/http/httptest"
"os"
"regexp"
"testing"
"github.com/MakeNowJust/heredoc"
"github.com/cli/cli/v2/pkg/iostreams"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewHTTPClient verifies the request headers and verbose-log output of
// clients returned by NewHTTPClient, across combinations of host, token
// availability, Accept-header opt-out, and DEBUG/GH_DEBUG settings.
func TestNewHTTPClient(t *testing.T) {
	type args struct {
		config     tokenGetter
		appVersion string
		setAccept  bool
	}
	tests := []struct {
		name       string
		args       args
		envDebug   string
		setGhDebug bool
		envGhDebug string
		host       string
		wantHeader map[string]string
		wantStderr string
	}{
		{
			name: "github.com with Accept header",
			args: args{
				config:     tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion: "v1.2.3",
				setAccept:  true,
			},
			host: "github.com",
			wantHeader: map[string]string{
				"authorization": "token MYTOKEN",
				"user-agent":    "GitHub CLI v1.2.3",
				"accept":        "application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview",
			},
			wantStderr: "",
		},
		{
			name: "github.com no Accept header",
			args: args{
				config:     tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion: "v1.2.3",
				setAccept:  false,
			},
			host: "github.com",
			wantHeader: map[string]string{
				"authorization": "token MYTOKEN",
				"user-agent":    "GitHub CLI v1.2.3",
				"accept":        "",
			},
			wantStderr: "",
		},
		{
			name: "github.com no authentication token",
			args: args{
				config:     tinyConfig{"example.com:oauth_token": "MYTOKEN"},
				appVersion: "v1.2.3",
				setAccept:  true,
			},
			host: "github.com",
			wantHeader: map[string]string{
				"authorization": "",
				"user-agent":    "GitHub CLI v1.2.3",
				"accept":        "application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview",
			},
			wantStderr: "",
		},
		{
			name: "github.com in verbose mode",
			args: args{
				config:     tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion: "v1.2.3",
				setAccept:  true,
			},
			host:       "github.com",
			envDebug:   "api",
			setGhDebug: false,
			wantHeader: map[string]string{
				"authorization": "token MYTOKEN",
				"user-agent":    "GitHub CLI v1.2.3",
				"accept":        "application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview",
			},
			wantStderr: heredoc.Doc(`
* Request at <time>
* Request to http://<host>:<port>
> GET / HTTP/1.1
> Host: github.com
> Accept: application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview
> Authorization: token ████████████████████
> Content-Type: application/json; charset=utf-8
> Time-Zone: <timezone>
> User-Agent: GitHub CLI v1.2.3
< HTTP/1.1 204 No Content
< Date: <time>
* Request took <duration>
`),
		},
		{
			name: "github.com in verbose mode",
			args: args{
				config:     tinyConfig{"github.com:oauth_token": "MYTOKEN"},
				appVersion: "v1.2.3",
				setAccept:  true,
			},
			host:       "github.com",
			envGhDebug: "api",
			setGhDebug: true,
			wantHeader: map[string]string{
				"authorization": "token MYTOKEN",
				"user-agent":    "GitHub CLI v1.2.3",
				"accept":        "application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview",
			},
			wantStderr: heredoc.Doc(`
* Request at <time>
* Request to http://<host>:<port>
> GET / HTTP/1.1
> Host: github.com
> Accept: application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview
> Authorization: token ████████████████████
> Content-Type: application/json; charset=utf-8
> Time-Zone: <timezone>
> User-Agent: GitHub CLI v1.2.3
< HTTP/1.1 204 No Content
< Date: <time>
* Request took <duration>
`),
		},
		{
			name: "GHES Accept header",
			args: args{
				config:     tinyConfig{"example.com:oauth_token": "GHETOKEN"},
				appVersion: "v1.2.3",
				setAccept:  true,
			},
			host: "example.com",
			wantHeader: map[string]string{
				"authorization": "token GHETOKEN",
				"user-agent":    "GitHub CLI v1.2.3",
				"accept":        "application/vnd.github.merge-info-preview+json, application/vnd.github.nebula-preview",
			},
			wantStderr: "",
		},
	}
	// The test server records the last request so each case can inspect the
	// headers the client actually sent.
	var gotReq *http.Request
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotReq = r
		w.WriteHeader(http.StatusNoContent)
	}))
	defer ts.Close()
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Setenv("DEBUG", tt.envDebug)
			if tt.setGhDebug {
				t.Setenv("GH_DEBUG", tt.envGhDebug)
			} else {
				os.Unsetenv("GH_DEBUG")
			}
			ios, _, _, stderr := iostreams.Test()
			client, err := NewHTTPClient(HTTPClientOptions{
				AppVersion:        tt.args.appVersion,
				Config:            tt.args.config,
				Log:               ios.ErrOut,
				SkipAcceptHeaders: !tt.args.setAccept,
			})
			require.NoError(t, err)
			req, err := http.NewRequest("GET", ts.URL, nil)
			// Check the error before touching req: on failure req is nil and
			// the Header/Host accesses below would panic. (The original code
			// set the header and host before checking err.)
			require.NoError(t, err)
			req.Header.Set("time-zone", "Europe/Amsterdam")
			req.Host = tt.host
			res, err := client.Do(req)
			require.NoError(t, err)
			// Close the body so the transport can reuse the connection.
			defer res.Body.Close()
			for name, value := range tt.wantHeader {
				assert.Equal(t, value, gotReq.Header.Get(name), name)
			}
			assert.Equal(t, 204, res.StatusCode)
			assert.Equal(t, tt.wantStderr, normalizeVerboseLog(stderr.String()))
		})
	}
}
// tinyConfig is a minimal tokenGetter backed by a map keyed as
// "<host>:oauth_token".
type tinyConfig map[string]string

// Token looks up the oauth token recorded for host. It always reports
// "oauth_token" as the token source.
func (c tinyConfig) Token(host string) (string, string) {
	key := fmt.Sprintf("%s:%s", host, "oauth_token")
	return c[key], "oauth_token"
}
// Patterns matching the run-specific parts of the verbose HTTP log.
var requestAtRE = regexp.MustCompile(`(?m)^\* Request at .+`)
var dateRE = regexp.MustCompile(`(?m)^< Date: .+`)
var hostWithPortRE = regexp.MustCompile(`127\.0\.0\.1:\d+`)
var durationRE = regexp.MustCompile(`(?m)^\* Request took .+`)
var timezoneRE = regexp.MustCompile(`(?m)^> Time-Zone: .+`)

// normalizeVerboseLog scrubs timestamps, host:port, duration and timezone
// details from a verbose log so tests can compare it against a fixed string.
func normalizeVerboseLog(t string) string {
	scrubs := []struct {
		re   *regexp.Regexp
		repl string
	}{
		{requestAtRE, "* Request at <time>"},
		{hostWithPortRE, "<host>:<port>"},
		{dateRE, "< Date: <time>"},
		{durationRE, "* Request took <duration>"},
		{timezoneRE, "> Time-Zone: <timezone>"},
	}
	for _, s := range scrubs {
		t = s.re.ReplaceAllString(t, s.repl)
	}
	return t
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01400103 is the XML document wrapper for the
// caaa.014.001.03 (AcceptorDiagnosticResponse) ISO 20022 message.
type Document01400103 struct {
	XMLName xml.Name                       `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.014.001.03 Document"`
	Message *AcceptorDiagnosticResponseV03 `xml:"AccptrDgnstcRspn"`
}
// AddMessage allocates the document's message payload and returns it so the
// caller can populate it.
func (d *Document01400103) AddMessage() *AcceptorDiagnosticResponseV03 {
	msg := &AcceptorDiagnosticResponseV03{}
	d.Message = msg
	return msg
}
// AcceptorDiagnosticResponseV03 is sent by the acquirer (or its agent) to
// provide to the acceptor the result of the diagnostic request.
type AcceptorDiagnosticResponseV03 struct {
	// Diagnostic response message management information.
	Header *iso20022.Header7 `xml:"Hdr"`
	// Information related to the diagnostic response.
	DiagnosticResponse *iso20022.AcceptorDiagnosticResponse3 `xml:"DgnstcRspn"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType8 `xml:"SctyTrlr"`
}
// AddHeader allocates the message header and returns it for population.
func (a *AcceptorDiagnosticResponseV03) AddHeader() *iso20022.Header7 {
	hdr := &iso20022.Header7{}
	a.Header = hdr
	return hdr
}
// AddDiagnosticResponse allocates the diagnostic-response body and returns
// it for population.
func (a *AcceptorDiagnosticResponseV03) AddDiagnosticResponse() *iso20022.AcceptorDiagnosticResponse3 {
	rsp := &iso20022.AcceptorDiagnosticResponse3{}
	a.DiagnosticResponse = rsp
	return rsp
}
// AddSecurityTrailer allocates the MAC trailer and returns it for population.
func (a *AcceptorDiagnosticResponseV03) AddSecurityTrailer() *iso20022.ContentInformationType8 {
	trl := &iso20022.ContentInformationType8{}
	a.SecurityTrailer = trl
	return trl
}
|
package util
import (
log "github.com/sirupsen/logrus"
"sigs.k8s.io/yaml"
)
// MustUnmarshallYAML decodes text into v. It first tries strict decoding
// (unknown fields are errors); on failure it logs a warning and retries
// leniently. If even lenient decoding fails, it panics.
func MustUnmarshallYAML(text string, v interface{}) {
	data := []byte(text)
	err := yaml.UnmarshalStrict(data, v)
	if err == nil {
		return
	}
	log.Warnf("invalid YAML: %v", err)
	if err = yaml.Unmarshal(data, v); err != nil {
		panic(err)
	}
}
|
package connection
import (
"database/sql"
"go-mysql/config"
"time"
)
// GetGoblogConn returns the configured connection pool for the "goblog"
// database. The pool keeps no idle connections and recycles each connection
// after five minutes.
// (Fixed gofmt violations: space before the parameter list and missing
// space before the opening brace.)
func GetGoblogConn() *sql.DB {
	db := config.GetDbByPath("goblog").GetDb()
	db.SetMaxIdleConns(0)
	db.SetConnMaxLifetime(300 * time.Second)
	return db
}
|
package main
import (
"fmt"
"github.com/pkg/errors"
)
// sampleError is a minimal error type that also satisfies fmt.Stringer.
type sampleError struct {
	s string
}

// Error returns the stored message.
func (e *sampleError) Error() string {
	return e.s
}

// String returns the same text as Error, making *sampleError a fmt.Stringer.
func (e *sampleError) String() string {
	return e.s
}
// main demonstrates github.com/pkg/errors: wrapping, Cause, and %+v stack
// printing.
func main() {
	err := errors.Wrap(mkError("test"), ":wrap") // Wrap() attaches a stack (withStack) at wrap time
	if isStringer(errors.Cause(err)) {
		// e := errors.Unwrap(err) // Unwrap calls err.Unwrap() for details; it does not return the Cause
		fmt.Printf("%+v\n", err)
	}
}
// isStringer reports whether err also implements fmt.Stringer.
func isStringer(err error) bool {
	if _, ok := err.(fmt.Stringer); ok {
		return true
	}
	return false
}
// mkError builds a *sampleError carrying message s, wrapped with a stack
// trace via github.com/pkg/errors.WithStack.
func mkError(s string) error {
	return errors.WithStack(&sampleError{s}) // attach a stack trace
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package apilib
import (
"fmt"
"github.com/iotaledger/wasp/packages/coretypes/requestargs"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address/signaturescheme"
"github.com/iotaledger/wasp/client/level1"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/sctransaction"
"github.com/iotaledger/wasp/packages/sctransaction/txbuilder"
)
// RequestSectionParams describes one request section to add to a smart
// contract transaction.
type RequestSectionParams struct {
	TargetContractID coretypes.ContractID
	EntryPointCode   coretypes.Hname
	TimeLock         uint32
	Transfer         coretypes.ColoredBalances // should not include the request token; it is added automatically
	Args             requestargs.RequestArgs
}
// CreateRequestTransactionParams bundles everything CreateRequestTransaction
// needs: the level-1 node client, the sender's signature scheme, the request
// sections to include, and whether to post and/or wait for confirmation.
type CreateRequestTransactionParams struct {
	Level1Client         level1.Level1Client
	SenderSigScheme      signaturescheme.SignatureScheme
	RequestSectionParams []RequestSectionParams
	Post                 bool
	WaitForConfirmation  bool
}
// CreateRequestTransaction builds a smart contract request transaction from
// the sender's confirmed outputs, signs it, semantically checks it and,
// depending on par.Post / par.WaitForConfirmation, optionally posts it to
// the level-1 node (and waits for confirmation).
//
// The returned transaction is always the locally built one; an error is
// returned if any build, sign, check or post step fails.
func CreateRequestTransaction(par CreateRequestTransactionParams) (*sctransaction.Transaction, error) {
	senderAddr := par.SenderSigScheme.Address()
	// Collect the sender's confirmed outputs to fund the transaction.
	allOuts, err := par.Level1Client.GetConfirmedAccountOutputs(&senderAddr)
	if err != nil {
		return nil, fmt.Errorf("can't get outputs from the node: %v", err)
	}
	txb, err := txbuilder.NewFromOutputBalances(allOuts)
	if err != nil {
		return nil, err
	}
	// Add one request section per requested call.
	for _, sectPar := range par.RequestSectionParams {
		reqSect := sctransaction.NewRequestSectionByWallet(sectPar.TargetContractID, sectPar.EntryPointCode).
			WithTimelock(sectPar.TimeLock).
			WithTransfer(sectPar.Transfer)
		reqSect.WithArgs(sectPar.Args)
		err = txb.AddRequestSection(reqSect)
		if err != nil {
			return nil, err
		}
	}
	tx, err := txb.Build(false)
	//dump := txb.Dump()
	if err != nil {
		return nil, err
	}
	tx.Sign(par.SenderSigScheme)
	// semantic check just in case
	if _, err := tx.Properties(); err != nil {
		return nil, err
	}
	//fmt.Printf("$$$$ dumping builder for %s\n%s\n", tx.ID().String(), dump)
	if !par.Post {
		return tx, nil
	}
	if !par.WaitForConfirmation {
		// Fire-and-forget post.
		if err = par.Level1Client.PostTransaction(tx.Transaction); err != nil {
			return nil, err
		}
		return tx, nil
	}
	// Post and block until the node confirms the transaction.
	err = par.Level1Client.PostAndWaitForConfirmation(tx.Transaction)
	if err != nil {
		return nil, err
	}
	return tx, nil
}
|
package server
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestUnmarshalJSON checks that repeatedValue.UnmarshalJSON routes each JSON
// array shape into the matching typed field: scalars of int64/double/string/
// bool, and nested arrays into the corresponding list fields.
func TestUnmarshalJSON(t *testing.T) {
	u := repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[1, 2, 3]")))
	assert.Equal(t, []int64{1, 2, 3}, u.int64Val)
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[1.2, 2.3, 3.4]")))
	assert.Equal(t, []float64{1.2, 2.3, 3.4}, u.doubleVal)
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[\"foo\", \"bar\"]")))
	assert.Equal(t, []string{"foo", "bar"}, u.stringVal)
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[true, false, true]")))
	assert.Equal(t, []bool{true, false, true}, u.boolVal)
	// Nested arrays populate the *ListVal fields.
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[[1, 2, 3], [4, 5, 6]]")))
	assert.Equal(t, [][]int64{{1, 2, 3}, {4, 5, 6}}, u.int64ListVal)
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[[1.2, 2.3, 3.4], [10.2, 20.3, 30.4]]")))
	assert.Equal(t, [][]float64{{1.2, 2.3, 3.4}, {10.2, 20.3, 30.4}}, u.doubleListVal)
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[[\"foo\", \"bar\"], [\"foo2\", \"bar2\"]]")))
	assert.Equal(t, [][]string{{"foo", "bar"}, {"foo2", "bar2"}}, u.stringListVal)
	u = repeatedValue{}
	assert.Nil(t, u.UnmarshalJSON([]byte("[[true, false, true], [false, true, false]]")))
	assert.Equal(t, [][]bool{{true, false, true}, {false, true, false}}, u.boolListVal)
}
|
package main
import "fmt"
//通道缓冲区
// main demonstrates a buffered channel: two values fit in the buffer
// without a receiver, and are received back in FIFO order.
func main() {
	// Buffered int channel with capacity 2.
	ch := make(chan int, 2)
	// Both sends complete immediately because the buffer has room.
	ch <- 5
	ch <- 10
	// Receive in the order the values were sent.
	first := <-ch
	second := <-ch
	fmt.Println(first)
	fmt.Println(second)
}
|
package client
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetAuthString verifies that GetAuthString reads the ARGO_TOKEN
// environment variable.
func TestGetAuthString(t *testing.T) {
	const token = "my-token"
	_ = os.Setenv("ARGO_TOKEN", token)
	defer func() { _ = os.Unsetenv("ARGO_TOKEN") }()
	assert.Equal(t, token, GetAuthString())
}
// TestNamespace verifies that Namespace reads the ARGO_NAMESPACE
// environment variable.
func TestNamespace(t *testing.T) {
	const ns = "my-ns"
	_ = os.Setenv("ARGO_NAMESPACE", ns)
	defer func() { _ = os.Unsetenv("ARGO_NAMESPACE") }()
	assert.Equal(t, ns, Namespace())
}
|
package checks
import (
"encoding/json"
"fmt"
"os"
"github.com/xeipuuv/gojsonschema"
"github.com/yugabyte/yugabyte-db/managed/yba-installer/common"
log "github.com/yugabyte/yugabyte-db/managed/yba-installer/logging"
"sigs.k8s.io/yaml"
)
// ValidateInstallerConfig is the preflight check that validates the input
// config file against the bundled JSON schema. It may not be skipped.
var ValidateInstallerConfig = &validateConfigCheck{"validate-config", false}

// validateConfigCheck implements the config-validation preflight check.
type validateConfigCheck struct {
	name        string // check name reported in results
	skipAllowed bool   // whether the user may skip this check
}
// Name returns the check's name.
func (s validateConfigCheck) Name() string {
	return s.name
}
// SkipAllowed reports whether the user may skip this check.
func (s validateConfigCheck) SkipAllowed() bool {
	return s.skipAllowed
}
// Execute runs the check: it validates the input config file against the
// JSON schema and reports StatusCritical on any validation error.
// (The previous comment incorrectly mentioned disk space.)
func (s validateConfigCheck) Execute() Result {
	result := Result{
		Check:  s.name,
		Status: StatusPassed,
	}
	result.Error = validateJSONSchema()
	if result.Error != nil {
		result.Status = StatusCritical
	}
	return result
}
// validateJSONSchema checks that the parameters in the input config file are
// valid: it converts the input YAML to JSON and validates the result against
// the bundled JSON schema.
//
// It returns nil when the config is valid and a descriptive error otherwise
// — including when the file cannot be read or parsed, so the caller
// (Execute) can surface the problem as a failed check instead of the
// process exiting via log.Fatal.
func validateJSONSchema() error {
	createdBytes, err := os.ReadFile(common.InputFile())
	if err != nil {
		return fmt.Errorf("could not read config %s: %v", common.InputFile(), err)
	}
	jsonString, err := yaml.YAMLToJSON(createdBytes)
	if err != nil {
		return fmt.Errorf("could not convert config to JSON: %v", err)
	}
	// Round-trip through encoding/json to confirm the document is a JSON
	// object and to normalize it before validation.
	var jsonData map[string]interface{}
	if err := json.Unmarshal(jsonString, &jsonData); err != nil {
		return fmt.Errorf("could not parse config JSON: %v", err)
	}
	jsonBytesInput, err := json.Marshal(jsonData)
	if err != nil {
		return fmt.Errorf("could not re-encode config JSON: %v", err)
	}
	configDirPath := common.GetTemplatesDir()
	jsonSchemaName := fmt.Sprintf("file://%s/yba-installer-input-json-schema.json", configDirPath)
	schemaLoader := gojsonschema.NewReferenceLoader(jsonSchemaName)
	documentLoader := gojsonschema.NewStringLoader(string(jsonBytesInput))
	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		return err
	}
	if !result.Valid() {
		errMsg := "The config at " + common.InputFile() + " is not valid. Errors: \n"
		for _, desc := range result.Errors() {
			errMsg += fmt.Sprintf("- %s\n", desc)
		}
		log.Info(errMsg)
		// Use "%s" so errMsg is never interpreted as a format string
		// (go vet flags fmt.Errorf with a non-constant format argument).
		return fmt.Errorf("%s", errMsg)
	}
	log.Info("Config at " + common.InputFile() + " was found to be valid.")
	return nil
}
|
package testutil
// IntrospectionQuery is the standard GraphQL introspection query used by
// tests to fetch the full schema: query/mutation/subscription root types,
// all types (via the FullType fragment), and directives. The deprecated
// onOperation/onFragment/onField directive fields are requested
// deliberately for coverage.
var IntrospectionQuery = `
query IntrospectionQuery {
__schema {
queryType { name }
mutationType { name }
subscriptionType { name }
types {
...FullType
}
directives {
name
description
locations
args {
...InputValue
}
# deprecated, but included for coverage till removed
onOperation
onFragment
onField
}
}
}
fragment FullType on __Type {
kind
name
description
fields(includeDeprecated: true) {
name
description
args {
...InputValue
}
type {
...TypeRef
}
isDeprecated
deprecationReason
}
inputFields {
...InputValue
}
interfaces {
...TypeRef
}
enumValues(includeDeprecated: true) {
name
description
isDeprecated
deprecationReason
}
possibleTypes {
...TypeRef
}
}
fragment InputValue on __InputValue {
name
description
type { ...TypeRef }
defaultValue
}
fragment TypeRef on __Type {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
}
}
}
}
}
}
}
}
`
|
package index1
import "testing"
// TestMethod1 checks that Method1(10) returns 55 (presumably the sum of
// 1..10 — confirm against Method1's definition).
func TestMethod1(t *testing.T) {
	sum := Method1(10)
	if sum != 55 {
		// Log message: "test data did not match expectations".
		t.Log("测试数据不符合预期")
		t.FailNow()
	}
	// Log message: "test succeeded".
	t.Log("测试成功")
}
|
package main
import (
"flag"
"fmt"
"os"
)
// main demonstrates the flag package. Each flag.X(name, default, usage)
// call registers a command-line flag and returns a pointer to its value,
// which is populated when flag.Parse runs.
func main() {
	var (
		name    = flag.String("name", "张三", "姓名")
		age     = flag.Int("age", 18, "年龄")
		married = flag.Bool("married", false, "婚否")
		delay   = flag.Duration("d", 0, "时间间隔")
	)
	flag.Parse()
	fmt.Println("os args is", os.Args)
	fmt.Println(*name, *age, *married, *delay)
}
|
package cmd
import (
"github.com/bitmaelum/bitmaelum-suite/cmd/bm-client/handlers"
"github.com/spf13/cobra"
)
// listAccountsCmd lists all accounts available in the vault.
var listAccountsCmd = &cobra.Command{
	Use:     "list-accounts",
	Aliases: []string{"list-account", "ls", "list"},
	Short:   "List your accounts",
	Long:    `Displays a list of all your accounts currently available`,
	Run: func(cmd *cobra.Command, args []string) {
		vault := OpenVault()
		handlers.ListAccounts(vault, *displayKeys)
	},
}

// displayKeys is bound in init to the --keys/-k flag; when set, private and
// public keys are included in the listing.
var displayKeys *bool

func init() {
	rootCmd.AddCommand(listAccountsCmd)
	displayKeys = listAccountsCmd.Flags().BoolP("keys", "k", false, "Display private and public key")
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates the time package: date components, formatting with the
// reference layout, Unix timestamps, parsing, arithmetic, and a ticker.
func main() {
	// Current date components.
	timeObj := time.Now()
	year := timeObj.Year()
	month := timeObj.Month()
	day := timeObj.Day()
	fmt.Printf("%d-%02d-%02d \n", year, month, day)
	// time.Time has a Format method. Go's layout is not the usual
	// Y-m-d H:M:S template; it is the reference time
	// 2006-01-02 15:04:05 (mnemonic: 2006 1 2 3 4 5).
	timeObj2 := time.Now()
	fmt.Println(timeObj2.Format("2006-01-02 03:04:05"))
	// Current timestamps.
	timeObj3 := time.Now()
	// Seconds since the Unix epoch.
	unixTime := timeObj3.Unix()
	// Nanoseconds since the Unix epoch (the original comment incorrectly
	// said milliseconds).
	unixNaTime := timeObj3.UnixNano()
	fmt.Println(unixTime)
	fmt.Println(unixNaTime)
	// Convert a timestamp back to a time (arguments: seconds, nanoseconds).
	var timeObj4 = time.Unix(1595289901, 0)
	var timeStr = timeObj4.Format("2006-01-02 15:04:05")
	fmt.Println(timeStr)
	// Parse a date string into a timestamp (stray trailing semicolon removed).
	var timeStr2 = "2020-07-21 08:10:05"
	var tmp = "2006-01-02 15:04:05"
	timeObj5, _ := time.ParseInLocation(tmp, timeStr2, time.Local)
	fmt.Println(timeObj5.Unix())
	// Time arithmetic.
	now := time.Now()
	// One hour from now.
	later := now.Add(time.Hour)
	fmt.Println(later)
	// Ticker firing once per second; stop and exit after six ticks.
	ticker := time.NewTicker(time.Second)
	n := 0
	for i := range ticker.C {
		fmt.Println(i)
		n++
		if n > 5 {
			ticker.Stop()
			return
		}
	}
	// The original code had an infinite sleep/print loop here; it was
	// unreachable because the ticker loop above only exits via return,
	// so it has been removed.
}
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package ionhash
import (
"math"
"testing"
"github.com/amzn/ion-go/ion"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// compareReaders advances both readers in lockstep and asserts they yield
// the same Ion stream: same types, field names, annotations, null-ness, and
// (recursively, for containers) the same values. reader2 must be a
// *hashReader.
func compareReaders(t *testing.T, reader1, reader2 ion.Reader) {
	for hasNext(t, reader1, reader2) {
		type1 := reader1.Type()
		type2 := reader2.Type()
		require.Equal(t, type1.String(), type2.String(), "Ion Types did not match")
		if type1 == ion.NoType {
			break
		}
		ionHashReader, ok := reader2.(*hashReader)
		require.True(t, ok, "Expected reader2 to be of type hashReader")
		// Field names are only meaningful inside a struct.
		if ionHashReader.IsInStruct() {
			compareFieldNames(t, reader1, reader2)
		}
		compareAnnotations(t, reader1, reader2)
		isNull1 := reader1.IsNull()
		isNull2 := reader2.IsNull()
		require.Equal(t, isNull1, isNull2, "Expected readers to have matching IsNull() values")
		if type1 == ion.NullType {
			assert.True(t, isNull1, "Expected reader1.IsNull() to return true")
			assert.True(t, isNull2, "Expected reader2.IsNull() to return true")
		} else if ion.IsScalar(type1) {
			compareScalars(t, type1, reader1, reader2)
		} else if ion.IsContainer(type1) {
			// Recurse into non-null containers.
			if !isNull1 {
				assert.NoError(t, reader1.StepIn(), "Something went wrong executing reader1.StepIn()")
				assert.NoError(t, reader2.StepIn(), "Something went wrong executing reader2.StepIn()")
				compareReaders(t, reader1, reader2)
				assert.NoError(t, reader1.StepOut(), "Something went wrong executing reader1.StepOut()")
				assert.NoError(t, reader2.StepOut(), "Something went wrong executing reader2.StepOut()")
			}
		} else {
			t.Error(&InvalidIonTypeError{type1})
		}
	}
	assert.False(t, hasNext(t, reader1, reader2), "Expected hasNext() to return false")
}
// hasNext advances both readers and reports whether both produced a value,
// asserting that they agree and that any exhausted reader finished cleanly.
func hasNext(t *testing.T, reader1, reader2 ion.Reader) bool {
	ok1, ok2 := reader1.Next(), reader2.Next()
	assert.Equal(t, ok1, ok2, "next results don't match")
	if !ok1 {
		assert.NoError(t, reader1.Err(), "Something went wrong executing reader1.next()")
	}
	if !ok2 {
		assert.NoError(t, reader2.Err(), "Something went wrong executing reader2.next()")
	}
	return ok1 && ok2
}
// compareFieldNames asserts that both readers report the same field name
// for the current value.
func compareFieldNames(t *testing.T, reader1, reader2 ion.Reader) {
	fn1, err := reader1.FieldName()
	require.NoError(t, err, "Something went wrong executing reader1.FieldName()")
	fn2, err := reader2.FieldName()
	require.NoError(t, err, "Something went wrong executing reader2.FieldName()")
	require.True(t, fn1.Equal(fn2), "Expected field names to match")
}
// compareAnnotations asserts that both readers carry identical annotation
// lists on the current value.
func compareAnnotations(t *testing.T, reader1, reader2 ion.Reader) {
	annotations1, err := reader1.Annotations()
	require.NoError(t, err, "Something went wrong executing reader1.Annotations()")
	annotations2, err := reader2.Annotations()
	require.NoError(t, err, "Something went wrong executing reader2.Annotations()")
	require.Equal(t, len(annotations1), len(annotations2), "Expected readers to have same number of annotations")
	for i := range annotations1 {
		assert.True(t, annotations1[i].Equal(&annotations2[i]))
	}
}
// compareScalars asserts that two readers positioned on a scalar of the
// given type hold equal values. Null scalars are accepted as equal and
// compared no further.
func compareScalars(t *testing.T, ionType ion.Type, reader1, reader2 ion.Reader) {
	isNull1 := reader1.IsNull()
	isNull2 := reader2.IsNull()
	require.Equal(t, isNull1, isNull2, "Expected readers to be both null or both non-null")
	if isNull1 {
		return
	}
	switch ionType {
	case ion.BoolType:
		value1, err := reader1.BoolValue()
		assert.NoError(t, err, "Something went wrong executing reader1.BoolValue()")
		value2, err := reader2.BoolValue()
		assert.NoError(t, err, "Something went wrong executing reader2.BoolValue()")
		assert.Equal(t, value1, value2, "Expected bool values to match")
	case ion.IntType:
		// Compare through the accessor matching the stored int width.
		intSize, err := reader1.IntSize()
		assert.NoError(t, err, "Something went wrong executing reader1.IntSize()")
		switch intSize {
		case ion.Int32:
			int1, err := reader1.IntValue()
			assert.NoError(t, err, "Something went wrong executing reader1.IntValue()")
			int2, err := reader2.IntValue()
			assert.NoError(t, err, "Something went wrong executing reader2.IntValue()")
			assert.Equal(t, int1, int2, "Expected int values to match")
		case ion.Int64:
			int1, err := reader1.Int64Value()
			assert.NoError(t, err, "Something went wrong executing reader1.Int64Value()")
			int2, err := reader2.Int64Value()
			assert.NoError(t, err, "Something went wrong executing reader2.Int64Value()")
			assert.Equal(t, int1, int2, "Expected int values to match")
		case ion.BigInt:
			bigInt1, err := reader1.BigIntValue()
			assert.NoError(t, err, "Something went wrong executing reader1.BigIntValue()")
			bigInt2, err := reader2.BigIntValue()
			assert.NoError(t, err, "Something went wrong executing reader2.BigIntValue()")
			assert.Equal(t, bigInt1, bigInt2, "Expected big int values to match")
		default:
			t.Error("Expected intSize to be one of Int32, Int64, Uint64, or BigInt")
		}
	case ion.FloatType:
		float1, err := reader1.FloatValue()
		assert.NoError(t, err, "Something went wrong executing reader1.FloatValue()")
		float2, err := reader2.FloatValue()
		assert.NoError(t, err, "Something went wrong executing reader2.FloatValue()")
		require.True(t, (float1 == nil) == (float2 == nil),
			"Expected float values to be either both null or both not null")
		if float1 != nil {
			// NaN != NaN, so two NaNs are treated as matching here.
			if !math.IsNaN(*float1) && !math.IsNaN(*float2) {
				assert.Equal(t, float1, float2, "Expected float values to match")
			} else if !math.IsNaN(*float1) || !math.IsNaN(*float2) {
				assert.NotEqual(t, float1, float2, "Expected IsNaN float value to differ from a non-IsNaN float value")
			}
		}
	case ion.DecimalType:
		decimal1, err := reader1.DecimalValue()
		assert.NoError(t, err, "Something went wrong executing reader1.DecimalValue()")
		decimal2, err := reader2.DecimalValue()
		assert.NoError(t, err, "Something went wrong executing reader2.DecimalValue()")
		decimalStrictEquals(t, decimal1, decimal2)
	case ion.TimestampType:
		timestamp1, err := reader1.TimestampValue()
		assert.NoError(t, err, "Something went wrong executing reader1.TimestampValue()")
		timestamp2, err := reader2.TimestampValue()
		assert.NoError(t, err, "Something went wrong executing reader2.TimestampValue()")
		assert.Equal(t, timestamp1, timestamp2, "Expected timestamp values to match")
	case ion.StringType:
		str1, err := reader1.StringValue()
		assert.NoError(t, err, "Something went wrong executing reader1.StringValue()")
		str2, err := reader2.StringValue()
		assert.NoError(t, err, "Something went wrong executing reader2.StringValue()")
		assert.Equal(t, str1, str2, "Expected string values to match")
	case ion.SymbolType:
		token1, err := reader1.SymbolValue()
		require.NoError(t, err, "Something went wrong executing reader1.SymbolValue()")
		token2, err := reader2.SymbolValue()
		require.NoError(t, err, "Something went wrong executing reader2.SymbolValue()")
		// isNull1 is necessarily false here (the function returns early for
		// null values), so the original `if isNull1` branch was dead code
		// and has been removed.
		require.Equal(t, token1.Text == nil, token2.Text == nil,
			"Expected the text of both tokens to be null or both not null")
		if token1.Text == nil {
			assert.Equal(t, token1.LocalSID, token2.LocalSID, "Expected token SIDs to match")
		} else {
			assert.Equal(t, token1.Text, token2.Text, "Expected token to have matching text")
		}
	case ion.BlobType, ion.ClobType:
		b1, err := reader1.ByteValue()
		assert.NoError(t, err, "Something went wrong executing reader1.ByteValue()")
		b2, err := reader2.ByteValue()
		assert.NoError(t, err, "Something went wrong executing reader2.ByteValue()")
		assert.True(t, b1 != nil && b2 != nil, "Expected byte arrays to be non-null")
		assert.Equal(t, len(b1), len(b2), "Expected byte arrays to have same length")
		assert.Equal(t, b1, b2, "Expected byte arrays to match")
	default:
		t.Error(InvalidIonTypeError{ionType})
	}
}
// decimalStrictEquals compares two Ion Decimal values for equality,
// additionally requiring that they agree on being negative zero.
func decimalStrictEquals(t *testing.T, decimal1, decimal2 *ion.Decimal) {
	assert.Equal(t, decimal1, decimal2, "Expected decimal values to match")
	zero := ion.NewDecimalInt(0)
	isNegZero1 := decimal1.Sign() < 0 && decimal1.Equal(zero)
	isNegZero2 := decimal2.Sign() < 0 && decimal2.Equal(zero)
	assert.Equal(t, isNegZero1, isNegZero2,
		"Expected decimal values to be both negative zero or both not negative zero")
	assert.True(t, decimal1.Equal(decimal2), "Expected decimal1.Equal(decimal2) to return true")
	assert.True(t, decimal2.Equal(decimal1), "Expected decimal2.Equal(decimal1) to return true")
}
// writeFromReaderToWriter reads every value from reader and writes it to
// writer, recursing into containers. When errExpected is true, StepOut and
// final reader errors are tolerated (used by tests that feed intentionally
// broken input).
func writeFromReaderToWriter(t *testing.T, reader ion.Reader, writer ion.Writer, errExpected bool) {
	for reader.Next() {
		name, err := reader.FieldName()
		// NOTE(review): this message says "reader.Annotations()" but the call
		// is reader.FieldName() — the message text is misleading.
		require.NoError(t, err, "Something went wrong executing reader.Annotations()")
		if name != nil {
			require.NoError(t, writer.FieldName(*name), "Something went wrong executing writer.FieldName(*name)")
		}
		annotations, err := reader.Annotations()
		require.NoError(t, err, "Something went wrong executing reader.Annotations()")
		if len(annotations) > 0 {
			require.NoError(t, writer.Annotations(annotations...), "Something went wrong executing writer.Annotations(annotations...)")
		}
		currentType := reader.Type()
		// Nulls are written as typed nulls and need no value handling.
		if reader.IsNull() {
			require.NoError(t, writer.WriteNullType(currentType),
				"Something went wrong executing writer.WriteNullType(currentType)")
			continue
		}
		// Dispatch on the current Ion type: scalars are copied directly,
		// containers are stepped into and copied recursively.
		switch currentType {
		case ion.NullType:
			assert.NoError(t, writer.WriteNullType(ion.NullType), "Something went wrong while writing a Null value")
		case ion.BoolType:
			val, err := reader.BoolValue()
			assert.NoError(t, err, "Something went wrong when reading Boolean value")
			if val == nil {
				assert.NoError(t, writer.WriteNullType(ion.BoolType))
			} else {
				assert.NoError(t, writer.WriteBool(*val), "Something went wrong while writing a Boolean value")
			}
		case ion.IntType:
			intSize, err := reader.IntSize()
			require.NoError(t, err, "Something went wrong when retrieving the Int size")
			switch intSize {
			case ion.Int32, ion.Int64:
				val, err := reader.Int64Value()
				assert.NoError(t, err, "Something went wrong when reading Int value")
				assert.NoError(t, writer.WriteInt(*val), "Something went wrong when writing Int value")
			case ion.BigInt:
				val, err := reader.BigIntValue()
				assert.NoError(t, err, "Something went wrong when reading Big Int value")
				assert.NoError(t, writer.WriteBigInt(val), "Something went wrong when writing Big Int value")
			default:
				t.Error("Expected intSize to be one of Int32, Int64, Uint64, or BigInt")
			}
		case ion.FloatType:
			val, err := reader.FloatValue()
			assert.NoError(t, err, "Something went wrong when reading Float value")
			assert.NoError(t, writer.WriteFloat(*val), "Something went wrong when writing Float value")
		case ion.DecimalType:
			val, err := reader.DecimalValue()
			assert.NoError(t, err, "Something went wrong when reading Decimal value")
			assert.NoError(t, writer.WriteDecimal(val), "Something went wrong when writing Decimal value")
		case ion.TimestampType:
			val, err := reader.TimestampValue()
			assert.NoError(t, err, "Something went wrong when reading Timestamp value")
			assert.NoError(t, writer.WriteTimestamp(*val), "Something went wrong when writing Timestamp value")
		case ion.SymbolType:
			val, err := reader.SymbolValue()
			assert.NoError(t, err, "Something went wrong when reading Symbol value")
			assert.NoError(t, writer.WriteSymbol(*val), "Something went wrong when writing Symbol value")
		case ion.StringType:
			val, err := reader.StringValue()
			assert.NoError(t, err, "Something went wrong when reading String value")
			require.NotNil(t, val)
			assert.NoError(t, writer.WriteString(*val), "Something went wrong when writing String value")
		case ion.ClobType:
			val, err := reader.ByteValue()
			assert.NoError(t, err, "Something went wrong when reading Clob value")
			assert.NoError(t, writer.WriteClob(val), "Something went wrong when writing Clob value")
		case ion.BlobType:
			val, err := reader.ByteValue()
			assert.NoError(t, err, "Something went wrong when reading Blob value")
			assert.NoError(t, writer.WriteBlob(val), "Something went wrong when writing Blob value")
		case ion.SexpType:
			require.NoError(t, reader.StepIn(), "Something went wrong executing reader.StepIn()")
			require.NoError(t, writer.BeginSexp(), "Something went wrong executing writer.BeginSexp()")
			writeFromReaderToWriter(t, reader, writer, errExpected)
			err := reader.StepOut()
			if !errExpected {
				require.NoError(t, err, "Something went wrong executing reader.StepOut()")
			}
			require.NoError(t, writer.EndSexp(), "Something went wrong executing writer.EndSexp()")
		case ion.ListType:
			require.NoError(t, reader.StepIn(), "Something went wrong executing reader.StepIn()")
			require.NoError(t, writer.BeginList(), "Something went wrong executing writer.BeginList()")
			writeFromReaderToWriter(t, reader, writer, errExpected)
			err := reader.StepOut()
			if !errExpected {
				require.NoError(t, err, "Something went wrong executing reader.StepOut()")
			}
			require.NoError(t, writer.EndList(), "Something went wrong executing writer.EndList()")
		case ion.StructType:
			require.NoError(t, reader.StepIn(), "Something went wrong executing reader.StepIn()")
			require.NoError(t, writer.BeginStruct(), "Something went wrong executing writer.BeginStruct()")
			writeFromReaderToWriter(t, reader, writer, errExpected)
			err := reader.StepOut()
			if !errExpected {
				require.NoError(t, err, "Something went wrong executing reader.StepOut()")
			}
			require.NoError(t, writer.EndStruct(), "Something went wrong executing writer.EndStruct()")
		}
	}
	if !errExpected {
		assert.NoError(t, reader.Err(), "Something went wrong writing from reader to writer")
	}
}
// writeToWriters copies the value the reader is currently positioned on to
// every writer in writers, recursing into containers (sexp, list, struct).
// Any failure aborts the test immediately via require.
func writeToWriters(t *testing.T, reader ion.Reader, writers ...ion.Writer) {
	ionType := reader.Type()

	// Replay any annotations on the current value before the value itself.
	annotations, err := reader.Annotations()
	require.NoError(t, err, "Something went wrong executing reader.Annotations()")

	if len(annotations) > 0 {
		for _, writer := range writers {
			require.NoError(t, writer.Annotations(annotations...),
				"Something went wrong executing writer.Annotations(annotations...)")
		}
	}

	// Replay the field name when inside a struct. Field names "ion" and
	// "10n" are deliberately skipped — presumably they mark system/symbol
	// table values in the test corpus; TODO confirm against the test data.
	fieldName, err := reader.FieldName()
	if err == nil && fieldName != nil && (fieldName.Text == nil || (*fieldName.Text != "ion" && *fieldName.Text != "10n")) {
		for _, writer := range writers {
			require.NoError(t, writer.FieldName(*fieldName),
				"Something went wrong executing writer.FieldName(*fieldName)")
		}
	}

	// Typed nulls short-circuit: write null.<type> and stop.
	if reader.IsNull() {
		for _, writer := range writers {
			require.NoError(t, writer.WriteNullType(reader.Type()),
				"Something went wrong executing writer.WriteNullType(reader.Type())")
		}
		return
	}

	// Scalar values are read once and written to every writer; containers
	// step in, recurse per element, and step out.
	switch ionType {
	case ion.NullType:
		for _, writer := range writers {
			require.NoError(t, writer.WriteNull(), "Something went wrong executing writer.WriteNull()")
		}

	case ion.BoolType:
		boolValue, err := reader.BoolValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteBool(*boolValue),
				"Something went wrong executing writer.WriteBool(*boolValue)")
		}

	case ion.BlobType:
		byteValue, err := reader.ByteValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteBlob(byteValue),
				"Something went wrong executing writer.WriteBlob(byteValue)")
		}

	case ion.ClobType:
		byteValue, err := reader.ByteValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteClob(byteValue),
				"Something went wrong executing writer.WriteClob(byteValue)")
		}

	case ion.DecimalType:
		decimalValue, err := reader.DecimalValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteDecimal(decimalValue),
				"Something went wrong executing writer.WriteDecimal(decimalValue)")
		}

	case ion.FloatType:
		floatValue, err := reader.FloatValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteFloat(*floatValue),
				"Something went wrong executing writer.WriteFloat(*floatValue)")
		}

	case ion.IntType:
		// Pick the narrowest write API the value fits in.
		intSize, err := reader.IntSize()
		require.NoError(t, err)

		switch intSize {
		case ion.Int32, ion.Int64:
			intValue, err := reader.Int64Value()
			require.NoError(t, err)
			for _, writer := range writers {
				require.NoError(t, writer.WriteInt(*intValue),
					"Something went wrong executing writer.WriteInt(*intValue)")
			}
		case ion.BigInt:
			bigIntValue, err := reader.BigIntValue()
			require.NoError(t, err)
			for _, writer := range writers {
				require.NoError(t, writer.WriteBigInt(bigIntValue),
					"Something went wrong executing writer.WriteBigInt(bigIntValue)")
			}
		default:
			// NOTE(review): the message mentions Uint64 but there is no
			// Uint64 case above — confirm whether that size needs handling.
			t.Error("Expected intSize to be one of Int32, Int64, Uint64, or BigInt")
		}

	case ion.StringType:
		stringValue, err := reader.StringValue()
		require.NoError(t, err)
		require.NotNil(t, stringValue)
		for _, writer := range writers {
			require.NoError(t, writer.WriteString(*stringValue),
				"Something went wrong executing writer.WriteString(stringValue)")
		}

	case ion.SymbolType:
		symbolValue, err := reader.SymbolValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteSymbol(*symbolValue),
				"Something went wrong executing writer.WriteSymbol(*symbolValue)")
		}

	case ion.TimestampType:
		timestampValue, err := reader.TimestampValue()
		require.NoError(t, err)
		for _, writer := range writers {
			require.NoError(t, writer.WriteTimestamp(*timestampValue),
				"Something went wrong executing writer.WriterTimestamp(*timestampValue)")
		}

	case ion.SexpType:
		require.NoError(t, reader.StepIn())
		for _, writer := range writers {
			require.NoError(t, writer.BeginSexp(), "Something went wrong executing writer.BeginSexp()")
		}
		// Recurse over each child value; reader.Err() surfaces iteration errors.
		for reader.Next() {
			writeToWriters(t, reader, writers...)
		}
		require.NoError(t, reader.Err(), "Something went wrong executing reader.Next()")
		require.NoError(t, reader.StepOut())
		for _, writer := range writers {
			require.NoError(t, writer.EndSexp(), "Something went wrong executing writer.EndSexp()")
		}

	case ion.ListType:
		require.NoError(t, reader.StepIn())
		for _, writer := range writers {
			require.NoError(t, writer.BeginList(), "Something went wrong executing writer.BeginList()")
		}
		for reader.Next() {
			writeToWriters(t, reader, writers...)
		}
		require.NoError(t, reader.Err(), "Something went wrong executing reader.Next()")
		require.NoError(t, reader.StepOut())
		for _, writer := range writers {
			require.NoError(t, writer.EndList(), "Something went wrong executing writer.EndList()")
		}

	case ion.StructType:
		require.NoError(t, reader.StepIn())
		for _, writer := range writers {
			require.NoError(t, writer.BeginStruct(), "Something went wrong executing writer.BeginStruct()")
		}
		for reader.Next() {
			writeToWriters(t, reader, writers...)
		}
		require.NoError(t, reader.Err(), "Something went wrong executing reader.Next()")
		require.NoError(t, reader.StepOut())
		for _, writer := range writers {
			require.NoError(t, writer.EndStruct(), "Something went wrong executing writer.EndStruct()")
		}

	default:
		t.Fatal(InvalidIonTypeError{ionType})
	}
}
// readSexpAndAppendToList steps into the s-expression at the reader's current
// position, collects each integer element truncated to a byte, and returns
// the accumulated byte slice.
func readSexpAndAppendToList(t *testing.T, reader ion.Reader) []byte {
	require.NoError(t, reader.StepIn())

	collected := []byte{}
	for reader.Next() {
		element, err := reader.Int64Value()
		require.NoError(t, err, "Something went wrong executing reader.Int64Value()")
		collected = append(collected, byte(*element))
	}

	require.NoError(t, reader.Err(), "Something went wrong executing reader.Next()")
	require.NoError(t, reader.StepOut(), "Something went wrong executing reader.StepOut()")
	return collected
}
|
package controllers
import (
"github.com/gin-gonic/gin"
"github.com/sergiolucena1/database"
"github.com/sergiolucena1/models"
"strconv"
)
// ShowProduct handles fetching a single product by its numeric route id.
// Responds 400 when the id is not an integer or the product cannot be found.
func ShowProduct(c *gin.Context) {
	rawID := c.Param("id")

	// The route parameter must parse as an integer.
	productID, err := strconv.Atoi(rawID)
	if err != nil {
		c.JSON(400, gin.H{
			"error": "ID tem que ser inteiro",
		})
		return
	}

	var product models.Product
	if err := database.GetDatabase().First(&product, productID).Error; err != nil {
		c.JSON(400, gin.H{
			"error": "Não consigo encontrar o produto:" + err.Error(),
		})
		return
	}

	c.JSON(200, product)
}
// CreateProduct handles creating a product: it binds the request body JSON
// into a Product, persists it, and echoes the stored record back as JSON.
// Responds 400 on a malformed body or a database failure.
func CreateProduct(c *gin.Context) {
	db := database.GetDatabase()

	var product models.Product
	if err := c.ShouldBindJSON(&product); err != nil {
		c.JSON(400, gin.H{
			"error": "cannot bind JSON: " + err.Error(),
		})
		return
	}

	if err := db.Create(&product).Error; err != nil {
		c.JSON(400, gin.H{
			"error": "cannot create product: " + err.Error(),
		})
		// Bug fix: without this return the handler fell through and also
		// wrote a 200 response after the 400 error.
		return
	}

	c.JSON(200, product)
}
// ShowProducts handles listing every product as a JSON array.
// Responds 400 when the database query fails.
func ShowProducts(c *gin.Context) {
	var products []models.Product

	if err := database.GetDatabase().Find(&products).Error; err != nil {
		c.JSON(400, gin.H{
			"error": "cannot list products: " + err.Error(),
		})
		return
	}

	c.JSON(200, products)
}
// UpdateProduct handles updating a product: it binds the request body JSON
// into a Product, saves it, and echoes the stored record back as JSON.
// Responds 400 on a malformed body or a database failure.
func UpdateProduct(c *gin.Context) {
	db := database.GetDatabase()

	var product models.Product
	if err := c.ShouldBindJSON(&product); err != nil {
		c.JSON(400, gin.H{
			"error": "cannot bind JSON: " + err.Error(),
		})
		return
	}

	if err := db.Save(&product).Error; err != nil {
		c.JSON(400, gin.H{
			"error": "cannot update product: " + err.Error(),
		})
		// Bug fix: without this return the handler fell through and also
		// wrote a 200 response after the 400 error.
		return
	}

	c.JSON(200, product)
}
func DeleteProduct(c *gin.Context){
id := c.Param("id")
newid, err := strconv.Atoi(id) // convertendo pra inteiro
if err != nil{
c.JSON(400,gin.H{
"error": "ID tem que ser inteiro",
})
return
}
db := database.GetDatabase()
err = db.Delete(&models.Product{},newid).Error
if err != nil {
c.JSON(400, gin.H{
"error": "cannot delete product: " + err.Error(),
})
return
}
c.Status(204)
} |
package realestatecomau_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"io/ioutil"
"testing"
)
// TestRealestatecomau is the Ginkgo entry point for this package: it wires
// Gomega assertion failures into Ginkgo and runs the suite's specs.
func TestRealestatecomau(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Realestatecomau Suite")
}
// ReadRealEstateComAu_Buy_list_1 holds the HTML fixture used by the specs.
// It is loaded once, before any spec runs.
var ReadRealEstateComAu_Buy_list_1 string

var _ = BeforeSuite(func() {
	contents, err := ioutil.ReadFile("test_assets/realestate_com_au_buy_list_1.html")
	Expect(err).ToNot(HaveOccurred())
	ReadRealEstateComAu_Buy_list_1 = string(contents)
})
|
package logs
import (
"testing"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/utils/test/assert"
)
// TestLogTypes verifies that each CLI-facing log type keyword expands to the
// expected set of realm server log types. An empty keyword maps to nil,
// i.e. no type filter at all.
func TestLogTypes(t *testing.T) {
	for _, tc := range []struct {
		logType  string // CLI keyword supplied by the user
		logTypes []string // server-side log types it should expand to
	}{
		{"", nil},
		{logTypeAuth, []string{realm.LogTypeAuth, realm.LogTypeAPIKey}},
		{logTypeFunction, []string{realm.LogTypeFunction}},
		{logTypePush, []string{realm.LogTypePush}},
		{logTypeService, []string{realm.LogTypeServiceFunction, realm.LogTypeWebhook, realm.LogTypeServiceStreamFunction, realm.LogTypeStreamFunction}},
		{logTypeTrigger, []string{realm.LogTypeAuthTrigger, realm.LogTypeDBTrigger, realm.LogTypeScheduledTrigger}},
		{logTypeGraphQL, []string{realm.LogTypeGraphQL}},
		{logTypeSync, []string{realm.LogTypeSyncConnectionStart, realm.LogTypeSyncConnectionEnd, realm.LogTypeSyncSessionStart, realm.LogTypeSyncSessionEnd, realm.LogTypeSyncClientWrite, realm.LogTypeSyncError, realm.LogTypeSyncOther}},
		{logTypeSchema, []string{realm.LogTypeSchemaAdditiveChange, realm.LogTypeSchemaGeneration, realm.LogTypeSchemaValidation}},
	} {
		t.Run("should find log types for type "+tc.logType, func(t *testing.T) {
			i := listInputs{Types: []string{tc.logType}}
			assert.Equal(t, tc.logTypes, i.logTypes())
		})
	}
}
|
package datasets
import (
"fmt"
"github.com/codeformuenster/dkan-newest-dataset-notifier/util"
"github.com/imroc/req"
)
// DatasetItem is a single dataset entry parsed from a DKAN catalog feed.
type DatasetItem struct {
	Modified    ISODate `json:"modified"` // last modification date of the dataset
	Issued      ISODate `json:"issued"`   // initial publication date
	Title       string  `json:"title"`
	Description string  `json:"description"`
	Identifier  string  `json:"identifier"` // id used to query the package_show API
}
// PackageResponse mirrors the slice of the DKAN package_show API response we
// need: a list of results, each carrying the dataset's web URL.
type PackageResponse struct {
	Result []struct {
		URL string `json:"url"`
	} `json:"result"`
}
// ResolveURL looks up the dataset's web URL by querying the DKAN
// package_show API on baseURL with the dataset's identifier.
func (d *DatasetItem) ResolveURL(baseURL string) (string, error) {
	apiURL, err := util.MakeURL(fmt.Sprintf(
		"%s/api/3/action/package_show?id=%s",
		baseURL,
		d.Identifier,
	))
	if err != nil {
		return "", err
	}

	r, err := req.Get(apiURL)
	if err != nil {
		return "", err
	}

	var pkg PackageResponse
	if err := r.ToJSON(&pkg); err != nil {
		return "", err
	}

	// Bug fix: guard against an empty result list — indexing [0] on an
	// empty slice previously panicked when the API returned no package.
	if len(pkg.Result) == 0 {
		return "", fmt.Errorf("no package result for identifier %q", d.Identifier)
	}

	return pkg.Result[0].URL, nil
}
// ToTootText builds a toot/tweet text for the dataset, trying each template
// in tweetTemplates (ordered from most to least verbose, presumably) and
// returning the first rendering that fits in 280 characters.
func (d *DatasetItem) ToTootText(baseURL string) (string, error) {
	url, err := d.ResolveURL(baseURL)
	if err != nil {
		return "", err
	}

	var text string
	for _, template := range tweetTemplates {
		text = fmt.Sprintf(
			template,
			d.Title, url,
		)
		if len(text) < 280 {
			return text, nil
		}
	}

	// Bug fix: the original checked len(text) < 280 here, which is
	// unreachable for any text that fit (those return inside the loop) and
	// wrongly reported an empty text as too long. Reaching this point means
	// every template rendered to 280 characters or more.
	return "", fmt.Errorf("Tweet too long (> 280): %s", text)
}
|
package web_param
// JwtParam is the request/response body wrapper carrying a JWT token string.
type JwtParam struct {
	Token string `json:"token"`
}
|
package main
import (
"fmt"
"log"
"net/http"
"evergrid/server/services/status"
"evergrid/db"
"github.com/ant0ine/go-json-rest/rest"
)
// main wires up the database connection and the JSON REST API, then serves
// it on :8080.
func main() {
	connection := db.Connection{}
	connection.Init()
	fmt.Println(*connection.Users())

	api := rest.NewApi()
	api.Use(rest.DefaultDevStack...)

	router, err := rest.MakeRouter(
		status.Routes()...,
	)
	if err != nil {
		log.Fatal(err)
	}
	api.SetApp(router)

	// Bug fix: ListenAndServe blocks until it fails and its error was
	// silently discarded (the old log.Print("Goodbye!") after it could only
	// ever run after an unreported error). Surface the failure instead.
	log.Fatal(http.ListenAndServe(":8080", api.MakeHandler()))
}
|
package fakes
import (
"github.com/cloudfoundry-incubator/notifications/models"
"github.com/cloudfoundry-incubator/notifications/postal"
)
// FakeMailRecipe is a test double for a mail recipe: it records the
// arguments passed to Dispatch, notes whether Trim was called, and returns
// canned responses/errors configured by the test.
type FakeMailRecipe struct {
	DispatchArguments []interface{}     // arguments captured by the last Dispatch call
	Responses         []postal.Response // canned responses Dispatch returns
	Error             error             // canned error Dispatch returns
	TrimCalled        bool              // set once Trim has been invoked
}
// Dispatch records clientID, guid, and options for later assertions and
// returns the canned Responses/Error. Note the conn argument is not
// captured — presumably no test asserts on it; confirm before relying on it.
func (fake *FakeMailRecipe) Dispatch(clientID string, guid postal.TypedGUID,
	options postal.Options, conn models.ConnectionInterface) ([]postal.Response, error) {
	fake.DispatchArguments = []interface{}{clientID, guid, options}
	return fake.Responses, fake.Error
}
// Trim records that it was called and returns the response unchanged.
func (fake *FakeMailRecipe) Trim(response []byte) []byte {
	fake.TrimCalled = true
	return response
}
|
package main
import (
"testing"
"net/http"
"net/http/httptest"
"io/ioutil"
"strings"
"encoding/json"
)
// TestShowIndexPageUnauthenticated verifies that an anonymous GET / renders
// the HTML home page: status 200 and the expected <title> in the body.
func TestShowIndexPageUnauthenticated(t *testing.T) {
	r := getRouter(true)
	r.GET("/", showIndexPage)

	req, _ := http.NewRequest("GET", "/", nil)

	testHTTPResponse(t, r, req, func(w *httptest.ResponseRecorder) bool {
		statusOK := w.Code == http.StatusOK

		p, err := ioutil.ReadAll(w.Body)
		// Bug fix: strings.Index(...) > 0 missed a match at offset 0;
		// strings.Contains expresses the intended check directly.
		pageOK := err == nil && strings.Contains(string(p), "<title>Home Page</title>")

		return statusOK && pageOK
	})
}
// TestArticleUnauthenticated verifies that an anonymous GET of an article
// page succeeds: status 200 and the article's <title> in the body.
func TestArticleUnauthenticated(t *testing.T) {
	r := getRouter(true)

	// Define the route similar to its definition in the routes file
	r.GET("/article/view/:article_id", getArticle)

	// Create a request to send to the above route
	req, _ := http.NewRequest("GET", "/article/view/1", nil)

	testHTTPResponse(t, r, req, func(w *httptest.ResponseRecorder) bool {
		// Test that the http status code is 200
		statusOK := w.Code == http.StatusOK

		// Test that the page title is "Article 1". More detailed checks
		// would need an HTML-parsing library.
		p, err := ioutil.ReadAll(w.Body)
		// Bug fix: strings.Index(...) > 0 missed a match at offset 0;
		// strings.Contains expresses the intended check directly.
		pageOK := err == nil && strings.Contains(string(p), "<title>Article 1</title>")

		return statusOK && pageOK
	})
}
func TestArticleListJSON(t *testing.T) {
r := getRouter(true)
r.GET("/", showIndexPage)
req, _ := http.NewRequest("GET", "/", nil)
req.Header.Add("Accept", "application/json")
testHTTPResponse(t, r, req, func(w *httptest.ResponseRecorder) bool {
statusOK := w.Code == http.StatusOK
p, err := ioutil.ReadAll(w.Body)
if err != nil {
return false
}
var articles []article
err = json.Unmarshal(p, &articles)
return err == nil && len(articles) >= 2 && statusOK
})
} |
package main
/*
MIT License
Copyright (c) 2019 Horacio Duran <horacio.duran@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import (
"archive/tar"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
)
func buildTar(filesToInclude []string, tarFile io.Writer, relativeTo string) (int64, error) {
tWriter := tar.NewWriter(tarFile)
defer tWriter.Close()
return addToTar(filesToInclude, tWriter, relativeTo)
}
// addToTar writes the given files into tWriter in tar format, recursing one
// level per call into directories. Entry names have the relativeTo prefix
// stripped. It returns the total number of content bytes written; if writing
// fails partway, some data may already have been written to the stream.
func addToTar(filesToInclude []string, tWriter *tar.Writer, relativeTo string) (int64, error) {
	var contentsSize int64
	relativeTo = strings.TrimSuffix(relativeTo, string(os.PathSeparator))
	for _, filePath := range filesToInclude {
		fInfo, err := os.Stat(filePath)
		if err != nil {
			return 0, errors.Wrap(err, "accessing file to include in stapled file")
		}
		// Header: using os.Stat (not Lstat) means symlinks are dereferenced,
		// so each link is archived as the file it points to.
		hdr, err := tar.FileInfoHeader(fInfo, "")
		if err != nil {
			return 0, errors.Wrap(err, "creating header for file")
		}
		// NOTE(review): TrimPrefix leaves a leading path separator on the
		// entry name when filePath starts with relativeTo — confirm that
		// downstream tar readers expect that.
		hdr.Name = strings.TrimPrefix(filePath, relativeTo)
		err = tWriter.WriteHeader(hdr)
		if err != nil {
			return 0, errors.Wrapf(err, "writing header information for %s", filePath)
		}
		// Regular file: copy its full contents and count the bytes written.
		if !fInfo.IsDir() {
			contents, err := ioutil.ReadFile(filePath)
			if err != nil {
				return 0, errors.Wrap(err, "reading file to add into tar")
			}
			writen, err := tWriter.Write(contents)
			if err != nil {
				return 0, errors.Wrap(err, "writing file contents into tar")
			}
			// FIXME(original "this is wrong?"): on a short write only the
			// bytes actually written are counted, but tar.Writer returns an
			// error for short writes, so this path should be consistent.
			contentsSize += int64(writen)
			continue
		}
		// Directory: recurse into its immediate children. Glob already
		// returns paths joined onto filePath, so no re-joining is needed.
		dirContents, err := filepath.Glob(filepath.Join(filePath, "*"))
		if err != nil {
			return 0, errors.Wrapf(err, "trying to read the contents of %s", filePath)
		}
		writen, err := addToTar(dirContents, tWriter, relativeTo)
		if err != nil {
			return 0, errors.Wrapf(err, "adding to tar the contents of %s", filePath)
		}
		contentsSize += writen
	}
	return contentsSize, nil
}
|
package hot100
import (
"strconv"
"strings"
)
// restoreIpAddresses returns every valid dotted-quad IPv4 address that can
// be formed by inserting three dots into s, using DFS backtracking.
// Segment rules: each of the four parts is 0-255, and a part may not have a
// leading zero unless it is exactly "0".
func restoreIpAddresses(s string) []string {
	segments := make([]string, 4)
	results := make([]string, 0)

	var backtrack func(pos, seg int)
	backtrack = func(pos, seg int) {
		// All four segments chosen: valid only if the input was fully consumed.
		if seg == 4 {
			if pos == len(s) {
				results = append(results, strings.Join(segments, "."))
			}
			return
		}
		// Out of characters before four segments were filled.
		if pos == len(s) {
			return
		}
		// A segment starting with '0' can only be the single digit "0".
		if s[pos] == '0' {
			segments[seg] = "0"
			backtrack(pos+1, seg+1)
		}
		// Otherwise try every value 1..255 extending from pos.
		value := 0
		for end := pos; end < len(s); end++ {
			value = value*10 + int(s[end]-'0')
			if value < 1 || value > 255 {
				// Either a leading zero (value 0) or already past 255:
				// longer prefixes can only grow, so stop.
				break
			}
			segments[seg] = strconv.Itoa(value)
			backtrack(end+1, seg+1)
		}
	}

	backtrack(0, 0)
	return results
}
|
package rule
import (
ev "events"
"fmt"
"time"
)
// hisVolList keeps a sliding window (up to 4 completed periods) of executed
// volume for a single security.
type hisVolList struct {
	totalVolume int   // sum of the volumes currently held in qhisVolume
	curVolume   int   // volume accumulated for the in-progress period
	qhisVolume  []int // FIFO queue of the most recent completed periods (max 4)
}
// makeHisVolList returns a fresh history list with zeroed totals and a
// queue pre-sized for the four-period window.
func makeHisVolList() *hisVolList {
	return &hisVolList{
		totalVolume: 0,
		curVolume:   0,
		qhisVolume:  make([]int, 0, 4),
	}
}
// increment adds vol to the volume accumulated for the current period.
func (his *hisVolList) increment(vol int) {
	his.curVolume += vol
}
// update closes the current period: it pushes curVolume into the window,
// evicting the oldest entry once 4 periods are held, and keeps totalVolume
// in sync with the window's contents. Zero-volume periods are skipped
// entirely (they neither enter the window nor reset anything).
func (his *hisVolList) update() {
	if his.curVolume == 0 {
		return
	}
	if len(his.qhisVolume) == 4 {
		his.totalVolume -= his.qhisVolume[0]
		his.qhisVolume = his.qhisVolume[1:]
	}
	his.totalVolume += his.curVolume
	his.qhisVolume = append(his.qhisVolume, his.curVolume)
	his.curVolume = 0
}
// getAverage returns the raw ingredients of the average rather than the
// average itself: the window's total volume (tVol) and the number of
// completed periods held (cnt). Callers divide tVol by cnt themselves —
// the name is slightly misleading.
func (his *hisVolList) getAverage() (tVol, cnt int) {
	cnt = len(his.qhisVolume)
	tVol = his.totalVolume
	return
}
// Rdmn implements a rule that flags brokers whose daily executed volume in a
// security exceeds that security's recent average period volume by over 50%.
type Rdmn struct {
	dailyVolume map[string]*int        // per "broker|security" volume executed today
	hisVolume   map[string]*hisVolList // per security sliding volume window
	curY, curD  int                    // calendar date currently being accumulated
	curM        time.Month
}
// getDailyKey builds the dailyVolume map key for an event: "broker|security".
func getDailyKey(evn *ev.Event) string {
	return evn.Broker + "|" + evn.Security
}
// Rupdate rolls the rule over to a new calendar day. If nd falls on the day
// already being accumulated it is a no-op; otherwise it resets the per-day
// volume map and folds each security's accumulated period volume into its
// history window.
func (r *Rdmn) Rupdate(nd *time.Time) {
	y, m, d := nd.Date()
	if r.curD == d && r.curM == m && r.curY == y {
		return
	}
	r.curY = y
	r.curM = m
	r.curD = d
	r.dailyVolume = make(map[string]*int)
	for _, v := range r.hisVolume {
		v.update()
	}
}
// InitRdmn constructs an empty Rdmn rule ready to receive events.
func InitRdmn() Rule {
	rdmn := Rdmn{}
	rdmn.dailyVolume = make(map[string]*int)
	rdmn.hisVolume = make(map[string]*hisVolList)
	return &rdmn
}
// CheckEvent processes one event and returns a non-empty report string when
// the broker's volume for the day in a security exceeds 1.5x that
// security's average per-period volume (i.e. is more than 50% over average).
// Non-execution events are ignored.
func (r *Rdmn) CheckEvent(evn *ev.Event) (sr string) {
	if evn.EventType != ev.EVENT_EXECUTE {
		return
	}
	// Roll over to a new day first if the event's date has changed.
	r.Rupdate(&evn.EventTime)

	nVol := evn.Volume
	key := getDailyKey(evn)

	// Accumulate today's volume for this broker|security pair. The map
	// stores a pointer so the in-place += mutates the stored value.
	daVol, ok := r.dailyVolume[key]
	if !ok {
		r.dailyVolume[key] = &nVol
		daVol = &nVol
	} else {
		*daVol += nVol
	}

	// Accumulate the security's volume into its open (current) period.
	hisVol, ok := r.hisVolume[evn.Security]
	if !ok {
		hisVol = makeHisVolList()
		r.hisVolume[evn.Security] = hisVol
	}
	hisVol.increment(evn.Volume)

	// getAverage returns the window total and period count, so comparing
	// daVol*cnt against 1.5*total is daVol > 1.5*average without division.
	// cnt == 0 (no completed periods yet) suppresses the check.
	tlVol, cnt := hisVol.getAverage()
	if cnt > 0 && float64(*daVol*cnt) > float64(1.5*float64(tlVol)) {
		sr = fmt.Sprintf("D M N: %v has executed over 50%% of the average volume for '%v', the total volume %v, the average is %.2f",
			evn.Broker, evn.Security, *daVol, (float64(tlVol))/(float64(cnt)))
	}
	return
}
|
package main
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
)
// main prompts for a directory (defaulting to ../TestFolder), walks it
// collecting every regular file, and strips TODO lines from each one.
func main() {
	// Get file directory from user input.
	fmt.Println("Enter the directory you want to clean (default-../TestFolder): ")
	var root string
	fmt.Scanln(&root)

	var files []string
	// Set default directory.
	if root == "" {
		root = "../TestFolder"
	}

	// Walk the tree rooted at root, collecting every non-directory entry.
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		// Bug fix: the callback's err parameter was ignored; when an entry
		// cannot be accessed, info is nil and info.IsDir() panicked.
		if err != nil {
			return err
		}
		if !info.IsDir() {
			files = append(files, path)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}

	// Print each collected file and strip its TODO lines.
	for _, file := range files {
		fmt.Println(file)
		removeText(file)
	}
}
// removeText deletes every line containing the keyword "TODO" from the named
// file, rewriting the file in place. The tool's own source file (main.go) is
// skipped.
func removeText(fileName string) {
	// Bug fix: the original compared the full walked path ("../TestFolder/
	// sub/main.go") against "main.go", which never matched; compare the base
	// name so the skip actually works.
	if filepath.Base(fileName) == "main.go" {
		return
	}

	// A successful ReadFile returns err == nil (not io.EOF).
	input, err := ioutil.ReadFile(fileName)
	if err != nil {
		log.Fatalln(err)
	}

	// Remove whole lines (including their newline) that mention TODO.
	re := regexp.MustCompile(".*TODO.*\r?\n")
	lines := re.ReplaceAllString(string(input), "")

	// Write the filtered content back to the same file.
	if err := WriteToFile(fileName, lines); err != nil {
		log.Fatal(err)
	}
}
// WriteToFile creates (or truncates) filename, writes data into it, and
// syncs the file to stable storage before returning. Any step's error is
// returned to the caller.
func WriteToFile(filename string, data string) error {
	f, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err := io.WriteString(f, data); err != nil {
		return err
	}
	// Flush OS buffers so the content survives a crash right after return.
	return f.Sync()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.