text stringlengths 11 4.05M |
|---|
package ravendb
import "time"
// DatabaseStatistics describes a result of GetStatisticsCommand
// DatabaseStatistics describes a result of GetStatisticsCommand
type DatabaseStatistics struct {
	// LastDocEtag is the etag of the most recently changed document.
	LastDocEtag int64 `json:"LastDocEtag"`
	CountOfIndexes int `json:"CountOfIndexes"`
	CountOfDocuments int64 `json:"CountOfDocuments"`
	CountOfRevisionDocuments int64 `json:"CountOfRevisionDocuments"` // TODO: present in Java, not seen in JSON
	CountOfDocumentsConflicts int64 `json:"CountOfDocumentsConflicts"`
	CountOfTombstones int64 `json:"CountOfTombstones"`
	CountOfConflicts int64 `json:"CountOfConflicts"`
	CountOfAttachments int64 `json:"CountOfAttachments"`
	CountOfUniqueAttachments int64 `json:"CountOfUniqueAttachments"`
	// Indexes lists per-index information for every index in the database.
	Indexes []*IndexInformation `json:"Indexes"`
	DatabaseChangeVector string `json:"DatabaseChangeVector"`
	DatabaseID string `json:"DatabaseId"`
	Is64Bit bool `json:"Is64Bit"`
	Pager string `json:"Pager"`
	// LastIndexingTime is a pointer; NOTE(review): presumably nil when the
	// field is absent from the server's JSON — confirm against the wire format.
	LastIndexingTime *Time `json:"LastIndexingTime"`
	SizeOnDisk *Size `json:"SizeOnDisk"`
	TempBuffersSizeOnDisk *Size `json:"TempBuffersSizeOnDisk"`
	NumberOfTransactionMergerQueueOperations int `json:"NumberOfTransactionMergerQueueOperations"`
}
// GetLastIndexingTime returns last indexing time, or nil when the
// statistics carry no LastIndexingTime value.
func (s *DatabaseStatistics) GetLastIndexingTime() *time.Time {
	// LastIndexingTime is a pointer field populated from JSON; guard against
	// dereferencing it when the server did not send the value.
	if s.LastIndexingTime == nil {
		return nil
	}
	return s.LastIndexingTime.toTimePtr()
}
/*
public IndexInformation[] getStaleIndexes() {
return Arrays.stream(indexes)
.filter(x -> x.isStale())
.toArray(IndexInformation[]::new);
}
*/
|
package data
import (
"math/rand"
"testing"
)
// BenchmarkStorage_Get measures lookup throughput on a storage pre-filled
// with ten million entries keyed by short random strings.
func BenchmarkStorage_Get(b *testing.B) {
	store := New()
	const prefill = 10000000
	for n := 0; n < prefill; n++ {
		key := randomString(6)
		store.Add(key, key)
	}
	needle := "abcd"
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, _ = store.Get(needle)
	}
}
// chars is the alphabet random keys are drawn from.
var chars = "abcdefghijklmnop"

// randomString returns a random string of exactly n characters drawn from
// chars. For n == 0 it returns the empty string.
func randomString(n int) string {
	// Build into a byte slice instead of string concatenation in a loop
	// (the old version was quadratic) and index with rand.Intn(len(chars)):
	// the previous len(chars)-1 bound could never select the last character,
	// and rand.Intn(n) made the length a random value in [1, n] (panicking
	// for n == 0) instead of n.
	buf := make([]byte, n)
	for i := range buf {
		buf[i] = chars[rand.Intn(len(chars))]
	}
	return string(buf)
}
|
package generator
import (
"bytes"
"fmt"
"html/template"
"sort"
"strings"
"github.com/RomanosTrechlis/blog-generator/config"
)
// Category holds the data for a category
type Category struct {
	// Name is the category's display name.
	Name string
	// Link is the site-relative URL of the category page.
	Link string
	// Count is the number of posts filed under this category.
	Count int
}
// categoriesGenerator struct
type categoriesGenerator struct {
	// catPostsMap maps a category name to the posts filed under it.
	catPostsMap map[string][]*post
	// template is the outer page template used to wrap rendered content.
	template *template.Template
	// destination is the output root directory for generated files.
	destination string
	// siteInfo carries site-wide configuration (theme folder, etc.).
	siteInfo *config.SiteInformation
}
// Generate creates the categories page
func (g *categoriesGenerator) Generate() (err error) {
	fmt.Println("\tGenerating Categories...")
	// Recreate the categories root, then render the index page and one
	// listing page per category beneath it.
	catsPath := fmt.Sprintf("%s/categories", g.destination)
	if err := clearAndCreateDestination(catsPath); err != nil {
		return err
	}
	if err := g.generateCatIndex(); err != nil {
		return err
	}
	for cat, catPosts := range g.catPostsMap {
		pagePath := fmt.Sprintf("%s/%s", catsPath, cat)
		if err := g.generateCatPage(cat, catPosts, pagePath); err != nil {
			return err
		}
	}
	fmt.Println("\tFinished generating Categories...")
	return nil
}
// generateCatIndex renders the top-level categories overview page,
// listing every category ordered by descending post count.
func (g *categoriesGenerator) generateCatIndex() (err error) {
	catTemplatePath := g.siteInfo.ThemeFolder + "categories.html"
	tmpl, err := getTemplate(catTemplatePath)
	if err != nil {
		return err
	}
	cats := make([]*Category, 0, len(g.catPostsMap))
	for name, posts := range g.catPostsMap {
		cats = append(cats, &Category{
			Name:  name,
			Link:  getCatLink(name),
			Count: len(posts),
		})
	}
	sort.Sort(categoryByCountDesc(cats))
	var rendered bytes.Buffer
	if err := tmpl.Execute(&rendered, cats); err != nil {
		return fmt.Errorf("error executing template %s: %v", catTemplatePath, err)
	}
	page := htmlConfig{
		path:       fmt.Sprintf("%s/categories", g.destination),
		pageTitle:  "Categories",
		pageNum:    0,
		maxPageNum: 0,
		isPost:     false,
		temp:       g.template,
		content:    template.HTML(rendered.String()),
		siteInfo:   g.siteInfo,
	}
	return page.writeHTML()
}
// generateCatPage renders the listing page for a single category at path.
func (g *categoriesGenerator) generateCatPage(cat string, posts []*post, path string) (err error) {
	if err := clearAndCreateDestination(path); err != nil {
		return err
	}
	// Delegate the actual rendering to the shared listing generator.
	lg := listingGenerator{
		posts:       posts,
		template:    g.template,
		destination: path,
		pageTitle:   cat,
		siteInfo:    g.siteInfo,
	}
	return lg.Generate()
}
// getCatLink builds the site-relative URL for a category name,
// lower-casing the name to keep URLs uniform.
func getCatLink(cat string) (link string) {
	return "/categories/" + strings.ToLower(cat) + "/"
}
// categoryByCountDesc sorts the cats
type categoryByCountDesc []*Category

// Len reports the number of categories.
func (t categoryByCountDesc) Len() int { return len(t) }

// Swap exchanges the categories at positions i and j.
func (t categoryByCountDesc) Swap(i, j int) { t[i], t[j] = t[j], t[i] }

// Less orders categories by descending post count.
func (t categoryByCountDesc) Less(i, j int) bool { return t[i].Count > t[j].Count }
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"sync"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the ScreenCaptureAllowed test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ScreenCaptureAllowed,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks that the ScreenCaptureAllowed policy is correctly applied",
		Contacts: []string{
			"jityao@google.com", // Test author
		},
		// Lacros is required: the test drives the Lacros browser variant.
		SoftwareDeps: []string{"chrome", "lacros"},
		Attr:         []string{"group:mainline", "informational"},
		// The fixture provides a Chrome session logged in against a fake DMS,
		// so policies can be served and verified.
		Fixture: fixture.LacrosPolicyLoggedIn,
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.ScreenCaptureAllowed{}, pci.VerifiedFunctionalityUI),
		},
	})
}
// ScreenCaptureAllowed exercises the ScreenCaptureAllowed policy in the
// enabled, disabled, and unset states, and verifies via
// navigator.mediaDevices.getDisplayMedia() in a Lacros tab that screen
// capture is granted exactly when the policy is enabled or unset.
func ScreenCaptureAllowed(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Reserve ten seconds for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	// One subtest per policy state.
	for _, param := range []struct {
		name  string
		value *policy.ScreenCaptureAllowed
	}{
		{
			name:  "enabled",
			value: &policy.ScreenCaptureAllowed{Val: true},
		},
		{
			name:  "disabled",
			value: &policy.ScreenCaptureAllowed{Val: false},
		},
		{
			name:  "unset",
			value: &policy.ScreenCaptureAllowed{Stat: policy.StatusUnset},
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Update policies.
			if err := policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{param.value}); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}
			// Open lacros browser.
			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, browser.TypeLacros)
			if err != nil {
				s.Fatal("Failed to open the browser: ", err)
			}
			defer closeBrowser(cleanupCtx)
			defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			conn, err := br.NewConn(ctx, "")
			if err != nil {
				s.Fatal("Failed to connect to chrome: ", err)
			}
			// Any HTTPS URL works.
			if err := conn.Navigate(ctx, "https://www.google.com"); err != nil {
				s.Fatal("Failed to navigate to https://www.google.com: ", err)
			}
			defer conn.Close()
			// Restrict each permission check to 5 seconds.
			uiCtx, uiCancel := context.WithTimeout(ctx, 5*time.Second)
			defer uiCancel()
			var wg sync.WaitGroup
			// Permission should be granted if policy is unset or if policy is enabled.
			expected := param.value.Stat == policy.StatusUnset || param.value.Val
			if expected {
				wg.Add(1)
				// Handle media selection source prompt in separate goroutine.
				// The prompt only appears when capture is allowed, and must be
				// answered while the getDisplayMedia() call below is pending.
				// NOTE(review): s.Fatal is called from this helper goroutine;
				// Fatal/FailNow are only safe on the test's own goroutine —
				// confirm whether these should be s.Error + return instead.
				go func() {
					defer wg.Done()
					// Connect to Test API to use it with the UI library.
					tconn, err := cr.TestAPIConn(uiCtx)
					if err != nil {
						s.Fatal("Failed to create Test API connection: ", err)
					}
					mediaPicker := nodewith.Role(role.Window).ClassName("DesktopMediaPickerDialogView")
					screenTab := nodewith.Name("Entire Screen").ClassName("Tab").Ancestor(mediaPicker)
					shareTarget := nodewith.ClassName("DesktopMediaSourceView").First()
					shareButton := nodewith.Name("Share").Role(role.Button)
					ui := uiauto.New(tconn)
					// Click on "Entire Screen" tab, then on the desktop media source view, and then
					// click on Share button.
					if err := uiauto.Combine("Select media source",
						ui.WaitUntilExists(mediaPicker),
						ui.LeftClick(screenTab),
						ui.LeftClick(shareTarget),
						ui.LeftClick(shareButton),
					)(uiCtx); err != nil {
						s.Fatal("Failed to select media source: ", err)
					}
				}()
			}
			// Check that getDisplayMedia() permissions are correctly allowed or denied.
			// The promise resolves to true on success, false on the specific
			// "Permission denied" DOMException, and rethrows any other error.
			actual := false
			if err := conn.Eval(uiCtx, `navigator.mediaDevices.getDisplayMedia()
		.then(() => true)
		.catch((err) => {
			if (err instanceof DOMException && err.message == "Permission denied") {
				return false;
			}
			throw err;
		})
	`, &actual); err != nil {
				s.Fatal("Could not request for display media: ", err)
			}
			wg.Wait()
			if actual != expected {
				s.Fatalf("Unexpected permission granted status, expected %v, got %v", expected, actual)
			}
		})
	}
}
|
package repository
import (
"context"
"github.com/caos/zitadel/internal/project/model"
)
// ProjectRepository defines read and write access to projects and their
// nested resources: members, roles, applications, grants, and grant members.
type ProjectRepository interface {
	// Project lifecycle and lookup.
	ProjectByID(ctx context.Context, id string) (*model.ProjectView, error)
	CreateProject(ctx context.Context, name string) (*model.Project, error)
	UpdateProject(ctx context.Context, project *model.Project) (*model.Project, error)
	DeactivateProject(ctx context.Context, id string) (*model.Project, error)
	ReactivateProject(ctx context.Context, id string) (*model.Project, error)
	SearchProjects(ctx context.Context, request *model.ProjectViewSearchRequest) (*model.ProjectViewSearchResponse, error)
	SearchProjectGrants(ctx context.Context, request *model.ProjectGrantViewSearchRequest) (*model.ProjectGrantViewSearchResponse, error)
	SearchGrantedProjects(ctx context.Context, request *model.ProjectGrantViewSearchRequest) (*model.ProjectGrantViewSearchResponse, error)
	ProjectGrantViewByID(ctx context.Context, grantID string) (*model.ProjectGrantView, error)
	// Project members.
	ProjectMemberByID(ctx context.Context, projectID, userID string) (*model.ProjectMemberView, error)
	AddProjectMember(ctx context.Context, member *model.ProjectMember) (*model.ProjectMember, error)
	ChangeProjectMember(ctx context.Context, member *model.ProjectMember) (*model.ProjectMember, error)
	RemoveProjectMember(ctx context.Context, projectID, userID string) error
	SearchProjectMembers(ctx context.Context, request *model.ProjectMemberSearchRequest) (*model.ProjectMemberSearchResponse, error)
	GetProjectMemberRoles() []string
	// Project roles and change history.
	AddProjectRole(ctx context.Context, role *model.ProjectRole) (*model.ProjectRole, error)
	ChangeProjectRole(ctx context.Context, role *model.ProjectRole) (*model.ProjectRole, error)
	RemoveProjectRole(ctx context.Context, projectID, key string) error
	SearchProjectRoles(ctx context.Context, projectId string, request *model.ProjectRoleSearchRequest) (*model.ProjectRoleSearchResponse, error)
	ProjectChanges(ctx context.Context, id string, lastSequence uint64, limit uint64, sortAscending bool) (*model.ProjectChanges, error)
	BulkAddProjectRole(ctx context.Context, role []*model.ProjectRole) error
	// Applications and their OIDC configuration.
	ApplicationByID(ctx context.Context, appID string) (*model.ApplicationView, error)
	AddApplication(ctx context.Context, app *model.Application) (*model.Application, error)
	ChangeApplication(ctx context.Context, app *model.Application) (*model.Application, error)
	DeactivateApplication(ctx context.Context, projectID, appID string) (*model.Application, error)
	ReactivateApplication(ctx context.Context, projectID, appID string) (*model.Application, error)
	RemoveApplication(ctx context.Context, projectID, appID string) error
	ChangeOIDCConfig(ctx context.Context, config *model.OIDCConfig) (*model.OIDCConfig, error)
	// NOTE(review): name looks like a typo for ChangeOIDCConfigSecret, but
	// renaming would break every implementer — confirm before changing.
	ChangeOIDConfigSecret(ctx context.Context, projectID, appID string) (*model.OIDCConfig, error)
	SearchApplications(ctx context.Context, request *model.ApplicationSearchRequest) (*model.ApplicationSearchResponse, error)
	ApplicationChanges(ctx context.Context, id string, secId string, lastSequence uint64, limit uint64, sortAscending bool) (*model.ApplicationChanges, error)
	// Project grants.
	ProjectGrantByID(ctx context.Context, grantID string) (*model.ProjectGrantView, error)
	AddProjectGrant(ctx context.Context, grant *model.ProjectGrant) (*model.ProjectGrant, error)
	ChangeProjectGrant(ctx context.Context, grant *model.ProjectGrant) (*model.ProjectGrant, error)
	DeactivateProjectGrant(ctx context.Context, projectID, grantID string) (*model.ProjectGrant, error)
	ReactivateProjectGrant(ctx context.Context, projectID, grantID string) (*model.ProjectGrant, error)
	RemoveProjectGrant(ctx context.Context, projectID, grantID string) error
	// Project grant members.
	SearchProjectGrantMembers(ctx context.Context, request *model.ProjectGrantMemberSearchRequest) (*model.ProjectGrantMemberSearchResponse, error)
	ProjectGrantMemberByID(ctx context.Context, projectID, userID string) (*model.ProjectGrantMemberView, error)
	AddProjectGrantMember(ctx context.Context, member *model.ProjectGrantMember) (*model.ProjectGrantMember, error)
	ChangeProjectGrantMember(ctx context.Context, member *model.ProjectGrantMember) (*model.ProjectGrantMember, error)
	RemoveProjectGrantMember(ctx context.Context, projectID, grantID, userID string) error
	GetProjectGrantMemberRoles() []string
}
|
package krc
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/Shopify/sarama"
"github.com/jbvmio/kafkactl"
"github.com/jbvmio/krc/config"
"github.com/jbvmio/krc/kafka"
"github.com/spf13/cast"
)
// ValidateConfig fills in a missing local or remote bootstrap broker by
// selecting one from the corresponding configured broker list. Fields that
// are already set are left untouched.
func ValidateConfig(config *config.Config) {
	if config.LocalBoot == "" {
		config.LocalBoot = kafka.SelectBroker(config.LocalBrokers...)
	}
	if config.RemoteBoot == "" {
		config.RemoteBoot = kafka.SelectBroker(config.RemoteBrokers...)
	}
}
// InitReplicatedParts initializes every replicated partition concurrently
// and blocks until all of them finish. It always returns nil; the error
// result is kept for interface stability.
func InitReplicatedParts(RP []*ReplicatedPartition) error {
	var wg sync.WaitGroup
	wg.Add(len(RP))
	for _, part := range RP {
		go part.InitReplicatedPartition(&wg)
	}
	wg.Wait()
	return nil
}
// StripMeta clears the cached remote metadata on every replicated partition.
func StripMeta(rParts []*ReplicatedPartition) {
	var wg sync.WaitGroup
	wg.Add(len(rParts))
	for _, part := range rParts {
		// StripMetadata runs synchronously and calls wg.Done itself.
		part.StripMetadata(&wg)
	}
	wg.Wait()
}
// StripMetadata drops the partition's cached remote metadata by replacing it
// with an empty TopicOffsetMap, and signals wg when done.
func (rp *ReplicatedPartition) StripMetadata(wg *sync.WaitGroup) {
	rp.RemoteMetadata = kafkactl.TopicOffsetMap{}
	wg.Done()
}
// InitReplicatedPartition connects to the local broker, looks up the
// partition's topic metadata, and records the result/status on rp.
// Failures are recorded in rp.Result / rp.Status; wg is signalled on exit.
func (rp *ReplicatedPartition) InitReplicatedPartition(wg *sync.WaitGroup) {
	defer wg.Done()
	rp.DateTimeSecs = time.Now().Unix()
	rp.Result = ErrInitializingString
	rp.Status = ErrInitializing
	client, errd := kafkactl.NewClient(rp.LocalBroker)
	if errd != nil {
		rp.Result = ErrCreateClientString
		rp.Status = ErrCreateClient
		// No client was created, so there is nothing to close; the previous
		// code called client.Close() here and dereferenced a nil client.
		return
	}
	defer client.Close()
	topicMeta := kafka.SearchPartitionMeta(client, rp.Topic, rp.LocalPartition)
	// SearchPartitionMeta signals failures through sentinel topic names.
	if topicMeta.Topic == "NOTFOUND" {
		rp.Result = ErrTopicOrPartNotFoundString
		rp.Status = ErrTopicOrPartNotFound
	}
	if topicMeta.Topic == "ErrTopicMetadata" {
		rp.Result = ErrTopicMetaString
		rp.Status = ErrTopicMeta
	}
	rp.Metadata = topicMeta
}
// RPSendCheck produces a timestamped check message on the local side of the
// replicated partition and records the metadata needed for RPCheck to later
// verify that the message arrived on the remote side. Any failure is
// recorded in rp.Result / rp.Status; wg is signalled on exit.
func (rp *ReplicatedPartition) RPSendCheck(config *config.Config, wg *sync.WaitGroup) {
	defer wg.Done()
	var processErr bool
	rp.DateTimeSecs = time.Now().Unix()
	rp.PreviousResult = rp.Result
	rp.PreviousStatus = rp.Status
	rp.LocalNeedPRE = 0
	rp.RemoteNeedPRE = 0
	client, errd := kafkactl.NewClient(rp.LocalBroker)
	if errd != nil {
		rp.Result = ErrCreateClientString
		rp.Status = ErrCreateClient
		processErr = true
	} else {
		// Only close a client that was actually created; the previous code
		// called Close unconditionally and dereferenced a nil client after
		// a failed NewClient.
		defer client.Close()
	}
	remoteClient, errd := kafkactl.NewClient(rp.RemoteBroker)
	if errd != nil {
		rp.Result = ErrCreateClientString
		rp.Status = ErrCreateClient
		processErr = true
	} else {
		defer remoteClient.Close()
	}
	if !processErr {
		topicMeta := kafka.SearchPartitionMeta(client, rp.Topic, rp.LocalPartition)
		// SearchPartitionMeta signals failures through sentinel topic names.
		if topicMeta.Topic == "NOTFOUND" {
			rp.Result = ErrTopicOrPartNotFoundString
			rp.Status = ErrTopicOrPartNotFound
			processErr = true
		}
		if topicMeta.Topic == "ErrTopicMetadata" {
			rp.Result = ErrTopicMetaString
			rp.Status = ErrTopicMeta
			processErr = true
		}
		remoteMeta := kafka.SearchTopicMeta(remoteClient, rp.Topic)
		if len(remoteMeta) < 1 {
			rp.Result = ErrTopicOrPartNotFoundString
			rp.Status = ErrTopicOrPartNotFound
			processErr = true
		}
		if !processErr {
			rp.Metadata = topicMeta
			// Require full acks and manual partitioning so the check message
			// lands on exactly the partition under test.
			client.SaramaConfig().Producer.RequiredAcks = sarama.WaitForAll
			client.SaramaConfig().Producer.Return.Successes = true
			client.SaramaConfig().Producer.Return.Errors = true
			client.SaramaConfig().Producer.Partitioner = sarama.NewManualPartitioner
			// Salt the timestamp with partition+1 so partitions checked in the
			// same second still produce distinct check values.
			xPart := cast.ToInt64(rp.LocalPartition) + 1
			timeCheck := []byte(fmt.Sprintf("%v_%v", rp.Topic, rp.DateTimeSecs*xPart))
			setKey := append([]byte(config.CheckKey), timeCheck...)
			fs, _ := json.Marshal(&Source{
				Kind:           RemoteKind,
				DateTimeSecs:   rp.DateTimeSecs,
				SetKey:         string(setKey),
				Topic:          rp.Topic,
				LocalBroker:    rp.LocalBroker,
				LocalPartition: rp.LocalPartition,
			})
			setVal := fs
			ROM := remoteClient.MakeTopicOffsetMap(remoteMeta)
			for _, r := range ROM {
				if r.Topic == rp.Topic {
					rp.RemoteMetadata = r
				}
			}
			// Flag partitions whose current leader is not the preferred
			// replica. Guard against an empty replica list, which would
			// previously panic on Replicas[0].
			if len(topicMeta.Replicas) > 0 && topicMeta.Leader != topicMeta.Replicas[0] {
				rp.LocalNeedPRE = NeedLocalPRE
			}
			if needPRECheck(remoteMeta) {
				rp.RemoteNeedPRE = NeedRemotePRE
			}
			sendMsg := kafkactl.Message{
				Key:       setKey,
				Value:     setVal,
				Topic:     rp.Topic,
				Partition: rp.LocalPartition,
			}
			rp.SetKey = string(sendMsg.Key)
			rp.SetValue = string(sendMsg.Value)
			part, off, err := client.SendMSG(&sendMsg)
			rp.LocalOffset = off
			// A mismatched partition or negative offset both mean the message
			// did not land where expected.
			if part != rp.LocalPartition || off < 0 {
				rp.Status = ErrPartitionOrOffset
				rp.Result = ErrPartitionOrOffsetString
				processErr = true
			}
			if err != nil {
				rp.Status = ErrSendMsg
				rp.Result = ErrSendMsgString
				processErr = true
			}
			if !processErr {
				// Read the message back to capture the broker-assigned
				// timestamp used for latency measurement.
				msg, _ := client.ConsumeOffsetMsg(rp.Topic, part, off)
				rp.LocalDateTimeNano = msg.Timestamp.UnixNano()
			}
		}
	}
	// Reset per-round verification state; RPCheck fills these in.
	rp.GetValue = ""
	rp.Match = false
}
// RPCheck consumes the remote side of every replicated partition in D and
// verifies that each check message produced by RPSendCheck arrived intact,
// recording per-partition latency and an overall summary on D. wg is
// signalled on exit.
func RPCheck(config *config.Config, D *Detail, wg *sync.WaitGroup) {
	defer wg.Done()
	var result string
	var status int16
	var processErr bool
	remoteClient, errd := kafkactl.NewClient(config.RemoteBoot)
	if errd != nil {
		result = ErrCreateClientString
		status = ErrCreateClient
		processErr = true
	} else {
		// Only close a client that was actually created; the previous code
		// called Close unconditionally and dereferenced a nil client after
		// a failed NewClient.
		defer remoteClient.Close()
	}
	if !processErr {
		topic, keyValMap, remoteOffsetMap := getMaps(D)
		keyFoundMap := make(map[string]bool)
		msgChan := make(chan *kafkactl.Message, 100)
		stopChan := make(chan bool, len(remoteOffsetMap))
		for part, offset := range remoteOffsetMap {
			// Start one offset behind so the most recent message is replayed.
			go remoteClient.ChanPartitionConsume(topic, part, offset-1, msgChan, stopChan)
		}
		// Fallback timer so a missing message cannot block the loop forever.
		timer := make(chan string, 1)
		go func() {
			time.Sleep(config.TimeOut)
			timer <- ErrTimedOutString
		}()
		var count int
	ConsumeLoop:
		for {
			if count >= len(remoteOffsetMap) {
				break ConsumeLoop
			}
			select {
			case msg := <-msgChan:
				recKey := string(msg.Key)
				recVal := string(msg.Value)
				// Only the first matching copy of each key is counted.
				if keyValMap[recKey] == recVal && !keyFoundMap[recKey] {
					for _, rp := range D.ReplicatedPartitions {
						if rp.SetKey == recKey {
							keyFoundMap[recKey] = true
							rp.RemoteDateTimeNano = msg.Timestamp.UnixNano()
							rp.LatencyNano = rp.RemoteDateTimeNano - rp.LocalDateTimeNano
							rp.LatencyMS = float32(rp.LatencyNano) / 1000000
							rp.LastSuccessDateTimeSecs = msg.Timestamp.Unix()
							rp.GetValue = recVal
							rp.RemotePartition = msg.Partition
							rp.RemoteOffset = msg.Offset
							rp.Status = ErrNone
							rp.Result = ErrNoneString
							rp.Match = true
							count++
							break
						}
					}
				}
			case to := <-timer:
				status = ErrTimedOut
				result = to
				processErr = true
				break ConsumeLoop
			}
		}
		// Signal every partition consumer to stop.
		for i := 0; i < len(remoteOffsetMap); i++ {
			stopChan <- true
		}
		if processErr {
			// Mark every partition whose check message never showed up and
			// accumulate the elapsed time into its latency.
			for _, rp := range D.ReplicatedPartitions {
				if !keyFoundMap[rp.SetKey] {
					rp.Match = false
					rp.Result = result
					rp.Status = status
					currentLatencyNano := time.Now().UnixNano() - rp.LocalDateTimeNano
					rp.LatencyNano = rp.LatencyNano + currentLatencyNano
					rp.LatencyMS = float32(rp.LatencyNano) / 1000000
				}
			}
		} else {
			result = ErrNoneString
			status = ErrNone
		}
		D.Summary.State = result
		D.Summary.Status = status
	}
}
// getMaps collects, across all replicated partitions in D: the topic name,
// a map from check-message key to expected value, and the minimum remote
// offset per partition (the safe starting point for consumption).
func getMaps(D *Detail) (topic string, keyValMap map[string]string, remoteOffsetMap map[int32]int64) {
	keyValMap = make(map[string]string, len(D.ReplicatedPartitions))
	for _, rp := range D.ReplicatedPartitions {
		// NOTE(review): only partitions with a non-zero Status contribute
		// their key/value pair — confirm this is intended rather than != ErrNone.
		if rp.Status != 0 {
			keyValMap[rp.SetKey] = rp.SetValue
		}
		for part, offset := range rp.RemoteMetadata.PartitionOffsets {
			if remoteOffsetMap == nil {
				topic = rp.Topic
				// Copy instead of aliasing rp's map: the previous code
				// assigned rp.RemoteMetadata.PartitionOffsets directly, so
				// the min-offset updates below mutated that partition's
				// cached metadata.
				remoteOffsetMap = make(map[int32]int64, len(rp.RemoteMetadata.PartitionOffsets))
				for p, o := range rp.RemoteMetadata.PartitionOffsets {
					remoteOffsetMap[p] = o
				}
			} else if offset < remoteOffsetMap[part] {
				remoteOffsetMap[part] = offset
			}
		}
	}
	return
}
// needPRECheck reports whether any partition's current leader differs from
// its preferred replica (the first entry in the replica list).
func needPRECheck(remoteMeta []kafkactl.TopicMeta) bool {
	for _, tm := range remoteMeta {
		// Skip metadata with an empty replica list, which would otherwise
		// panic on Replicas[0].
		if len(tm.Replicas) == 0 {
			continue
		}
		if tm.Leader != tm.Replicas[0] {
			return true
		}
	}
	return false
}
|
//package with commands
package commands
import (
"database/sql"
"fmt"
"log"
"strings"
_ "github.com/mattn/go-sqlite3"
)
// LastBook returns a formatted, user-facing list of the 20 most recently
// added books from the given sqlite database file: "Найдено:" followed by
// one "/<id> <title>" line per book and a trailing usage hint.
func LastBook(database string) string {
	type Book struct {
		id    int
		title string
	}
	db, err := sql.Open("sqlite3", database)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Fetch the 20 most recently added books (highest ids).
	rows, err := db.Query("SELECT id,title FROM main.books ORDER BY id DESC LIMIT 20")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	var books []Book
	for rows.Next() {
		var id int
		var title string
		if err := rows.Scan(&id, &title); err != nil {
			log.Fatal(err)
		}
		books = append(books, Book{id, title})
	}
	// Previously ignored: a row-iteration error silently truncated the list.
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
	if len(books) == 0 {
		// Preserve the legacy output of fmt.Sprint on an empty slice.
		return "[]"
	}
	// Build the message directly instead of round-tripping through
	// fmt.Sprint + strings.Replace, which corrupted any title containing
	// the "} {" / "[{" / "}]" delimiter sequences.
	var b strings.Builder
	b.WriteString("Найдено:")
	for _, bk := range books {
		fmt.Fprintf(&b, "\n/%d %s", bk.id, bk.title)
	}
	b.WriteString("\n\nНажми на номер для получения описания и скачивания")
	return b.String()
}
|
package users
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
)
// TestUsers_V1CreateUserHandler verifies that POSTing a user payload to the
// v1 create handler returns 201 Created and the expected success body.
func (u *UsersTest) TestUsers_V1CreateUserHandler() {
	payload := User{
		Name:    "test from test case",
		Address: "test from test case",
	}
	encoded, _ := json.Marshal(payload)
	// create requests
	req := httptest.NewRequest("POST", "/", bytes.NewReader(encoded))
	rec := httptest.NewRecorder()
	v1CreateUserHandler(rec, req)
	resp := rec.Result()
	respBody, _ := ioutil.ReadAll(resp.Body)
	u.Equal(http.StatusCreated, resp.StatusCode, "it should be 201")
	u.Equal("\"success creating user\"\n", string(respBody), "it should be same return")
}
|
package keeper
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/palantir/stacktrace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
ethparams "github.com/ethereum/go-ethereum/params"
ethermint "github.com/tharsis/ethermint/types"
"github.com/tharsis/ethermint/x/evm/types"
)
// Compile-time assertion that Keeper implements the gRPC query service.
var _ types.QueryServer = Keeper{}

const (
	// defaultTraceTimeout bounds tracer execution when the trace request
	// does not specify its own timeout.
	defaultTraceTimeout = 5 * time.Second
)
// Account implements the Query/Account gRPC method
func (k Keeper) Account(c context.Context, req *types.QueryAccountRequest) (*types.QueryAccountResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	if err := ethermint.ValidateAddress(req.Address); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	// Report balance, code hash, and nonce for the requested address.
	addr := common.HexToAddress(req.Address)
	resp := &types.QueryAccountResponse{
		Balance:  k.GetBalance(addr).String(),
		CodeHash: k.GetCodeHash(addr).Hex(),
		Nonce:    k.GetNonce(addr),
	}
	return resp, nil
}
// CosmosAccount resolves the cosmos address derived from a hex eth address
// and, when the account exists, its sequence and account number.
func (k Keeper) CosmosAccount(c context.Context, req *types.QueryCosmosAccountRequest) (*types.QueryCosmosAccountResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	if err := ethermint.ValidateAddress(req.Address); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	// The cosmos account address is the raw bytes of the eth address.
	cosmosAddr := sdk.AccAddress(common.HexToAddress(req.Address).Bytes())
	resp := types.QueryCosmosAccountResponse{CosmosAddress: cosmosAddr.String()}
	if account := k.accountKeeper.GetAccount(ctx, cosmosAddr); account != nil {
		resp.Sequence = account.GetSequence()
		resp.AccountNumber = account.GetAccountNumber()
	}
	return &resp, nil
}
// ValidatorAccount returns the operator account address and, when present,
// the sequence and account number of the validator identified by its
// consensus address.
func (k Keeper) ValidatorAccount(c context.Context, req *types.QueryValidatorAccountRequest) (*types.QueryValidatorAccountResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	consAddr, err := sdk.ConsAddressFromBech32(req.ConsAddress)
	if err != nil {
		return nil, status.Error(
			codes.InvalidArgument, err.Error(),
		)
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	validator, found := k.stakingKeeper.GetValidatorByConsAddr(ctx, consAddr)
	if !found {
		// Previously returned (nil, nil), handing gRPC a nil response with
		// no error; surface a proper NotFound status instead.
		return nil, status.Errorf(codes.NotFound, "validator not found for %s", req.ConsAddress)
	}
	accAddr := sdk.AccAddress(validator.GetOperator())
	res := types.QueryValidatorAccountResponse{
		AccountAddress: accAddr.String(),
	}
	if account := k.accountKeeper.GetAccount(ctx, accAddr); account != nil {
		res.Sequence = account.GetSequence()
		res.AccountNumber = account.GetAccountNumber()
	}
	return &res, nil
}
// Balance implements the Query/Balance gRPC method
func (k Keeper) Balance(c context.Context, req *types.QueryBalanceRequest) (*types.QueryBalanceResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	if err := ethermint.ValidateAddress(req.Address); err != nil {
		// Reports ErrZeroAddress instead of the raw validation error,
		// matching the sibling Storage and Code queries.
		return nil, status.Error(codes.InvalidArgument, types.ErrZeroAddress.Error())
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	balance := k.GetBalance(common.HexToAddress(req.Address))
	return &types.QueryBalanceResponse{Balance: balance.String()}, nil
}
// Storage implements the Query/Storage gRPC method
func (k Keeper) Storage(c context.Context, req *types.QueryStorageRequest) (*types.QueryStorageResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	if err := ethermint.ValidateAddress(req.Address); err != nil {
		return nil, status.Error(codes.InvalidArgument, types.ErrZeroAddress.Error())
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	// Look up the storage slot for the given address/key pair and return
	// its hex encoding.
	value := k.GetState(common.HexToAddress(req.Address), common.HexToHash(req.Key))
	return &types.QueryStorageResponse{Value: value.Hex()}, nil
}
// Code implements the Query/Code gRPC method
func (k Keeper) Code(c context.Context, req *types.QueryCodeRequest) (*types.QueryCodeResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	if err := ethermint.ValidateAddress(req.Address); err != nil {
		return nil, status.Error(codes.InvalidArgument, types.ErrZeroAddress.Error())
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	// Return the bytecode stored at the requested address.
	code := k.GetCode(common.HexToAddress(req.Address))
	return &types.QueryCodeResponse{Code: code}, nil
}
// Params implements the Query/Params gRPC method
func (k Keeper) Params(c context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) {
	ctx := sdk.UnwrapSDKContext(c)
	resp := &types.QueryParamsResponse{Params: k.GetParams(ctx)}
	return resp, nil
}
// EthCall implements eth_call rpc api.
func (k Keeper) EthCall(c context.Context, req *types.EthCallRequest) (*types.MsgEthereumTxResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	var args types.TransactionArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	params := k.GetParams(ctx)
	ethCfg := params.ChainConfig.EthereumConfig(k.eip155ChainID)
	// The base fee only exists once London is active at this height.
	var baseFee *big.Int
	if types.IsLondon(ethCfg, ctx.BlockHeight()) {
		baseFee = k.feeMarketKeeper.GetBaseFee(ctx)
	}
	msg, err := args.ToMessage(req.GasCap, baseFee)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	coinbase, err := k.GetCoinbaseAddress(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	tracer := types.NewTracer(k.tracer, msg, ethCfg, ctx.BlockHeight(), k.debug)
	evm := k.NewEVM(msg, ethCfg, params, coinbase, baseFee, tracer)
	// pass true means execute in query mode, which don't do actual gas refund.
	res, err := k.ApplyMessage(evm, msg, ethCfg, true)
	// Discard all cached state writes made during the simulated call.
	k.ctxStack.RevertAll()
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return res, nil
}
// EstimateGas implements eth_estimateGas rpc api.
// It binary-searches the smallest gas limit at which the transaction
// executes without running out of gas, bounded above by the caller's gas,
// the block gas limit, or req.GasCap.
func (k Keeper) EstimateGas(c context.Context, req *types.EthCallRequest) (*types.EstimateGasResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	// No transaction can cost less than the intrinsic 21,000 gas.
	if req.GasCap < ethparams.TxGas {
		return nil, status.Error(codes.InvalidArgument, "gas cap cannot be lower than 21,000")
	}
	var args types.TransactionArgs
	err := json.Unmarshal(req.Args, &args)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	// Binary search the gas requirement, as it may be higher than the amount used
	var (
		lo  = ethparams.TxGas - 1 // highest gas known to be insufficient
		hi  uint64                // upper search bound
		cap uint64                // remembers the initial bound for the final re-check
	)
	// Determine the highest gas limit can be used during the estimation.
	if args.Gas != nil && uint64(*args.Gas) >= ethparams.TxGas {
		hi = uint64(*args.Gas)
	} else {
		// Query block gas limit
		params := ctx.ConsensusParams()
		if params != nil && params.Block != nil && params.Block.MaxGas > 0 {
			hi = uint64(params.Block.MaxGas)
		} else {
			hi = req.GasCap
		}
	}
	// TODO: Recap the highest gas limit with account's available balance.
	// Recap the highest gas allowance with specified gascap.
	if req.GasCap != 0 && hi > req.GasCap {
		hi = req.GasCap
	}
	cap = hi
	params := k.GetParams(ctx)
	ethCfg := params.ChainConfig.EthereumConfig(k.eip155ChainID)
	coinbase, err := k.GetCoinbaseAddress(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	// The base fee only exists once London is active at this height.
	var baseFee *big.Int
	if types.IsLondon(ethCfg, ctx.BlockHeight()) {
		baseFee = k.feeMarketKeeper.GetBaseFee(ctx)
	}
	// Create a helper to check if a gas allowance results in an executable transaction
	executable := func(gas uint64) (vmerror bool, rsp *types.MsgEthereumTxResponse, err error) {
		args.Gas = (*hexutil.Uint64)(&gas)
		// Reset to the initial context
		k.WithContext(ctx)
		msg, err := args.ToMessage(req.GasCap, baseFee)
		if err != nil {
			return false, nil, err
		}
		tracer := types.NewTracer(k.tracer, msg, ethCfg, k.Ctx().BlockHeight(), k.debug)
		evm := k.NewEVM(msg, ethCfg, params, coinbase, baseFee, tracer)
		// pass true means execute in query mode, which don't do actual gas refund.
		rsp, err = k.ApplyMessage(evm, msg, ethCfg, true)
		// Discard state writes made by this trial execution.
		k.ctxStack.RevertAll()
		if err != nil {
			if errors.Is(stacktrace.RootCause(err), core.ErrIntrinsicGas) {
				return true, nil, nil // Special case, raise gas limit
			}
			return true, nil, err // Bail out
		}
		return len(rsp.VmError) > 0, rsp, nil
	}
	// Execute the binary search and hone in on an executable gas limit
	hi, err = types.BinSearch(lo, hi, executable)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	// Reject the transaction as invalid if it still fails at the highest allowance
	if hi == cap {
		failed, result, err := executable(hi)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
		if failed {
			if result != nil && result.VmError != vm.ErrOutOfGas.Error() {
				if result.VmError == vm.ErrExecutionReverted.Error() {
					return nil, types.NewExecErrorWithReason(result.Ret)
				}
				return nil, status.Error(codes.Internal, result.VmError)
			}
			// Otherwise, the specified gas cap is too low
			return nil, status.Error(codes.Internal, fmt.Sprintf("gas required exceeds allowance (%d)", cap))
		}
	}
	return &types.EstimateGasResponse{Gas: hi}, nil
}
// TraceTx configures a new tracer according to the provided configuration, and
// executes the given message in the provided environment. The return value will
// be tracer dependent.
func (k Keeper) TraceTx(c context.Context, req *types.QueryTraceTxRequest) (*types.QueryTraceTxResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	if req.TraceConfig != nil && req.TraceConfig.Limit < 0 {
		return nil, status.Errorf(codes.InvalidArgument, "output limit cannot be negative, got %d", req.TraceConfig.Limit)
	}
	ctx := sdk.UnwrapSDKContext(c)
	k.WithContext(ctx)
	coinbase, err := k.GetCoinbaseAddress(ctx)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	params := k.GetParams(ctx)
	ethCfg := params.ChainConfig.EthereumConfig(k.eip155ChainID)
	signer := ethtypes.MakeSigner(ethCfg, big.NewInt(ctx.BlockHeight()))
	tx := req.Msg.AsTransaction()
	baseFee := k.feeMarketKeeper.GetBaseFee(ctx)
	// Delegate the actual execution and tracing to the internal helper.
	traceResult, err := k.traceTx(ctx, coinbase, signer, req.TxIndex, params, ethCfg, tx, baseFee, req.TraceConfig)
	if err != nil {
		return nil, err
	}
	data, err := json.Marshal(traceResult)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	return &types.QueryTraceTxResponse{Data: data}, nil
}
// traceTx executes a single Ethereum message under a tracer and returns the
// tracer-specific result.
//
// Tracer selection, driven by traceConfig:
//   - Tracer != "": a JavaScript tracer, guarded by an execution timeout.
//   - traceConfig non-nil otherwise: a struct logger built from its options.
//   - nil: the default struct tracer with debug enabled.
func (k *Keeper) traceTx(
	ctx sdk.Context,
	coinbase common.Address,
	signer ethtypes.Signer,
	txIndex uint64,
	params types.Params,
	ethCfg *ethparams.ChainConfig,
	tx *ethtypes.Transaction,
	baseFee *big.Int,
	traceConfig *types.TraceConfig,
) (*interface{}, error) {
	// Assemble the structured logger or the JavaScript tracer
	var (
		tracer    vm.Tracer
		overrides *ethparams.ChainConfig
		err       error
	)

	msg, err := tx.AsMessage(signer, baseFee)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	txHash := tx.Hash()

	if traceConfig != nil && traceConfig.Overrides != nil {
		// Chain-config overrides are only consumed by the struct logger below.
		overrides = traceConfig.Overrides.EthereumConfig(ethCfg.ChainID)
	}

	switch {
	case traceConfig != nil && traceConfig.Tracer != "":
		timeout := defaultTraceTimeout
		// TODO: change timeout to time.duration
		// Used string to comply with go ethereum
		if traceConfig.Timeout != "" {
			timeout, err = time.ParseDuration(traceConfig.Timeout)
			if err != nil {
				return nil, status.Errorf(codes.InvalidArgument, "timeout value: %s", err.Error())
			}
		}

		tCtx := &tracers.Context{
			BlockHash: k.GetHashFn()(uint64(ctx.BlockHeight())),
			TxIndex:   int(txIndex),
			TxHash:    txHash,
		}

		// Construct the JavaScript tracer to execute with
		if tracer, err = tracers.New(traceConfig.Tracer, tCtx); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// Handle timeouts and RPC cancellations
		deadlineCtx, cancel := context.WithTimeout(ctx.Context(), timeout)
		defer cancel()

		// Stop the JS tracer when the deadline elapses.
		go func() {
			<-deadlineCtx.Done()
			if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
				tracer.(*tracers.Tracer).Stop(errors.New("execution timeout"))
			}
		}()

	case traceConfig != nil:
		logConfig := vm.LogConfig{
			EnableMemory:     traceConfig.EnableMemory,
			DisableStorage:   traceConfig.DisableStorage,
			DisableStack:     traceConfig.DisableStack,
			EnableReturnData: traceConfig.EnableReturnData,
			Debug:            traceConfig.Debug,
			Limit:            int(traceConfig.Limit),
			Overrides:        overrides,
		}
		tracer = vm.NewStructLogger(&logConfig)

	default:
		tracer = types.NewTracer(types.TracerStruct, msg, ethCfg, ctx.BlockHeight(), true)
	}

	evm := k.NewEVM(msg, ethCfg, params, coinbase, baseFee, tracer)

	// Record the tx hash/index in the transient store for downstream hooks.
	k.SetTxHashTransient(txHash)
	k.SetTxIndexTransient(txIndex)

	// pass true means execute in query mode, which doesn't do actual gas refund.
	res, err := k.ApplyMessage(evm, msg, ethCfg, true)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	var result interface{}

	// Depending on the tracer type, format and return the trace result data.
	switch tracer := tracer.(type) {
	case *vm.StructLogger:
		// TODO: Return proper returnValue
		result = types.ExecutionResult{
			Gas:         res.GasUsed,
			Failed:      res.Failed(),
			ReturnValue: "",
			StructLogs:  types.FormatLogs(tracer.StructLogs()),
		}
	case *tracers.Tracer:
		result, err = tracer.GetResult()
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
	default:
		return nil, status.Errorf(codes.InvalidArgument, "invalid tracer type %T", tracer)
	}

	return &result, nil
}
|
package thermal
import (
"context"
"sync"
"github.com/spf13/viper"
"github.com/superchalupa/sailfish/src/log"
"github.com/superchalupa/sailfish/src/ocp/testaggregate"
"github.com/superchalupa/sailfish/src/ocp/view"
domain "github.com/superchalupa/sailfish/src/redfishresource"
eh "github.com/looplab/eventhorizon"
)
// RegisterAggregate registers the "thermal" aggregate factory with the
// testaggregate service. The factory emits a single CreateRedfishResource
// command publishing the Redfish Thermal resource (#Thermal.v1_0_2) at the
// view's URI; fan/temperature/redundancy collections and OEM health rollups
// are resolved lazily through vw.Meta property bindings.
func RegisterAggregate(s *testaggregate.Service) {
	s.RegisterAggregateFunction("thermal",
		func(ctx context.Context, subLogger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, extra interface{}, params map[string]interface{}) ([]eh.Command, error) {
			return []eh.Command{
				&domain.CreateRedfishResource{
					ResourceURI: vw.GetURI(),
					Type:        "#Thermal.v1_0_2.Thermal",
					Context:     "/redfish/v1/$metadata#Thermal.Thermal",

					// Only GET (any logged-in user) and PATCH (ConfigureManager)
					// are permitted on this resource.
					Privileges: map[string]interface{}{
						"GET":    []string{"Login"},
						"POST":   []string{}, // cannot create sub objects
						"PUT":    []string{},
						"PATCH":  []string{"ConfigureManager"},
						"DELETE": []string{}, // can't be deleted
					},
					Properties: map[string]interface{}{
						"Id":          "Thermal",
						"Name":        "Thermal",
						"Description": "Represents the properties for Temperature and Cooling",

						// Collections are driven by URI lists in the "default"
						// model: "expand" inlines the members, "count" formats
						// the @odata.count companion property.
						"Fans@meta":                     vw.Meta(view.GETProperty("fan_uris"), view.GETFormatter("expand"), view.GETModel("default")),
						"Fans@odata.count@meta":         vw.Meta(view.GETProperty("fan_uris"), view.GETFormatter("count"), view.GETModel("default")),
						"Temperatures@meta":             vw.Meta(view.GETProperty("temperature_uris"), view.GETFormatter("expand"), view.GETModel("default")),
						"Temperatures@odata.count@meta": vw.Meta(view.GETProperty("temperature_uris"), view.GETFormatter("count"), view.GETModel("default")),
						"Redundancy@meta":               vw.Meta(view.GETProperty("redundancy_uris"), view.GETFormatter("expand"), view.GETModel("default")),
						"Redundancy@odata.count@meta":   vw.Meta(view.GETProperty("redundancy_uris"), view.GETFormatter("count"), view.GETModel("default")),

						// OEM health rollups are read from the "global_health" model.
						"Oem": map[string]interface{}{
							"EID_674": map[string]interface{}{
								"FansSummary": map[string]interface{}{
									"Status": map[string]interface{}{
										"HealthRollup@meta": vw.Meta(view.GETProperty("fan_rollup"), view.GETModel("global_health")),
										"Health@meta":       vw.Meta(view.GETProperty("fan_rollup"), view.GETModel("global_health")),
									},
								},
								"TemperaturesSummary": map[string]interface{}{
									"Status": map[string]interface{}{
										"HealthRollup@meta": vw.Meta(view.GETProperty("temperature_rollup"), view.GETModel("global_health")),
										"Health@meta":       vw.Meta(view.GETProperty("temperature_rollup"), view.GETModel("global_health")),
									},
								},
							},
						},
					}},
			}, nil
		})
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jdbg
import (
"fmt"
"reflect"
)
// assignable returns true if the type of val can be assigned to dst.
//
// When val is a jdbg Value, its declared type is first checked for
// castability to dst; otherwise the check recurses on the wrapped raw value.
// Plain Go values are delegated to assignableT on their reflected type.
func (j *JDbg) assignable(dst Type, val interface{}) bool {
	if v, ok := val.(Value); ok {
		if v.ty.CastableTo(dst) {
			return true
		}
		if dst.Signature() == v.ty.Signature() {
			// Sanity check: two distinct Type instances must never share a
			// signature — reaching here indicates a corrupted type cache.
			panic(fmt.Sprint("Two different types found with identical signatures! ", dst.Signature()))
		}
		// Fall back to checking the underlying wrapped value.
		return j.assignable(dst, v.val)
	}
	return j.assignableT(dst, reflect.TypeOf(val))
}
// assignableT returns true if a Go value of reflected type src can be
// assigned to the jdbg type dst.
//
// Pointers and interfaces are unwrapped to their element type. A nil src
// (untyped nil) is assignable only to class types. Primitive kinds map onto
// the cached primitive types, and arrays/slices recurse on the element type.
func (j *JDbg) assignableT(dst Type, src reflect.Type) bool {
	if dst == j.cache.objTy {
		return true // Anything goes in an object!
	}
	if src == nil {
		// Untyped nil: only reference (class) types can hold it.
		_, isClass := dst.(*Class)
		return isClass
	}
	switch src.Kind() {
	case reflect.Ptr, reflect.Interface:
		return j.assignableT(dst, src.Elem())
	case reflect.Bool:
		return dst == j.cache.boolTy
	case reflect.Int8, reflect.Uint8:
		return dst == j.cache.byteTy
	case reflect.Int16:
		// int16 covers both Java char and short.
		return dst == j.cache.charTy || dst == j.cache.shortTy
	case reflect.Int32, reflect.Int:
		return dst == j.cache.intTy
	case reflect.Int64:
		return dst == j.cache.longTy
	case reflect.Float32:
		return dst == j.cache.floatTy
	case reflect.Float64:
		return dst == j.cache.doubleTy
	case reflect.String:
		return dst == j.cache.stringTy
	case reflect.Array, reflect.Slice:
		if dstArray, ok := dst.(*Array); ok {
			return j.assignableT(dstArray.el, src.Elem())
		}
	}
	return false
}
|
package api
import (
"encoding/json"
"net/http"
"github.com/ankur-anand/gostudygroup-bot/config"
"github.com/ankur-anand/gostudygroup-bot/helper"
)
var (
	// cfg aliases the shared configuration from the config package.
	cfg = config.Cfg
	// logger aliases the shared application logger from the helper package.
	logger = helper.Logger
)
// resWithError writes a JSON error response of the form {"error": message}
// with the given HTTP status code.
func resWithError(w http.ResponseWriter, code int, message string) {
	resWithJSON(w, code, map[string]string{"error": message})
}
// resWithSuccess writes a 200 OK JSON response of the form
// {"result": message}.
func resWithSuccess(w http.ResponseWriter, message string) {
	resWithJSON(w, http.StatusOK, map[string]string{"result": message})
}
func resWithJSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
|
package service
// Service defines a basic integer arithmetic service.
type Service interface {
	// Add returns a+b.
	Add(a, b int) int
	// Subtract returns a-b.
	Subtract(a, b int) int
	// Multiply returns a*b.
	Multiply(a, b int) int
	// Divide returns a/b together with an error — presumably non-nil when
	// b is 0; confirm against the implementations.
	Divide(a, b int) (int, error)
}
|
/*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package parameterizer_test
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/konveyor/move2kube/parameterizer"
)
// TestGetSubKeys checks that parameterizer.GetSubKeys splits a dotted key
// into its components, keeping double-quoted segments (which may contain
// spaces) intact as single components.
func TestGetSubKeys(t *testing.T) {
	testcases := []struct {
		input string
		want  []string
	}{
		{input: `aaa.bbb."ccc ddd".eee.fff`, want: []string{"aaa", "bbb", "ccc ddd", "eee", "fff"}},
		{input: "aaa.bbb.ccc", want: []string{"aaa", "bbb", "ccc"}},
	}
	for i, testcase := range testcases {
		t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
			subKeys := parameterizer.GetSubKeys(testcase.input)
			if len(subKeys) != len(testcase.want) {
				t.Fatalf("failed to get the correct number of subkeys. Expected %+v Actual %+v", testcase.want, subKeys)
			}
			// Element-wise comparison; Fatalf aborts on the first mismatch.
			for j := range subKeys {
				if subKeys[j] != testcase.want[j] {
					t.Fatalf("failed to get the subkeys properly. Expected %+v Actual %+v", testcase.want, subKeys)
				}
			}
		})
	}
}
// TestGet2 checks parameterizer.GetAll with a key that combines a quoted
// segment ("contain ers") and two match filters: [containerName:name=nginx]
// selects only containers named nginx, while [portName:name] captures each
// port's name without filtering. The java container must be skipped and both
// nginx containers' ports returned with their capture groups.
func TestGet2(t *testing.T) {
	key := `"contain ers".[containerName:name=nginx].ports.[portName:name]`
	// A pod-like resource: three containers, the 1st and 3rd named "nginx".
	resource := map[string]interface{}{
		"foo": map[string]interface{}{
			"bar": 42,
		},
		"contain ers": []interface{}{
			map[string]interface{}{"name": "nginx", "image": "docker.io/foo/nginx:latest",
				"ports": []interface{}{
					map[string]interface{}{"name": "port1", "number": 8000},
					map[string]interface{}{"name": "port2", "number": 8080},
				},
			},
			map[string]interface{}{"name": "java", "image": "docker.io/bar/java:latest",
				"ports": []interface{}{
					map[string]interface{}{"name": "port1", "number": 4000},
					map[string]interface{}{"name": "port2", "number": 4080},
				},
			},
			map[string]interface{}{"name": "nginx", "image": "docker.io/foo/nginx:v1.2.0",
				"ports": []interface{}{
					map[string]interface{}{"name": "port1", "number": 1000},
					map[string]interface{}{"name": "port2", "number": 1080},
				},
			},
		},
	}
	// Expected: ports of containers [0] and [2] only, with both match
	// variables (containerName, portName) captured per result.
	want := []parameterizer.RT{
		{Key: []string{"contain ers", "[0]", "ports", "[0]"}, Value: map[string]interface{}{"name": "port1", "number": 8000}, Matches: map[string]string{"containerName": "nginx", "portName": "port1"}},
		{Key: []string{"contain ers", "[0]", "ports", "[1]"}, Value: map[string]interface{}{"name": "port2", "number": 8080}, Matches: map[string]string{"containerName": "nginx", "portName": "port2"}},
		{Key: []string{"contain ers", "[2]", "ports", "[0]"}, Value: map[string]interface{}{"name": "port1", "number": 1000}, Matches: map[string]string{"containerName": "nginx", "portName": "port1"}},
		{Key: []string{"contain ers", "[2]", "ports", "[1]"}, Value: map[string]interface{}{"name": "port2", "number": 1080}, Matches: map[string]string{"containerName": "nginx", "portName": "port2"}},
	}
	results, err := parameterizer.GetAll(key, resource)
	if err != nil {
		t.Fatalf("failed to get the values for the key %s Error: %q", key, err)
	}
	if !cmp.Equal(results, want) {
		t.Fatalf("differences %+v", cmp.Diff(results, want))
	}
}
|
package systemdeps
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
)
// List of system processes.
type Processes struct {
Processes []Process `json:"processes"`
}
// Representation of a process and it's dependencies.
type Process struct {
Name string `json:"name"`
Dependencies []string `json:"dependencies"`
}
// Read a JSON file and return the Processes struct.
func ReadDependencyFile(filename string) (*Processes, error) {
jsonFile, err := os.Open(filepath.Clean(filename))
if err != nil {
fmt.Println(err)
return nil, err
}
defer jsonFile.Close()
byteValue, _ := ioutil.ReadAll(jsonFile)
var processes Processes
json.Unmarshal(byteValue, &processes)
return &processes, nil
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// "make test" would normally test this file, but it should only be tested
// within docker compose. We also can't use just "gss" here because that
// tag is reserved for the toplevel Makefile's linux-gnu build.
// +build gss_compose
package gss
import (
gosql "database/sql"
"fmt"
"os/exec"
"regexp"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/lib/pq"
"github.com/lib/pq/auth/kerberos"
)
// init wires the kerberos-backed GSS provider into lib/pq so that the tests
// below can open GSS-authenticated connections.
func init() {
	pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() })
}
// TestGSS exercises GSS/Kerberos authentication end to end against a
// cockroach node inside docker compose. For each hba.conf variant it checks
// either the configuration error (hbaErr) when applying the setting, or the
// login error (gssErr) through three client paths: lib/pq, psql, and the
// cockroach CLI.
func TestGSS(t *testing.T) {
	connector, err := pq.NewConnector("user=root sslmode=require")
	if err != nil {
		t.Fatal(err)
	}
	db := gosql.OpenDB(connector)
	defer db.Close()

	tests := []struct {
		// The hba.conf file/setting.
		conf string
		user string
		// Error message of hba conf
		hbaErr string
		// Error message of gss login.
		gssErr string
	}{
		{
			conf:   `host all all all gss include_realm=0 nope=1`,
			hbaErr: `unsupported option`,
		},
		{
			conf:   `host all all all gss include_realm=1`,
			hbaErr: `include_realm must be set to 0`,
		},
		{
			conf:   `host all all all gss`,
			hbaErr: `missing "include_realm=0"`,
		},
		{
			conf:   `host all all all gss include_realm=0`,
			user:   "tester",
			gssErr: `GSS authentication requires an enterprise license`,
		},
		{
			conf:   `host all tester all gss include_realm=0`,
			user:   "tester",
			gssErr: `GSS authentication requires an enterprise license`,
		},
		{
			// No hba entry matches user "tester" at all.
			conf:   `host all nope all gss include_realm=0`,
			user:   "tester",
			gssErr: "no server.host_based_authentication.configuration entry",
		},
		{
			conf:   `host all all all gss include_realm=0 krb_realm=MY.EX`,
			user:   "tester",
			gssErr: `GSS authentication requires an enterprise license`,
		},
		{
			// The client's realm (MY.EX) is not in the allowed list.
			conf:   `host all all all gss include_realm=0 krb_realm=NOPE.EX`,
			user:   "tester",
			gssErr: `GSSAPI realm \(MY.EX\) didn't match any configured realm`,
		},
		{
			conf:   `host all all all gss include_realm=0 krb_realm=NOPE.EX krb_realm=MY.EX`,
			user:   "tester",
			gssErr: `GSS authentication requires an enterprise license`,
		},
	}
	for i, tc := range tests {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			// Apply the hba.conf under test; invalid configs must fail here.
			if _, err := db.Exec(`SET CLUSTER SETTING server.host_based_authentication.configuration = $1`, tc.conf); !IsError(err, tc.hbaErr) {
				t.Fatalf("expected err %v, got %v", tc.hbaErr, err)
			}
			if tc.hbaErr != "" {
				return
			}
			if _, err := db.Exec(fmt.Sprintf(`CREATE USER IF NOT EXISTS '%s'`, tc.user)); err != nil {
				t.Fatal(err)
			}
			t.Run("libpq", func(t *testing.T) {
				userConnector, err := pq.NewConnector(fmt.Sprintf("user=%s sslmode=require krbspn=postgres/gss_cockroach_1.gss_default", tc.user))
				if err != nil {
					t.Fatal(err)
				}
				userDB := gosql.OpenDB(userConnector)
				defer userDB.Close()
				_, err = userDB.Exec("SELECT 1")
				if !IsError(err, tc.gssErr) {
					t.Errorf("expected err %v, got %v", tc.gssErr, err)
				}
			})
			t.Run("psql", func(t *testing.T) {
				// Attach psql's combined output to the error so IsError can
				// match against the client-side failure message too.
				out, err := exec.Command("psql", "-c", "SELECT 1", "-U", tc.user).CombinedOutput()
				err = errors.Wrap(err, strings.TrimSpace(string(out)))
				if !IsError(err, tc.gssErr) {
					t.Errorf("expected err %v, got %v", tc.gssErr, err)
				}
			})
			t.Run("cockroach", func(t *testing.T) {
				out, err := exec.Command("/cockroach/cockroach", "sql",
					"-e", "SELECT 1",
					"--certs-dir", "/certs",
					// TODO(mjibson): Teach the CLI to not ask for passwords during kerberos.
					// See #51588.
					"--url", fmt.Sprintf("postgresql://%s:nopassword@cockroach:26257/?sslmode=require&krbspn=postgres/gss_cockroach_1.gss_default", tc.user),
				).CombinedOutput()
				err = errors.Wrap(err, strings.TrimSpace(string(out)))
				if !IsError(err, tc.gssErr) {
					t.Errorf("expected err %v, got %v", tc.gssErr, err)
				}
			})
		})
	}
}
// TestGSSFileDescriptorCount repeatedly opens GSS connections via psql so a
// human operator can watch the server's open-file count for descriptor leaks.
// It is skipped by default and only useful during manual investigation.
func TestGSSFileDescriptorCount(t *testing.T) {
	// When the docker-compose.yml added a ulimit for the cockroach
	// container the open file count would just stop there, it wouldn't
	// cause cockroach to panic or error like I had hoped since it would
	// allow a test to assert that multiple gss connections didn't leak
	// file descriptors. Another possibility would be to have something
	// track the open file count in the cockroach container, but that seems
	// brittle and probably not worth the effort. However this test is
	// useful when doing manual tracking of file descriptor count.
	t.Skip("#51791")

	rootConnector, err := pq.NewConnector("user=root sslmode=require")
	if err != nil {
		t.Fatal(err)
	}
	rootDB := gosql.OpenDB(rootConnector)
	defer rootDB.Close()

	if _, err := rootDB.Exec(`SET CLUSTER SETTING server.host_based_authentication.configuration = $1`, "host all all all gss include_realm=0"); err != nil {
		t.Fatal(err)
	}
	const user = "tester"
	if _, err := rootDB.Exec(fmt.Sprintf(`CREATE USER IF NOT EXISTS '%s'`, user)); err != nil {
		t.Fatal(err)
	}

	start := timeutil.Now()
	for i := 0; i < 1000; i++ {
		fmt.Println(i, timeutil.Since(start))
		out, err := exec.Command("psql", "-c", "SELECT 1", "-U", user).CombinedOutput()
		// Without a license the login must fail with exactly this message;
		// anything else aborts the run.
		if IsError(err, "GSS authentication requires an enterprise license") {
			t.Log(string(out))
			t.Fatal(err)
		}
	}
}
func IsError(err error, re string) bool {
if err == nil && re == "" {
return true
}
if err == nil || re == "" {
return false
}
matched, merr := regexp.MatchString(re, err.Error())
if merr != nil {
return false
}
return matched
}
|
package cache
import "sync"
/*
Given:
InMemoryCache - a thread-safe Key-Value cache implementation that stores its data in memory
Task:
1. Implement the GetOrSet method, providing the following guarantees:
- The value of each key is computed exactly once
- Concurrent accesses to existing keys do not block each other
2. Cover it with tests, exercising the method with 1000+ goroutines
*/
type (
	// Key is the cache key type.
	Key = string
	// Value is the cache value type.
	Value = string
)

// Cache is a key-value cache with lazy, compute-once population.
type Cache interface {
	// GetOrSet returns the value for key, computing and storing it via
	// valueFn on first access.
	GetOrSet(key Key, valueFn func() Value) Value
	// Get returns the value for key and whether it is present.
	Get(key Key) (Value, bool)
}
// ----------------------------------------------
// InMemoryCache is a thread-safe in-memory implementation of Cache.
// The zero value is not usable; construct instances with NewInMemoryCache.
type InMemoryCache struct {
	// dataMutex guards data; readers take the read lock so lookups of
	// existing keys do not block each other.
	dataMutex sync.RWMutex
	data      map[Key]Value
}

// NewInMemoryCache returns an empty, ready-to-use InMemoryCache.
func NewInMemoryCache() *InMemoryCache {
	return &InMemoryCache{
		data: make(map[Key]Value),
	}
}
// Get returns the value stored under key and whether the key was present.
// Only the read lock is taken, so concurrent readers do not block each other.
func (cache *InMemoryCache) Get(key Key) (Value, bool) {
	cache.dataMutex.RLock()
	value, found := cache.data[key]
	cache.dataMutex.RUnlock()
	return value, found
}
// GetOrSet returns the value for key if it exists. Otherwise it computes the
// value with valueFn under the write lock, stores it, and returns it.
//
// The fast path uses only the read lock, so concurrent accesses to existing
// keys do not block each other. The slow path re-checks under the write lock
// (double-checked locking) so valueFn runs at most once per key.
func (cache *InMemoryCache) GetOrSet(key Key, valueFn func() Value) Value {
	// Fast path: the key may already be cached.
	cache.dataMutex.RLock()
	value, found := cache.data[key]
	cache.dataMutex.RUnlock()
	if found {
		return value
	}

	// Slow path: take the write lock and re-check, since another goroutine
	// may have populated the key between the RUnlock and the Lock.
	cache.dataMutex.Lock()
	defer cache.dataMutex.Unlock()
	if value, found := cache.data[key]; found {
		return value
	}
	value = valueFn()
	cache.data[key] = value
	return value
}
|
package e7
import (
"errors"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestTopic_String verifies the human-readable name of every Topic value.
func TestTopic_String(t *testing.T) {
	tests := []struct {
		in   Topic
		want string
	}{
		{in: Criticism, want: "Criticism"},
		{in: RealityCheck, want: "Reality Check"},
		{in: HeroicTale, want: "Heroic Tale"},
		{in: ComfortingCheer, want: "Comforting Cheer"},
		{in: CuteCheer, want: "Cute Cheer"},
		{in: HeroicCheer, want: "Heroic Cheer"},
		{in: SadMemory, want: "Sad Memory"},
		{in: JoyfulMemory, want: "Joyful Memory"},
		{in: HappyMemory, want: "Happy Memory"},
		{in: UniqueComment, want: "Unique Comment"},
		{in: SelfIndulgent, want: "Self-Indulgent"},
		{in: Occult, want: "Occult"},
		{in: Myth, want: "Myth"},
		{in: BizarreStory, want: "Bizarre Story"},
		{in: FoodStory, want: "Food Story"},
		{in: HorrorStory, want: "Horror Story"},
		{in: Gossip, want: "Gossip"},
		{in: Dream, want: "Dream"},
		{in: Advice, want: "Advice"},
		{in: Complain, want: "Complain"},
		{in: Belief, want: "Belief"},
		{in: InterestingStory, want: "Interesting Story"},
	}
	for _, tt := range tests {
		got := tt.in.String()
		if diff := cmp.Diff(tt.want, got); diff != "" {
			t.Errorf("Topic.String mismatch (-want, +got):\n%s", diff)
		}
	}
}
// TestTopic_MarshalJSON verifies that every Topic marshals to its quoted
// display name.
func TestTopic_MarshalJSON(t *testing.T) {
	tests := []struct {
		in   Topic
		want []byte
	}{
		{in: Criticism, want: []byte(`"Criticism"`)},
		{in: RealityCheck, want: []byte(`"Reality Check"`)},
		{in: HeroicTale, want: []byte(`"Heroic Tale"`)},
		{in: ComfortingCheer, want: []byte(`"Comforting Cheer"`)},
		{in: CuteCheer, want: []byte(`"Cute Cheer"`)},
		{in: HeroicCheer, want: []byte(`"Heroic Cheer"`)},
		{in: SadMemory, want: []byte(`"Sad Memory"`)},
		{in: JoyfulMemory, want: []byte(`"Joyful Memory"`)},
		{in: HappyMemory, want: []byte(`"Happy Memory"`)},
		{in: UniqueComment, want: []byte(`"Unique Comment"`)},
		{in: SelfIndulgent, want: []byte(`"Self-Indulgent"`)},
		{in: Occult, want: []byte(`"Occult"`)},
		{in: Myth, want: []byte(`"Myth"`)},
		{in: BizarreStory, want: []byte(`"Bizarre Story"`)},
		{in: FoodStory, want: []byte(`"Food Story"`)},
		{in: HorrorStory, want: []byte(`"Horror Story"`)},
		{in: Gossip, want: []byte(`"Gossip"`)},
		{in: Dream, want: []byte(`"Dream"`)},
		{in: Advice, want: []byte(`"Advice"`)},
		{in: Complain, want: []byte(`"Complain"`)},
		{in: Belief, want: []byte(`"Belief"`)},
		{in: InterestingStory, want: []byte(`"Interesting Story"`)},
	}
	for _, tt := range tests {
		got, err := tt.in.MarshalJSON()
		if err != nil {
			t.Errorf("Topic.MarshalJSON returned error: %v", err)
		}
		if diff := cmp.Diff(tt.want, got); diff != "" {
			t.Errorf("Topic.MarshalJSON mismatch (-want, +got):\n%s", diff)
		}
	}
}
// TestTopic_UnmarshalJSON verifies that every quoted display name decodes
// back to the corresponding Topic value (round-trip of MarshalJSON).
func TestTopic_UnmarshalJSON(t *testing.T) {
	tests := []struct {
		in   []byte
		want Topic
	}{
		{in: []byte(`"Criticism"`), want: Criticism},
		{in: []byte(`"Reality Check"`), want: RealityCheck},
		{in: []byte(`"Heroic Tale"`), want: HeroicTale},
		{in: []byte(`"Comforting Cheer"`), want: ComfortingCheer},
		{in: []byte(`"Cute Cheer"`), want: CuteCheer},
		{in: []byte(`"Heroic Cheer"`), want: HeroicCheer},
		{in: []byte(`"Sad Memory"`), want: SadMemory},
		{in: []byte(`"Joyful Memory"`), want: JoyfulMemory},
		{in: []byte(`"Happy Memory"`), want: HappyMemory},
		{in: []byte(`"Unique Comment"`), want: UniqueComment},
		{in: []byte(`"Self-Indulgent"`), want: SelfIndulgent},
		{in: []byte(`"Occult"`), want: Occult},
		{in: []byte(`"Myth"`), want: Myth},
		{in: []byte(`"Bizarre Story"`), want: BizarreStory},
		{in: []byte(`"Food Story"`), want: FoodStory},
		{in: []byte(`"Horror Story"`), want: HorrorStory},
		{in: []byte(`"Gossip"`), want: Gossip},
		{in: []byte(`"Dream"`), want: Dream},
		{in: []byte(`"Advice"`), want: Advice},
		{in: []byte(`"Complain"`), want: Complain},
		{in: []byte(`"Belief"`), want: Belief},
		{in: []byte(`"Interesting Story"`), want: InterestingStory},
	}
	for _, tt := range tests {
		tp := new(Topic)
		err := tp.UnmarshalJSON(tt.in)
		if err != nil {
			t.Errorf("Topic.UnmarshalJSON returned error: %v", err)
		}
		if diff := cmp.Diff(tt.want, *tp); diff != "" {
			t.Errorf("Topic.UnmarshalJSON mismatch (-want +got):\n%s", diff)
		}
	}
}
// TestTopic_UnmarshalJSON_unknownTopic verifies that decoding an
// unrecognized topic string fails with ErrUnknownTopic.
func TestTopic_UnmarshalJSON_unknownTopic(t *testing.T) {
	var tp Topic
	err := tp.UnmarshalJSON([]byte(`"test"`))
	if err == nil {
		t.Errorf("Expected error to be returned")
	}
	if !errors.Is(err, ErrUnknownTopic) {
		t.Errorf("expected unknown topic error")
	}
}
|
/*
* Copyright 2022 Kube Admission Webhook Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package certificate
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
// Table-driven specs for Options.setDefaultsAndValidate: required fields
// (Namespace, WebhookName), default filling (WebhookType and the CA/cert
// rotation and overlap intervals), and the interval ordering constraints
// (overlap <= rotate, cert rotate <= CA rotate).
var _ = Describe("Certificate Options", func() {
	// setDefaultsAndValidateCase drives one table entry: options is mutated
	// in place and then compared against expectedOptions.
	type setDefaultsAndValidateCase struct {
		options         Options
		expectedOptions Options
		isValid         bool
	}
	DescribeTable("setDefaultsAndValidate",
		func(c setDefaultsAndValidateCase) {
			err := c.options.setDefaultsAndValidate()
			if c.isValid {
				Expect(err).To(Succeed(), "should succeed validating the options")
			} else {
				Expect(err).ToNot(Succeed(), "should not succeed validating the options")
			}
			// Even on failure the (possibly partially defaulted) options are
			// compared, pinning down exactly which defaults were applied.
			Expect(c.options).To(Equal(c.expectedOptions), "should equal expected options after setting defaults")
		},
		Entry("Empty options should be invalid since it's missing webhook name and namespace and set defaults", setDefaultsAndValidateCase{
			isValid: false,
		}),
		Entry("Just passing webhook name options should be invalid since it's missing namespace and set default", setDefaultsAndValidateCase{
			options: Options{
				WebhookName: "MyWebhook",
			},
			expectedOptions: Options{
				WebhookName: "MyWebhook",
			},
			isValid: false,
		}),
		Entry("Passing webhook name and namespace options should be valid and set default", setDefaultsAndValidateCase{
			options: Options{
				Namespace:   "MyNamespace",
				WebhookName: "MyWebhook",
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         MutatingWebhook,
				CARotateInterval:    OneYearDuration,
				CAOverlapInterval:   OneYearDuration,
				CertRotateInterval:  OneYearDuration,
				CertOverlapInterval: OneYearDuration,
			},
			isValid: true,
		}),
		Entry("Passing WebhookType ValidatingWebhook options should be valid", setDefaultsAndValidateCase{
			options: Options{
				Namespace:   "MyNamespace",
				WebhookName: "MyWebhook",
				WebhookType: ValidatingWebhook,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         ValidatingWebhook,
				CARotateInterval:    OneYearDuration,
				CAOverlapInterval:   OneYearDuration,
				CertRotateInterval:  OneYearDuration,
				CertOverlapInterval: OneYearDuration,
			},
			isValid: true,
		}),
		Entry("Passing WebhookType MutatingWebhook options should be valid", setDefaultsAndValidateCase{
			options: Options{
				Namespace:   "MyNamespace",
				WebhookName: "MyWebhook",
				WebhookType: MutatingWebhook,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         MutatingWebhook,
				CARotateInterval:    OneYearDuration,
				CAOverlapInterval:   OneYearDuration,
				CertRotateInterval:  OneYearDuration,
				CertOverlapInterval: OneYearDuration,
			},
			isValid: true,
		}),
		Entry("Passing unknown WebhookType should be invalid", setDefaultsAndValidateCase{
			options: Options{
				Namespace:   "MyNamespace",
				WebhookName: "MyWebhook",
				WebhookType: "BadWebhookType",
			},
			expectedOptions: Options{
				Namespace:   "MyNamespace",
				WebhookName: "MyWebhook",
				WebhookType: "BadWebhookType",
			},
			isValid: false,
		}),
		// The interval defaults cascade: CAOverlap <- CARotate,
		// CertRotate <- CARotate, CertOverlap <- CertRotate.
		Entry("CAOverlapInterval has to default to CARotateInterval", setDefaultsAndValidateCase{
			options: Options{
				Namespace:        "MyNamespace",
				WebhookName:      "MyWebhook",
				CARotateInterval: 2 * OneYearDuration,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         MutatingWebhook,
				CARotateInterval:    2 * OneYearDuration,
				CAOverlapInterval:   2 * OneYearDuration,
				CertRotateInterval:  2 * OneYearDuration,
				CertOverlapInterval: 2 * OneYearDuration,
			},
			isValid: true,
		}),
		Entry("CertRotateInterval has to default to CARotateInterval", setDefaultsAndValidateCase{
			options: Options{
				Namespace:         "MyNamespace",
				WebhookName:       "MyWebhook",
				CARotateInterval:  2 * OneYearDuration,
				CAOverlapInterval: 1 * OneYearDuration,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         MutatingWebhook,
				CARotateInterval:    2 * OneYearDuration,
				CAOverlapInterval:   1 * OneYearDuration,
				CertRotateInterval:  2 * OneYearDuration,
				CertOverlapInterval: 2 * OneYearDuration,
			},
			isValid: true,
		}),
		Entry("CertOverlapInterval has to default to CertRotateInterval", setDefaultsAndValidateCase{
			options: Options{
				Namespace:          "MyNamespace",
				WebhookName:        "MyWebhook",
				CARotateInterval:   2 * OneYearDuration,
				CAOverlapInterval:  1 * OneYearDuration,
				CertRotateInterval: OneYearDuration / 2,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         MutatingWebhook,
				CARotateInterval:    2 * OneYearDuration,
				CAOverlapInterval:   1 * OneYearDuration,
				CertRotateInterval:  OneYearDuration / 2,
				CertOverlapInterval: OneYearDuration / 2,
			},
			isValid: true,
		}),
		// Ordering constraints: validation must fail before any interval
		// defaults are filled in.
		Entry("Passing CAOverlapInterval > CARotateInterval should be invalid", setDefaultsAndValidateCase{
			options: Options{
				Namespace:         "MyNamespace",
				WebhookName:       "MyWebhook",
				CARotateInterval:  1 * time.Hour,
				CAOverlapInterval: 2 * time.Hour,
			},
			expectedOptions: Options{
				Namespace:         "MyNamespace",
				WebhookName:       "MyWebhook",
				CARotateInterval:  1 * time.Hour,
				CAOverlapInterval: 2 * time.Hour,
			},
			isValid: false,
		}),
		Entry("Passing CertRotateInterval > CARotateInterval should be invalid", setDefaultsAndValidateCase{
			options: Options{
				Namespace:          "MyNamespace",
				WebhookName:        "MyWebhook",
				CARotateInterval:   1 * time.Hour,
				CertRotateInterval: 2 * time.Hour,
			},
			expectedOptions: Options{
				Namespace:          "MyNamespace",
				WebhookName:        "MyWebhook",
				CARotateInterval:   1 * time.Hour,
				CertRotateInterval: 2 * time.Hour,
			},
			isValid: false,
		}),
		Entry("Passing CertOverlapInterval > CertRotateInterval should be invalid", setDefaultsAndValidateCase{
			options: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				CertRotateInterval:  1 * time.Hour,
				CertOverlapInterval: 2 * time.Hour,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				CertRotateInterval:  1 * time.Hour,
				CertOverlapInterval: 2 * time.Hour,
			},
			isValid: false,
		}),
		Entry("Passing all options override defaults", setDefaultsAndValidateCase{
			options: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         ValidatingWebhook,
				CARotateInterval:    1 * time.Hour,
				CAOverlapInterval:   1 * time.Minute,
				CertRotateInterval:  30 * time.Minute,
				CertOverlapInterval: 15 * time.Minute,
			},
			expectedOptions: Options{
				Namespace:           "MyNamespace",
				WebhookName:         "MyWebhook",
				WebhookType:         ValidatingWebhook,
				CARotateInterval:    1 * time.Hour,
				CAOverlapInterval:   1 * time.Minute,
				CertRotateInterval:  30 * time.Minute,
				CertOverlapInterval: 15 * time.Minute,
			},
			isValid: true,
		}),
	)
})
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcbench
import (
"flag"
"fmt"
)
// Bytes is a byte count that formats with SI prefixes (String) and parses
// either SI ("64kB") or binary ("64KiB") prefixes (Set). The String/Set pair
// satisfies flag.Value, so it can be used directly with flag.Var.
type Bytes int64

// si lists the SI prefix letters in increasing order of magnitude; Set also
// reuses it to build the binary ("<prefix>iB") suffixes.
var si = []string{"", "k", "M", "G", "T", "P", "E", "Z", "Y"}

const (
	// Decimal (SI) units.
	B  Bytes = 1
	KB Bytes = 1e3
	MB Bytes = 1e6
	GB Bytes = 1e9
	TB Bytes = 1e12
	PB Bytes = 1e15
	EB Bytes = 1e18
	// ZB/YB are commented out: they would overflow int64.
	//ZB Bytes = 1e21
	//YB Bytes = 1e24

	// Binary (IEC) units.
	KiB Bytes = 1 << 10
	MiB Bytes = 1 << 20
	GiB Bytes = 1 << 30
	TiB Bytes = 1 << 40
	PiB Bytes = 1 << 50
	EiB Bytes = 1 << 60
	// ZiB/YiB are commented out: they would overflow int64.
	//ZiB Bytes = 1 << 70
	//YiB Bytes = 1 << 80
)
func (b Bytes) String() string {
f := float64(b)
for i, s := range si {
if f < 1000 || i == len(si)-1 {
return fmt.Sprintf("%g%sB", f, s)
}
f /= 1000
}
panic("not reached")
}
func (b *Bytes) Set(s string) error {
var num float64
var unit string
_, err := fmt.Sscanf(s, "%g%s", &num, &unit)
if err == nil {
// Try SI prefixes first.
onum := num
for _, s := range si {
if unit == s+"B" {
*b = Bytes(num)
return nil
}
num *= 1000
}
// Try binary prefixes.
num = onum
for _, s := range si {
if unit == s+"iB" {
*b = Bytes(num)
return nil
}
num *= 1024
}
}
return fmt.Errorf("expected <num><SI or binary prefix>B")
}
func FlagBytes(name string, value Bytes, usage string) *Bytes {
flag.Var(&value, name, usage)
return &value
}
func ParseBytes(s string) (Bytes, error) {
var b Bytes
err := b.Set(s)
return b, err
}
|
package market
import cosmostypes "github.com/cosmos/cosmos-sdk/types"
// ================= Query ================= //
// GetConfigResponse is the decoded reply to the market contract's
// config query: the addresses of the contracts it is wired to, plus
// the stable denom and rate factors.
type GetConfigResponse struct {
	OwnerAddr         cosmostypes.AccAddress `json:"owner_addr"`
	ATerraContract    cosmostypes.AccAddress `json:"aterra_contract"`
	InterestModel     cosmostypes.AccAddress `json:"interest_model"`
	DistributionModel cosmostypes.AccAddress `json:"distribution_model"`
	OverseerContract  cosmostypes.AccAddress `json:"overseer_contract"`
	CollectorContract cosmostypes.AccAddress `json:"collector_contract"`
	FaucetContract    cosmostypes.AccAddress `json:"faucet_contract"`
	StableDenom       string                 `json:"stable_denom"`
	ReserveFactor     cosmostypes.Dec        `json:"reserve_factor"`
	MaxBorrowFactor   cosmostypes.Dec        `json:"max_borrow_factor"`
}

// GetStateResponse is the decoded reply to the market state query.
// LastInterestUpdated/LastRewardUpdated are unitless counters —
// presumably block heights; confirm against the contract.
type GetStateResponse struct {
	TotalLiabilities    cosmostypes.Dec `json:"total_liabilities"`
	TotalReserves       cosmostypes.Dec `json:"total_reserves"`
	LastInterestUpdated uint64          `json:"last_interest_updated"`
	LastRewardUpdated   uint64          `json:"last_reward_updated"`
	GlobalInterestIndex cosmostypes.Dec `json:"global_interest_index"`
	GlobalRewardIndex   cosmostypes.Dec `json:"global_reward_index"`
	ANCEmissionRate     cosmostypes.Dec `json:"anc_emission_rate"`
}

// GetEpochStateResponse is the decoded reply to the epoch-state query.
type GetEpochStateResponse struct {
	ExchangeRate cosmostypes.Dec `json:"exchange_rate"`
	ATokenSupply cosmostypes.Int `json:"a_token_supply"`
}

// GetBorrowerInfoResponse is the decoded reply to the borrower-info
// query for a single borrower.
type GetBorrowerInfoResponse struct {
	Borrower       cosmostypes.AccAddress `json:"borrower"`
	InterestIndex  cosmostypes.Dec        `json:"interest_index"`
	RewardIndex    cosmostypes.Dec        `json:"reward_index"`
	LoanAmount     cosmostypes.Int        `json:"loan_amount"`
	PendingRewards cosmostypes.Dec        `json:"pending_rewards"`
}

// GetBorrowerInfosResponse wraps the paginated borrower-infos query
// result.
type GetBorrowerInfosResponse struct {
	BorrowerInfos []GetBorrowerInfoResponse `json:"borrower_infos"`
}
|
package main
// main is the program entry point. The previous version did not
// compile: Go requires func main to take no parameters, and `var a int`
// redeclared the parameter `a` in the same scope.
func main() {
	var a int
	_ = a // placeholder until a is actually used
}
|
package main
import (
"bufio"
"fmt"
"io/ioutil"
"os"
)
// main demonstrates basic file-system operations: renaming a file,
// listing a directory, and removing a directory tree.
// (A large block of commented-out create/read/copy/delete experiments
// was removed as dead code; recover it from version control if needed.)
func main() {
	fmt.Println("-------------renaming file-----------------------")
	// The original silently ignored the os.Rename error; report it so a
	// missing source file is visible.
	if err := os.Rename("ibm.txt", "ibmIndia.txt"); err != nil {
		fmt.Println(err)
	}
	fmt.Println("------------reading dir------------------------")
	files, err1 := ioutil.ReadDir("simpleshape")
	if err1 != nil {
		panic(err1)
	}
	for _, f := range files {
		fmt.Println(f.Name())
	}
	fmt.Println("------------delete folder------------------------")
	// RemoveAll deletes the folder and everything below it.
	err2 := os.RemoveAll("delfolder")
	if err2 != nil {
		panic(err2)
	}
	fmt.Println("------------------------------------")
}
// ReadLine returns the contents of line lineNumber (1-based) of
// names.txt, or "" if the file cannot be opened or has fewer lines.
func ReadLine(lineNumber int) string {
	// The original ignored the Open error and registered the deferred
	// Close *after* the scan loop, so returning from inside the loop
	// leaked the file handle.
	file, err := os.Open("names.txt")
	if err != nil {
		return ""
	}
	defer file.Close()
	fileScanner := bufio.NewScanner(file)
	lineCount := 1
	for fileScanner.Scan() {
		if lineCount == lineNumber {
			return fileScanner.Text()
		}
		lineCount++
	}
	return ""
}
|
package auth
import (
"fmt"
"net/http"
"strings"
"time"
"github.com/gempir/gempbot/internal/api"
"github.com/gempir/gempbot/internal/config"
"github.com/gempir/gempbot/internal/helixclient"
"github.com/gempir/gempbot/internal/log"
"github.com/gempir/gempbot/internal/store"
"github.com/golang-jwt/jwt"
"github.com/nicklaw5/helix/v2"
)
// CreateApiToken issues an HS256-signed JWT for the validated Twitch
// user, valid for one year, signed with secret.
func CreateApiToken(secret string, resp *helix.ValidateTokenResponse) string {
	expirationTime := time.Now().Add(365 * 24 * time.Hour)
	claims := &TokenClaims{
		UserID: resp.Data.UserID,
		Login:  resp.Data.Login,
		StandardClaims: jwt.StandardClaims{
			// jwt.StandardClaims.ExpiresAt is unix *seconds* — the
			// previous comment incorrectly said milliseconds.
			ExpiresAt: expirationTime.Unix(),
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	tokenString, err := token.SignedString([]byte(secret))
	if err != nil {
		// HMAC signing only fails on a malformed key; surface it
		// instead of silently returning an empty token (the original
		// discarded this error).
		log.Errorf("failed to sign api token: %s", err)
	}
	return tokenString
}
// TokenClaims is the payload carried in gempbot API tokens.
// Note StandardClaims is a named field, not embedded, so jwt's own
// claim validation does not apply to it automatically.
type TokenClaims struct {
	UserID         string
	Login          string
	StandardClaims jwt.StandardClaims
}

// Valid implements jwt.Claims. It accepts every token unconditionally.
// NOTE(review): returning nil here means the ExpiresAt set in
// CreateApiToken is never enforced during ParseWithClaims — confirm
// that skipping expiry checking is intentional.
func (t *TokenClaims) Valid() error {
	return nil
}
// NewAuth wires up an Auth helper from its dependencies.
func NewAuth(cfg *config.Config, db *store.Database, helixClient helixclient.Client) *Auth {
	a := &Auth{
		helixClient: helixClient,
		db:          db,
		cfg:         cfg,
	}
	return a
}
// Auth authenticates incoming HTTP requests: it verifies the signed
// scToken JWT, loads the user's Twitch access token from the database,
// and validates (and if necessary refreshes) it against Helix.
type Auth struct {
	helixClient helixclient.Client
	db          *store.Database
	cfg         *config.Config
}
// AttemptAuth authenticates the request; on failure it additionally
// writes a cookie-clearing error response before returning the error.
func (a *Auth) AttemptAuth(r *http.Request, w http.ResponseWriter) (helix.ValidateTokenResponse, store.UserAccessToken, api.Error) {
	resp, token, apiErr := a.Authenticate(r)
	if apiErr == nil {
		return resp, token, nil
	}
	a.WriteDeleteCookieResponse(w, apiErr)
	return helix.ValidateTokenResponse{}, store.UserAccessToken{}, apiErr
}
// CanAuthenticate reports whether the request carries any credential
// worth trying: a Bearer Authorization header or an scToken cookie.
func (a *Auth) CanAuthenticate(r *http.Request) bool {
	if strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ") != "" {
		return true
	}
	for _, c := range r.Cookies() {
		if c.Name == "scToken" {
			return true
		}
	}
	return false
}
// HasAuth reports whether the request has any Authorization header set.
func (a *Auth) HasAuth(r *http.Request) bool {
	return len(r.Header.Get("Authorization")) > 0
}
// Authenticate verifies the request's scToken JWT, loads the user's
// stored Twitch access token, and validates it against Helix,
// refreshing the token once if Helix reports it expired.
func (a *Auth) Authenticate(r *http.Request) (helix.ValidateTokenResponse, store.UserAccessToken, api.Error) {
	// Cookie takes precedence over the Authorization header (it is read
	// last and overwrites the header value).
	scToken := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
	for _, cookie := range r.Cookies() {
		if cookie.Name == "scToken" {
			scToken = cookie.Value
		}
	}
	if scToken == "" {
		return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("no scToken cookie set"))
	}
	// Parse the JWT and verify its signature against our shared secret.
	claims := &TokenClaims{}
	tkn, err := jwt.ParseWithClaims(scToken, claims, func(token *jwt.Token) (interface{}, error) {
		return []byte(a.cfg.Secret), nil
	})
	if err != nil || !tkn.Valid {
		// Typo fix: previously logged "found to validate jwt".
		log.Errorf("failed to validate jwt: %s", err)
		return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("bad authentication"))
	}
	token, err := a.db.GetUserAccessToken(claims.UserID)
	if err != nil {
		log.Errorf("Failed to get userAccessTokenData: %s", err.Error())
		return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("failed to get userAccessTokenData: %s", err.Error()))
	}
	success, resp, err := a.helixClient.ValidateToken(token.AccessToken)
	if success && err == nil {
		return *resp, token, nil
	}
	if err != nil {
		log.Errorf("token did not validate: %s", err)
	}
	// resp can be nil when the validation call itself failed; the
	// original dereferenced it unconditionally and could panic here.
	if resp == nil {
		return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("token not valid"))
	}
	if resp.Error == "Unauthorized" || resp.ErrorMessage == "invalid access token" {
		// Token might be expired, try refreshing once.
		if err := a.helixClient.RefreshToken(token); err != nil {
			return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("failed to refresh token"))
		}
		refreshedToken, err := a.db.GetUserAccessToken(claims.UserID)
		if err != nil {
			log.Errorf("Failed to get userAccessTokenData: %s", err.Error())
			return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("failed to get userAccessTokenData: %s", err.Error()))
		}
		success, resp, err = a.helixClient.ValidateToken(refreshedToken.AccessToken)
		if !success || err != nil {
			if err != nil {
				log.Errorf("refreshed Token did not validate: %s", err)
			}
			return helix.ValidateTokenResponse{}, refreshedToken, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("refreshed token did not validate"))
		}
		return *resp, refreshedToken, nil
	}
	return helix.ValidateTokenResponse{}, store.UserAccessToken{}, api.NewApiError(http.StatusUnauthorized, fmt.Errorf("token not valid: %s", resp.ErrorMessage))
}
// WriteDeleteCookieResponse expires the scToken cookie on the client
// (MaxAge -1) and writes err as the HTTP error response.
func (a *Auth) WriteDeleteCookieResponse(w http.ResponseWriter, err api.Error) {
	http.SetCookie(w, &http.Cookie{
		Name:     "scToken",
		Value:    "",
		Path:     "/",
		Domain:   a.cfg.CookieDomain,
		MaxAge:   -1,
		HttpOnly: true,
	})
	http.Error(w, err.Error(), err.Status())
}
|
//go:generate mockgen -destination ../../internal/test/mock/pkg/runtime/mock_runtime.go github.com/wetware/ww/pkg/runtime ServiceFactory,EventProducer,EventConsumer,Service
package runtime
import (
"context"
"fmt"
"reflect"
ww "github.com/wetware/ww/pkg"
"go.uber.org/fx"
)
// DependencyError is returned if an event consumed by a registered Service does not
// have a corresponding producer registered to the runtime.
type DependencyError struct {
	Type reflect.Type
}

// Error implements the error interface, naming the missing event type.
func (err DependencyError) Error() string {
	msg := fmt.Sprintf("no producer registered for event '%s'", err.Type)
	return msg
}
// ServiceFactory is a constructor for a service.
type ServiceFactory interface {
	// NewService builds the service, or reports why it cannot.
	NewService() (Service, error)
}

// EventProducer is an optional interface implemented by ServiceFactory that declares
// which events a given service produces. It is used to verify event dependencies.
type EventProducer interface {
	// Produces returns sample values whose dynamic types identify the
	// produced events.
	Produces() []interface{}
}

// EventConsumer is an optional interface implemented by ServiceFactory that declares
// which events a given service consumes. It is used to verify event dependencies.
type EventConsumer interface {
	// Consumes returns sample values whose dynamic types identify the
	// consumed events.
	Consumes() []interface{}
}

// Service is a process that runs in the background.
// A set of services constitutes a "runtime environment". Different wetware objects,
// such as Clients and Hosts, have their own runtimes.
type Service interface {
	// Start the service. The startup sequence is aborted if the context expires.
	Start(context.Context) error

	// Stop the service. The shutdown sequence is aborted, resulting in an unclean
	// shutdown, if the context expires.
	Stop(context.Context) error

	// Loggable representation of the service
	Loggable() map[string]interface{}
}
// Config specifies a set of runtime services.
// It is an fx.In parameter struct: Services is collected from the
// "runtime" fx value group.
type Config struct {
	fx.In

	Log      ww.Logger
	Services []ServiceFactory `group:"runtime"`
}
// Start a runtime in the background: each configured service is
// registered with the fx lifecycle, then declared event dependencies
// are verified.
func Start(cfg Config, lx fx.Lifecycle) error {
	loader := serviceLoader{}
	for i := range cfg.Services {
		loader.LoadService(lx, cfg.Log, cfg.Services[i])
	}
	return loader.Error()
}
// serviceLoader accumulates services into the fx lifecycle while
// recording the produced and consumed event types so that Error can
// verify every consumed event has a producer. err is sticky: after the
// first failure no further services are loaded.
type serviceLoader struct {
	err        error
	prod, cons map[reflect.Type]struct{}
}
// Error returns the first load failure, or a DependencyError when some
// consumed event type has no registered producer, or nil.
func (sl *serviceLoader) Error() error {
	if sl.err != nil {
		return sl.err
	}
	for ev := range sl.cons {
		if _, satisfied := sl.prod[ev]; !satisfied {
			return DependencyError{Type: ev}
		}
	}
	return nil
}
// LoadService constructs a service from factory, registers it with the
// lifecycle, and records its event dependencies. After the first
// failure the loader becomes a no-op.
func (sl *serviceLoader) LoadService(lx fx.Lifecycle, log ww.Logger, factory ServiceFactory) {
	if sl.err != nil {
		return
	}
	svc, err := factory.NewService()
	if err != nil {
		sl.err = err
		return
	}
	lx.Append(hook(log, svc))
	sl.addDependencies(factory)
}
// addDependencies records the factory's declared event dependencies,
// if it opts into either optional interface.
func (sl *serviceLoader) addDependencies(f ServiceFactory) {
	ep, producing := f.(EventProducer)
	if producing {
		sl.addEventProducer(ep)
	}
	ec, consuming := f.(EventConsumer)
	if consuming {
		sl.addEventConsumer(ec)
	}
}
// addEventProducer records the dynamic type of each produced event.
func (sl *serviceLoader) addEventProducer(ep EventProducer) {
	if sl.prod == nil {
		sl.prod = make(map[reflect.Type]struct{})
	}
	for _, ev := range ep.Produces() {
		sl.prod[reflect.TypeOf(ev)] = struct{}{}
	}
}
// addEventConsumer records the dynamic type of each consumed event.
func (sl *serviceLoader) addEventConsumer(ec EventConsumer) {
	if sl.cons == nil {
		sl.cons = make(map[reflect.Type]struct{})
	}
	for _, ev := range ec.Consumes() {
		sl.cons[reflect.TypeOf(ev)] = struct{}{}
	}
}
// hook adapts a Service to an fx lifecycle hook, logging successful
// starts and unclean shutdowns.
func hook(log ww.Logger, svc Service) fx.Hook {
	onStart := func(ctx context.Context) error {
		if err := svc.Start(ctx); err != nil {
			return err
		}
		log.With(svc).Debug("service started")
		return nil
	}
	onStop := func(ctx context.Context) error {
		err := svc.Stop(ctx)
		if err != nil {
			log.With(svc).WithError(err).Debug("unclean shutdown")
		}
		return err
	}
	return fx.Hook{OnStart: onStart, OnStop: onStop}
}
|
package storage
import (
"errors"
)
// ErrStorageAccessConflict is an error for accessing key conflict:
// returned when the requested key is currently being accessed by
// another operation.
var ErrStorageAccessConflict = errors.New("specified key is under accessing")

// Store is an interface for caching data into backend storage.
type Store interface {
	// Create stores contents under key.
	Create(key string, contents []byte) error
	// Delete removes the entry stored under key.
	Delete(key string) error
	// Get returns the contents stored under key.
	Get(key string) ([]byte, error)
	// ListKeys lists keys under key — presumably a prefix/root key;
	// confirm with the implementations.
	ListKeys(key string) ([]string, error)
	// List returns the contents of all entries under key.
	List(key string) ([][]byte, error)
	// Update replaces the contents stored under key.
	Update(key string, contents []byte) error
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package ui
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/audio"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/cuj"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/input"
"chromiumos/tast/local/mtbf/youtube"
"chromiumos/tast/local/power"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the YoutubeScreenRotate test with the tast framework;
// it requires an internal display and a speaker.
func init() {
	testing.AddTest(&testing.Test{
		Func:         YoutubeScreenRotate,
		LacrosStatus: testing.LacrosVariantUnknown,
		Desc:         "Plays YouTube video, performs screen rotation using display APIs and checks for any frame drops and if the audio is routing through expected device",
		Contacts:     []string{"ambalavanan.m.m@intel.com", "andrescj@google.com", "intel-chrome-system-automation-team@intel.com", "chromeos-gfx-video@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		HardwareDeps: hwdep.D(hwdep.InternalDisplay(), hwdep.Speaker()),
		SoftwareDeps: []string{"chrome"},
		Fixture:      "chromeGraphics",
	})
}
// YoutubeScreenRotate plays YouTube video, performs screen rotation using display APIs and checks for any frame drops and if the audio is routing through expected device.
func YoutubeScreenRotate(ctx context.Context, s *testing.State) {
	// Ensure display on to record ui performance correctly.
	if err := power.TurnOnDisplay(ctx); err != nil {
		s.Fatal("Failed to turn on display: ", err)
	}
	expectedAudioNode := "INTERNAL_SPEAKER"
	var videoSource = youtube.VideoSrc{
		URL:     "https://www.youtube.com/watch?v=LXb3EKWsInQ",
		Title:   "COSTA RICA IN 4K 60fps HDR (ULTRA HD)",
		Quality: "1440p60",
	}
	// Give 5 seconds to cleanup other resources.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	// Setting the volume to low level.
	// NOTE(review): the s.Error calls below do not abort the test, yet
	// cras/vh are dereferenced afterwards — if NewCras/NewVolumeHelper
	// fail these should probably be s.Fatal to avoid a nil dereference.
	cras, err := audio.NewCras(ctx)
	if err != nil {
		s.Error("Failed to create Cras object: ", err)
	}
	vh, err := audio.NewVolumeHelper(ctx)
	if err != nil {
		s.Error("Failed to create the volumeHelper: ", err)
	}
	// Remember the user's volume so it can be restored on cleanup.
	originalVolume, err := vh.GetVolume(ctx)
	if err != nil {
		s.Error("Failed to get volume: ", err)
	}
	testVol := 10
	s.Logf("Setting Output node volume to %d", testVol)
	if err := vh.SetVolume(ctx, testVol); err != nil {
		s.Errorf("Failed to set output node volume to %d: %v", testVol, err)
	}
	defer vh.SetVolume(cleanupCtx, originalVolume)
	// Make sure audio is routed through the internal speaker.
	deviceName, deviceType, err := cras.SelectedOutputDevice(ctx)
	if err != nil {
		s.Error("Failed to get the selected audio device: ", err)
	}
	if deviceType != expectedAudioNode {
		s.Logf("%s audio node is not selected, selecting it", expectedAudioNode)
		if err := cras.SetActiveNodeByType(ctx, expectedAudioNode); err != nil {
			s.Errorf("Failed to select active device %s: %v", expectedAudioNode, err)
		}
		deviceName, deviceType, err = cras.SelectedOutputDevice(ctx)
		if err != nil {
			s.Error("Failed to get the selected audio device: ", err)
		}
		if deviceType != expectedAudioNode {
			s.Fatalf("Failed to set the audio node type: got %q; want %q", deviceType, expectedAudioNode)
		}
	}
	s.Logf("Selected audio device name: %s", deviceName)
	s.Logf("Selected audio device type: %s", deviceType)
	cr := s.FixtValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating test API connection failed: ", err)
	}
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to open the keyboard: ", err)
	}
	defer kb.Close()
	ui := uiauto.New(tconn)
	var uiHandler cuj.UIActionHandler
	if uiHandler, err = cuj.NewClamshellActionHandler(ctx, tconn); err != nil {
		s.Fatal("Failed to create clamshell action handler: ", err)
	}
	defer uiHandler.Close()
	// Get display info.
	dispInfo, err := display.GetInternalInfo(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to get internal display info: ", err)
	}
	// Revert back to initial screen orientation.
	defer func(ctx context.Context) {
		s.Log("Setting back to initial orientation")
		if err := display.SetDisplayRotationSync(ctx, tconn, dispInfo.ID, display.Rotate0); err != nil {
			s.Fatal("Failed to rotate display 0 degree: ", err)
		}
	}(cleanupCtx)
	extendedDisplay := false
	videoApp := youtube.NewYtWeb(cr.Browser(), tconn, kb, extendedDisplay, ui, uiHandler)
	if err := videoApp.OpenAndPlayVideo(videoSource)(ctx); err != nil {
		s.Fatalf("Failed to open %s: %v", videoSource.URL, err)
	}
	defer videoApp.Close(cleanupCtx)
	// screenOrient[i] is the orientation expected after applying
	// dispRotates[i]; the lists are index-paired.
	screenOrient := []display.OrientationType{display.OrientationPortraitPrimary, display.OrientationLandscapeSecondary, display.OrientationPortraitSecondary, display.OrientationLandscapePrimary}
	dispRotates := []display.RotationAngle{display.Rotate90, display.Rotate180, display.Rotate270, display.Rotate0}
	for index, rotation := range dispRotates {
		if err := display.SetDisplayRotationSync(ctx, tconn, dispInfo.ID, rotation); err != nil {
			s.Fatalf("Failed to rotate display %v degree: %v", rotation, err)
		}
		// Playback must continue without dropped frames after rotation.
		if err := videoApp.PerformFrameDropsTest(ctx); err != nil {
			s.Error("Failed to play video without frame drops: ", err)
		}
		// Audio must still be routed through the device selected above.
		devName, err := crastestclient.FirstRunningDevice(ctx, audio.OutputStream)
		if err != nil {
			s.Fatal("Failed to detect running output device: ", err)
		}
		if deviceName != devName {
			s.Fatalf("Failed to route the audio through expected audio node: got %q; want %q", devName, deviceName)
		}
		orient, err := display.GetOrientation(ctx, tconn)
		if err != nil {
			s.Fatal("Failed to get screen orientation: ", err)
		}
		if orient.Type != screenOrient[index] {
			s.Fatalf("Failed to match screen orientation: got %q; want %q", orient.Type, screenOrient[index])
		}
	}
}
|
package chat
import (
"encoding/json"
socketio "github.com/googollee/go-socket.io"
"github.com/streadway/amqp"
"log"
)
// AMQPMessage is the JSON envelope consumed from the "chat" queue: the
// chat message itself plus the emails of the users to deliver it to
// (each email doubles as a socket.io room name; see AmpqInit).
type AMQPMessage struct {
	Message Message
	Emails  []string
}

// Message is a single chat message.
type Message struct {
	User    User
	Content string
	Type    string
	Game    Game
}

// Game identifies the game a message belongs to.
type Game struct {
	Id int
}

// User identifies the author of a message.
type User struct {
	Id       int
	Email    string
	Username string
}
// failOnError aborts the process (log.Fatalf) when err is non-nil,
// prefixing the log entry with msg.
func failOnError(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s: %s", msg, err)
}
// AmpqInit connects to RabbitMQ, consumes the durable "chat" queue, and
// broadcasts each decoded message to the rooms (emails) it addresses.
// It blocks forever; connection/setup failures are fatal.
func AmpqInit(server *socketio.Server) {
	conn, err := amqp.Dial("amqp://guest:guest@rabbitmq:5672/")
	failOnError(err, "Failed to connect to RabbitMQ")
	defer conn.Close()
	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel")
	defer ch.Close()
	q, err := ch.QueueDeclare(
		"chat", // name
		true,   // durable
		false,  // delete when unused
		false,  // exclusive
		false,  // no-wait
		nil,    // arguments
	)
	failOnError(err, "Failed to declare a queue")
	msgs, err := ch.Consume(
		q.Name,
		"",    // consumer tag
		true,  // auto-ack
		false, // exclusive
		false, // no-local
		false, // no-wait
		nil,   // args
	)
	failOnError(err, "Failed to register a consumer")
	forever := make(chan bool)
	go func() {
		for d := range msgs {
			var amqpMessage AMQPMessage
			// d.Body is already []byte (the original converted it
			// needlessly) and decode failures were silently ignored,
			// broadcasting a zero-valued message.
			if err := json.Unmarshal(d.Body, &amqpMessage); err != nil {
				log.Println("chat: dropping undecodable message:", err)
				continue
			}
			for _, email := range amqpMessage.Emails {
				server.BroadcastToRoom("", email, "chat", amqpMessage.Message.JsonFormat())
			}
		}
	}()
	log.Printf(" [*] Waiting for messages. To exit press CTRL+C")
	<-forever
}
// JsonFormat marshals the message to a JSON string. On a marshal
// failure the error is logged and the empty string is returned.
func (m *Message) JsonFormat() string {
	jsonData, err := json.Marshal(m)
	if err != nil {
		log.Println(err)
	}
	return string(jsonData)
}
|
package sql
import (
"fmt"
"reflect"
"strings"
)
// SelectAll returns a comma-separated list of the struct's field names,
// suitable for building a SELECT column list.
// NOTE(review): the fmt.Println below looks like leftover debug output;
// removing it would leave this file's "fmt" import unused, so both
// should be removed together.
// NOTE(review): reflect's NumField panics if structVal is a pointer or
// non-struct — confirm all callers pass a struct value.
func SelectAll(structVal interface{}) string {
	valType := reflect.TypeOf(structVal)
	numFields := valType.NumField()
	fields := make([]string, numFields)
	for i := 0; i < numFields; i++ {
		fields[i] = valType.Field(i).Name
	}
	fmt.Println("HERE", strings.Join(fields, ", ")) // debug print — see note above
	return strings.Join(fields, ", ")
}
|
package command
// CreateTacoBoxCommand carries the input for creating a taco box.
// CreatorId has no json tag, so it is populated server-side rather than
// from the request body.
type CreateTacoBoxCommand struct {
	CreatorId string
	Name      string `json:"name" validate:"required"`
}

// UpdateTacoCommand carries the input for updating a taco box.
// OperationUserID and TacoBoxID have no json tags and are supplied by
// the caller, not decoded from the request body.
type UpdateTacoCommand struct {
	Name            string `json:"name"`
	OperationUserID string
	TacoBoxID       string `validate:"required"`
}
|
package physics
import (
"github.com/20zinnm/spac/common/constants"
"github.com/jakecoffman/cp"
)
// TranslationalState captures linear motion: a position and the
// velocity that advances it each simulation step.
type TranslationalState struct {
	Position cp.Vector
	Velocity cp.Vector
}
// Step advances the position by velocity*dt and applies the global
// velocity damping factor, returning the new state.
func (t TranslationalState) Step(dt float64) TranslationalState {
	next := TranslationalState{}
	next.Position = t.Position.Add(t.Velocity.Mult(dt))
	next.Velocity = t.Velocity.Mult(constants.Damping)
	return next
}
// Lerp linearly interpolates position and velocity toward to by delta.
func (t TranslationalState) Lerp(to TranslationalState, delta float64) TranslationalState {
	out := TranslationalState{}
	out.Position = t.Position.Lerp(to.Position, delta)
	out.Velocity = t.Velocity.Lerp(to.Velocity, delta)
	return out
}
// RotationalState captures angular motion: an angle and the angular
// velocity that advances it each simulation step.
type RotationalState struct {
	Angle           float64
	AngularVelocity float64
}
// Step advances the angle by angular velocity*dt and applies the
// global damping factor, returning the new state.
func (r RotationalState) Step(dt float64) RotationalState {
	angle := r.Angle + r.AngularVelocity*dt
	omega := r.AngularVelocity * constants.Damping
	return RotationalState{Angle: angle, AngularVelocity: omega}
}
// Lerp linearly interpolates angle and angular velocity toward to by
// delta.
func (r RotationalState) Lerp(to RotationalState, delta float64) RotationalState {
	angle := cp.Lerp(r.Angle, to.Angle, delta)
	omega := cp.Lerp(r.AngularVelocity, to.AngularVelocity, delta)
	return RotationalState{Angle: angle, AngularVelocity: omega}
}
|
package main
import (
"bufio"
"flag"
"fmt"
"os"
"path/filepath"
"regexp"
"sync"
"sync/atomic"
"time"
)
// Mode flags set once in main, and shared counters updated atomically
// by the concurrent scanFile goroutines.
var countOnly bool
var scan bool
var pattern *regexp.Regexp

// sum counts matched lines across all files.
var sum int32

// openFilesCount tracks currently open files (soft cap of 100 enforced
// in scanFile).
var openFilesCount int32
var group sync.WaitGroup

// numFiles/numLines are only accumulated in scan mode.
var numFiles int32
var numLines int32
// scanFile opens path and prints (or, in count mode, counts) the lines
// matching the global pattern, throttling itself so that at most ~100
// files are open at once.
// NOTE(review): this function is spawned as `go scanFile(path)` from
// visit but calls group.Add(1) itself — main can reach group.Wait()
// before the goroutine registers, so the program may exit early. The
// spawner should Add before the `go` statement.
// NOTE(review): handleError panics; a panic inside this goroutine
// takes down the whole process.
func scanFile(path string) {
	// Busy-wait for an open-file slot (soft cap of 100).
	for atomic.LoadInt32(&openFilesCount) >= 100 {
		time.Sleep(10 * time.Millisecond)
	}
	atomic.AddInt32(&openFilesCount, 1)
	group.Add(1)
	file, err := os.Open(path)
	handleError(err)
	defer file.Close()
	defer onDone()
	if scan {
		atomic.AddInt32(&numFiles, 1)
	}
	var countLines int32
	var countFilter int32
	scanner := bufio.NewScanner(file)
	// Allow lines up to 1 MiB (the Scanner default is 64 KiB).
	buff := make([]byte, 0, 64*1024)
	scanner.Buffer(buff, 1024*1024)
	for scanner.Scan() {
		line := scanner.Text()
		countLines++
		if !pattern.MatchString(line) {
			continue
		}
		if countOnly {
			countFilter++
		} else {
			println(line)
		}
	}
	// Fold per-file tallies into the global counters.
	if countFilter > 0 {
		atomic.AddInt32(&sum, countFilter)
	}
	if scan && countLines > 0 {
		atomic.AddInt32(&numLines, countLines)
	}
	handleError(scanner.Err())
}
// visit is the filepath.Walk callback: directories are skipped and
// regular files are scanned concurrently.
func visit(path string, finfo os.FileInfo, err error) error {
	// Walk passes a non-nil err (and possibly a nil finfo) when a path
	// cannot be stat'ed; the original ignored err and would panic on
	// finfo.IsDir().
	if err != nil {
		return err
	}
	if finfo.IsDir() {
		return nil
	}
	go scanFile(path)
	return nil
}
// usageError prints the error plus usage help and exits with status 1.
func usageError(message string) {
	help := []string{
		"Error: " + message,
		"Usage: xgrep action regexp paths",
		"Actions:",
		"\tread - Writes the filtered lines to the stdout",
		"\tcount - Counts the number of filtered lines",
		"\tscan - Counts the number of files, lines and filtered lines",
	}
	for _, line := range help {
		fmt.Println(line)
	}
	os.Exit(1)
}
// handleError panics on any non-nil error; used for unrecoverable
// conditions in this small CLI.
func handleError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// onDone marks this file's scan finished on the WaitGroup and releases
// one open-file slot (see the cap enforced in scanFile).
func onDone() {
	group.Done()
	atomic.AddInt32(&openFilesCount, -1)
}
// main parses "xgrep <action> <regexp> <paths...>", walks every path,
// and scans matching files concurrently, then reports the counters.
func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) < 3 {
		usageError("Invalid number of arguments")
	}
	switch args[0] {
	case "read":
		countOnly = false
	case "count":
		countOnly = true
	case "scan":
		scan = true
		countOnly = true
	default:
		usageError("Action is invalid")
	}
	rx := args[1]
	if rx == "" {
		usageError("Missing regex pattern")
	}
	reg, err := regexp.Compile(rx)
	pattern = reg
	handleError(err)
	if args[2] == "" {
		usageError("Missing paths")
	}
	for _, path := range args[2:] {
		_, statErr := os.Stat(path)
		switch {
		case statErr != nil && os.IsNotExist(statErr):
			fmt.Printf("Error: File not found - %s\n", path)
			continue
		case statErr != nil && os.IsPermission(statErr):
			fmt.Printf("Error: File permission - %s\n", path)
			continue
		}
		handleError(statErr)
		handleError(filepath.Walk(path, visit))
	}
	group.Wait()
	if scan {
		fmt.Println("Number of files: ", numFiles)
		fmt.Println("Number of lines: ", numLines)
		fmt.Print("Number of filtered lines: ")
	}
	if countOnly {
		println(atomic.LoadInt32(&sum))
	}
}
|
/*
* Copyright 2018, Oath Inc.
* Licensed under the terms of the MIT license. See LICENSE file in the project root for terms.
*/
package state
import (
"log"
"sync"
"time"
)
// Lock hands out at most maxCount concurrent, time-limited leases.
// The zero value is unusable; construct with NewLock.
type Lock struct {
	maxCount int
	mutex    sync.Mutex
	locks    []time.Time
}

// NewLock returns a Lock allowing up to maxLockCount simultaneous
// leases. The returned value contains a sync.Mutex, so callers must not
// copy it after first use.
func NewLock(maxLockCount int) Lock {
	return Lock{maxCount: maxLockCount}
}

// Acquire tries to take a lease that expires after duration, pruning
// expired leases first. It reports whether a slot was free.
func (l *Lock) Acquire(duration time.Duration) bool {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	l.removeExpired()
	if len(l.locks) >= l.maxCount {
		return false
	}
	l.addNew(duration)
	log.Printf("Lock acquired: %v of %v, duration: %v", len(l.locks), l.maxCount, duration)
	return true
}

// addNew records a lease expiring duration from now.
func (l *Lock) addNew(duration time.Duration) {
	l.locks = append(l.locks, time.Now().Add(duration))
}

// removeExpired drops all leases whose expiry time has passed.
func (l *Lock) removeExpired() {
	var live []time.Time
	for _, expiry := range l.locks {
		if !isExpired(expiry) {
			live = append(live, expiry)
		}
	}
	l.locks = live
}

// isExpired reports whether t lies in the past.
func isExpired(t time.Time) bool {
	return time.Now().After(t)
}
|
package stconfig
import (
"fmt"
"github.com/tealeg/xlsx"
)
// Sheet is a rectangular snapshot of a worksheet: its row and column
// counts and every cell rendered as a string (Data[row][col]).
type Sheet struct {
	Row  uint16
	Col  uint16
	Data [][]string
}
// ReadXlsx loads the named sheet (or the first sheet when sheet is "")
// from the xlsx workbook at file and returns its dimensions and cell
// strings, or an error if the workbook cannot be opened or the sheet
// does not exist.
func ReadXlsx(file string, sheet string) (*Sheet, error) {
	wk, err := xlsx.OpenFile(file)
	if err != nil {
		return nil, err
	}
	if len(wk.Sheets) > 0 {
		st := wk.Sheet[sheet]
		if sheet == "" {
			st = wk.Sheets[0]
		}
		if st != nil {
			data := make([][]string, st.MaxRow)
			for r, row := range st.Rows {
				cells := make([]string, st.MaxCol)
				for c, cell := range row.Cells {
					cells[c] = cell.String()
				}
				data[r] = cells
			}
			return &Sheet{Row: uint16(st.MaxRow), Col: uint16(st.MaxCol), Data: data}, nil
		}
	}
	return nil, fmt.Errorf("can not find sheet %s", sheet)
}
|
package hud
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/tilt/pkg/model/logstore"
)
// TestPrinterProgressBackoff checks that an update for an already
// printed ProgressID is suppressed until enough time has passed.
func TestPrinterProgressBackoff(t *testing.T) {
	out := &bytes.Buffer{}
	now := time.Now()
	printer := NewIncrementalPrinter(Stdout(out))
	printer.Print([]logstore.LogLine{
		logstore.LogLine{Text: "layer 1: Pending\n", ProgressID: "layer 1", Time: now},
		logstore.LogLine{Text: "layer 2: Pending\n", ProgressID: "layer 2", Time: now},
	})
	assert.Equal(t, "layer 1: Pending\nlayer 2: Pending\n", out.String())
	// Same timestamp: the update for "layer 1" must be suppressed.
	printer.Print([]logstore.LogLine{
		logstore.LogLine{Text: "layer 1: Partial\n", ProgressID: "layer 1", Time: now},
	})
	assert.Equal(t, "layer 1: Pending\nlayer 2: Pending\n", out.String())
	now = now.Add(time.Hour)
	printer.Print([]logstore.LogLine{
		// NOTE(review): now was already advanced an hour above, so this
		// line is stamped two hours after the start — confirm the extra
		// Add(time.Hour) is intentional.
		logstore.LogLine{Text: "layer 1: Done\n", ProgressID: "layer 1", Time: now.Add(time.Hour)},
	})
	assert.Equal(t, "layer 1: Pending\nlayer 2: Pending\nlayer 1: Done\n", out.String())
}
// TestPrinterMustPrint checks that ProgressMustPrint overrides the
// backoff: the update is printed even though no time has passed.
func TestPrinterMustPrint(t *testing.T) {
	out := &bytes.Buffer{}
	now := time.Now()
	printer := NewIncrementalPrinter(Stdout(out))
	printer.Print([]logstore.LogLine{
		logstore.LogLine{Text: "layer 1: Pending\n", ProgressID: "layer 1", Time: now},
		logstore.LogLine{Text: "layer 2: Pending\n", ProgressID: "layer 2", Time: now},
	})
	assert.Equal(t, "layer 1: Pending\nlayer 2: Pending\n", out.String())
	printer.Print([]logstore.LogLine{
		logstore.LogLine{Text: "layer 1: Done\n", ProgressID: "layer 1", ProgressMustPrint: true, Time: now},
	})
	assert.Equal(t, "layer 1: Pending\nlayer 2: Pending\nlayer 1: Done\n", out.String())
}
|
package gorm_test
import (
"testing"
"github.com/go-test/deep"
"github.com/porter-dev/porter/internal/models"
"gorm.io/gorm"
)
// TestCreateInfra verifies that an ECR infra record round-trips through
// CreateInfra and ReadInfra with its kind and status intact.
func TestCreateInfra(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_create_aws_infra.db",
	}
	setupTestEnv(tester, t)
	initProject(tester, t)
	defer cleanup(tester, t)
	infra := &models.Infra{
		Kind:      models.InfraECR,
		ProjectID: tester.initProjects[0].Model.ID,
		Status:    models.StatusCreated,
	}
	infra, err := tester.repo.Infra.CreateInfra(infra)
	if err != nil {
		t.Fatalf("%v\n", err)
	}
	infra, err = tester.repo.Infra.ReadInfra(infra.Model.ID)
	if err != nil {
		t.Fatalf("%v\n", err)
	}
	// make sure the ID is 1 and the kind and status match what was created
	if infra.Model.ID != 1 {
		t.Errorf("incorrect registry ID: expected %d, got %d\n", 1, infra.Model.ID)
	}
	if infra.Kind != models.InfraECR {
		t.Errorf("incorrect aws infra kind: expected %s, got %s\n", models.InfraECR, infra.Kind)
	}
	if infra.Status != models.StatusCreated {
		t.Errorf("incorrect aws infra status: expected %s, got %s\n", models.StatusCreated, infra.Status)
	}
}
// TestListInfrasByProjectID verifies that listing infras for the seeded
// project returns exactly the single record created by initInfra.
func TestListInfrasByProjectID(t *testing.T) {
	tester := &tester{
		dbFileName: "./porter_list_aws_infras.db",
	}
	setupTestEnv(tester, t)
	initProject(tester, t)
	initInfra(tester, t)
	defer cleanup(tester, t)
	infras, err := tester.repo.Infra.ListInfrasByProjectID(
		tester.initProjects[0].Model.ID,
	)
	if err != nil {
		t.Fatalf("%v\n", err)
	}
	if len(infras) != 1 {
		t.Fatalf("length of aws infras incorrect: expected %d, got %d\n", 1, len(infras))
	}
	// make sure data is correct
	expInfra := models.Infra{
		Kind:      "ecr",
		ProjectID: tester.initProjects[0].Model.ID,
		Status:    models.StatusCreated,
	}
	infra := infras[0]
	// reset the gorm bookkeeping fields so deep.Equal only compares data
	infra.Model = gorm.Model{}
	if diff := deep.Equal(expInfra, *infra); diff != nil {
		t.Errorf("incorrect aws infra")
		t.Error(diff)
	}
}
|
package main
import (
"log"
"net/http"
"os"
"time"
"github.com/go-openapi/runtime/middleware"
"github.com/gorilla/mux"
"github.com/mactsouk/handlers"
)
// Default configuration, overridable via command-line arguments in
// main: SQLite database path, listen port, and uploaded-files directory.
var SQLFILE string = "/tmp/users.db"
var PORT string = ":1234"
var IMAGESPATH = "/tmp/files"
// notAllowedHandler serves requests whose HTTP method is not allowed
// for the matched route.
type notAllowedHandler struct{}

// ServeHTTP implements http.Handler by delegating to the handlers
// package's method-not-allowed response.
func (h notAllowedHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	handlers.MethodNotAllowedHandler(rw, r)
}
// createDatabase ensures SQLFILE exists and is a regular file, creating
// and seeding the database (admin user with a random password) on first
// run. It returns false on any failure.
func createDatabase() bool {
	_, err := os.Stat(SQLFILE)
	if os.IsNotExist(err) {
		if !handlers.CreateDatabase() {
			log.Println("Cannot create database:", SQLFILE)
			return false
		}
		// Update admin user password with a random string
		randomPass := handlers.RandomPassword(5)
		log.Println("Admin password is", randomPass, "PLEASE CHANGE IT!")
		temp := handlers.User{1, "admin", randomPass, 0, 1, 0}
		handlers.UpdateUser(temp)
	}
	fileInfo, err := os.Stat(SQLFILE)
	// The original ignored this error and dereferenced a nil FileInfo
	// when the second Stat failed (e.g. permission denied).
	if err != nil {
		log.Println("Cannot stat", SQLFILE+":", err)
		return false
	}
	if !fileInfo.Mode().IsRegular() {
		log.Println(SQLFILE + " is not a file!")
		return false
	}
	return true
}
// main parses optional [SQLFILE] [IMAGESPATH] [PORT] arguments, ensures
// the database and image directory exist, wires up the v1/v2 REST
// routes plus Swagger docs, and serves HTTP until the listener fails.
func main() {
	arguments := os.Args
	// Positional overrides: 1 arg = SQLFILE, 2 = +IMAGESPATH, 3 = +PORT.
	if len(arguments) == 1 {
		log.Println("Usage: [SQLFILE] [IMAGESPATH] [PORT]")
		log.Println("Using default values!")
	} else if len(arguments) == 2 {
		SQLFILE = arguments[1]
	} else if len(arguments) == 3 {
		SQLFILE = arguments[1]
		IMAGESPATH = arguments[2]
	} else if len(arguments) == 4 {
		SQLFILE = arguments[1]
		IMAGESPATH = arguments[2]
		PORT = ":" + arguments[3]
	}
	handlers.SQLFILE = SQLFILE
	handlers.IMAGESPATH = IMAGESPATH
	if !createDatabase() {
		log.Println("Cannot create database!")
		return
	}
	err := handlers.CreateImageDirectory(IMAGESPATH)
	if err != nil {
		log.Println(err)
		return
	}
	mux := mux.NewRouter()
	s := http.Server{
		Addr:         PORT,
		Handler:      mux,
		ErrorLog:     nil,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
		IdleTimeout:  10 * time.Second,
	}
	mux.NotFoundHandler = http.HandlerFunc(handlers.DefaultHandler)
	notAllowed := notAllowedHandler{}
	mux.MethodNotAllowedHandler = notAllowed
	// PUT: file uploads (filename must start/end with alphanumerics).
	putMux := mux.Methods(http.MethodPut).Subrouter()
	putMux.HandleFunc("/v2/files/{filename:[a-zA-Z0-9][a-zA-Z0-9\\.]*[a-zA-Z0-9]}",
		handlers.UploadFile)
	// GET: file downloads, time and list endpoints for v1 and v2.
	getMux := mux.Methods(http.MethodGet).Subrouter()
	getMux.Handle(
		"/v2/files/{filename:[a-zA-Z0-9][a-zA-Z0-9\\.]*[a-zA-Z0-9]}",
		http.StripPrefix("/v2/files/", http.FileServer(http.Dir(IMAGESPATH))))
	getMux.HandleFunc("/v1/time", handlers.TimeHandler)
	getMux.HandleFunc("/v1/getall", handlers.GetAllHandlerUpdated)
	getMux.HandleFunc("/v2/time", handlers.TimeHandler)
	getMux.HandleFunc("/v2/getall", handlers.GetAllHandlerV2)
	// Swagger
	opts := middleware.RedocOpts{SpecURL: "/swagger.yaml"}
	sh := middleware.Redoc(opts, nil)
	getMux.Handle("/docs", sh)
	getMux.Handle("/swagger.yaml", http.FileServer(http.Dir("./")))
	// POST: user management endpoints for v1 and v2.
	postMux := mux.Methods(http.MethodPost).Subrouter()
	postMux.HandleFunc("/v1/add", handlers.AddHandler)
	postMux.HandleFunc("/v1/login", handlers.LoginHandler)
	postMux.HandleFunc("/v1/logout", handlers.LogoutHandler)
	postMux.HandleFunc("/v2/add", handlers.AddHandlerV2)
	postMux.HandleFunc("/v2/login", handlers.LoginHandlerV2)
	postMux.HandleFunc("/v2/logout", handlers.LogoutHandlerV2)
	mux.Use(handlers.MiddleWare)
	log.Println("Listening to", PORT)
	err = s.ListenAndServe()
	if err != nil {
		log.Printf("Error starting server: %s\n", err)
		return
	}
}
|
/*
* @lc app=leetcode.cn id=881 lang=golang
*
* [881] 救生艇
*/
package leetcode
import "sort"
// @lc code=start
// numRescueBoats returns the minimum number of boats needed to carry
// everyone, where a boat carries at most two people and at most `limit`
// total weight. Greedy two-pointer: the heaviest remaining person always
// takes the next boat, joined by the lightest one whenever both fit.
// Note: sorts `people` in place.
func numRescueBoats(people []int, limit int) int {
	sort.Ints(people)
	boats := 0
	light, heavy := 0, len(people)-1
	for light <= heavy {
		if people[light]+people[heavy] <= limit {
			light++
		}
		heavy--
		boats++
	}
	return boats
}
// @lc code=end
|
package main
import (
"fmt"
"time"
)
/**
8.3 包结构
- 源文件头部以"package<name>"声明包名称。
- 包由同一目录下的多个源码文件组成。
- 包名类似namespace,与包所在目录名,编译文件名无关。
- 可执行文件必须包含 package main,入口函数 main。
*/
/**
8.3.2 初始化
- 每个源文件都可以定义一个或多个初始化函数。
- 编译器不保证多个初始化函数执行次序。
- 初始化函数在单一线程被调用,仅执行一次。
- 初始化函数在包所有全局变量初始化后执行。
- 在所有初始化函数结束后才执行main.main
- 无法调用初始化函数
1. 因为无法保证初始化函数执行顺序,因此全局变量应该直接用var初始化
2. 可在初始化函数中使用goroutine,可等待其结束。
3. 不应该滥用初始化函数,仅适合完成当前文件中的相关环境设置。
*/
// now is captured during package-variable initialization, which always
// happens before any init function runs, so the init functions below can
// report startup timing relative to it.
var now = time.Now()

// init demonstrates a package initializer: it runs once, in a single
// goroutine, after all package-level variables (such as `now`) have been
// initialized.
func init() {
	fmt.Printf("now: %v\n", now)
}
// init reports how much time has elapsed since the package-level `now`
// was initialized. Go does not guarantee the relative execution order of
// multiple init functions, so the printed duration is only illustrative.
func init() {
	// time.Since(now) is the idiomatic shorthand for time.Now().Sub(now).
	fmt.Printf("since: %v\n", time.Since(now))
}
|
package main
import "fmt"
// main prints the sum of all natural numbers below 1000 that are
// divisible by 3 or by 5 (Project Euler problem 1; the answer is 233168).
func main() {
	total := 0
	for n := 1; n < 1000; n++ {
		switch {
		case n%3 == 0, n%5 == 0:
			total += n
		}
	}
	fmt.Println(total)
}
|
package app
import (
"testing"
"github.com/go-redis/redis"
"time"
)
var (
	// client is the shared Redis client used by every test in this file;
	// TestAppSetup initializes it.
	client *redis.Client
)

// TestAppSetup prepares the shared Redis client against a local test
// instance. It must run before the other tests in this file, which rely
// on Go running tests in source order within a file.
// NOTE(review): requires a Redis server on localhost:6379 — confirm the
// CI environment provides one.
func TestAppSetup(t *testing.T) {
	//Use test redis
	client = redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})
}
// TestApp_StartApp starts four App instances sharing the same Redis
// client, waits 15 seconds for them to coordinate, then verifies that
// exactly one instance elected itself generator and that both regular
// and error message bodies were observed on some instance.
// NOTE(review): depends on the package-level `client` set by TestAppSetup
// and on wall-clock timing — may be flaky on slow or loaded hosts.
func TestApp_StartApp(t *testing.T) {
	apps := [4]*App{New(client),New(client),New(client),New(client)}
	for _, val := range apps {
		val.StartApp()
		val.DoGeneratorWork()
	}
	// Give the apps time to elect a generator and exchange messages.
	time.Sleep(time.Second*15)
	testIsGenerator := 0
	testIsMessage := false
	testIsErrorMessage := false
	for i, val := range apps {
		t.Log("App:",i,"isGenerator: ",val.isGenerator)
		t.Log("App:",i,"messageBody: ",val.messageBody)
		t.Log("App:",i,"errorMessageBody: ",val.errorMessageBody)
		if val.isGenerator == true {
			testIsGenerator++
		}
		if val.messageBody != "" {
			testIsMessage = true
		}
		if val.errorMessageBody != "" {
			testIsErrorMessage = true
		}
	}
	// Exactly one generator, and at least one message of each kind seen.
	if testIsGenerator != 1 || !testIsMessage || !testIsErrorMessage {
		t.Error("App test fail")
	}
}
// TestApp_GetErrors creates a fresh App and exercises GetErrors.
// NOTE(review): this assertion looks inverted — it fails precisely when
// GetErrors reports "No errors", yet a brand-new App would normally be
// expected to have none. Confirm the intended contract of GetErrors
// before relying on this test.
func TestApp_GetErrors(t *testing.T) {
	app := New(client)
	if app.GetErrors() == "No errors" {
		t.Error("Get error method fail")
	}
}
|
/*******************************************************************************
* Copyright 2017 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
// Package commons/url defines url used by Service Deployment Agent Manager.
package url
// Base returns the base url as a type of string.
func Base() string { return "/api/v1" }

// Deploy returns the deploy url as a type of string.
func Deploy() string { return "/deploy" }

// Apps returns the apps url as a type of string.
func Apps() string { return "/apps" }

// Start returns the start url as a type of string.
func Start() string { return "/start" }

// Stop returns the stop url as a type of string.
func Stop() string { return "/stop" }

// Update returns the update url as a type of string.
func Update() string { return "/update" }

// Agents returns the agents url as a type of string.
func Agents() string { return "/agents" }

// Groups returns the groups url as a type of string.
func Groups() string { return "/groups" }

// Create returns the create url as a type of string.
func Create() string { return "/create" }

// Join returns the join url as a type of string.
func Join() string { return "/join" }

// Leave returns the leave url as a type of string.
func Leave() string { return "/leave" }

// Register returns the register url as a type of string.
func Register() string { return "/register" }

// Unregister returns the unregister url as a type of string.
func Unregister() string { return "/unregister" }

// Ping returns the ping url as a type of string.
func Ping() string { return "/ping" }
package models
import (
"time"
"github.com/jinzhu/gorm"
)
// Code is the GORM model for a one-time verification code: the code
// value, whether it has been consumed, its expiry, and the Send record
// it belongs to.
type Code struct {
	gorm.Model
	// SendID is the foreign key of the associated Send record.
	SendID uint
	// Code is the verification code value itself.
	Code string `gorm:"type:varchar(50);not null"`
	// IsUse marks whether the code has been consumed (0 = unused).
	IsUse int32 `gorm:"type:tinyint(1);default:0;not null"`
	// ExpiresAt is the moment after which the code is no longer valid.
	ExpiresAt time.Time `gorm:"type:datetime;not null"`
	// Send is the owning Send record (resolved via SendID).
	Send Send
}
|
/*
Swap Nodes in Pairs
Given a linked list, swap every two adjacent nodes and return its head.
For example,
Given 1->2->3->4, you should return the list as 2->1->4->3.
Your algorithm should use only constant space.
You may not modify the values in the list, only nodes itself can be changed.
*/
package main
// swapPairs swaps every two adjacent nodes of the list and returns the
// new head. It relinks the nodes themselves (values are never copied)
// and uses O(1) extra space beyond one dummy node.
func swapPairs(head *ListNode) *ListNode {
	// Lists of length 0 or 1 need no change.
	if head == nil || head.Next == nil{
		return head
	}
	// Dummy node in front of head so the first pair is handled like any
	// other. Invariant at the top of the loop: p is the node just before
	// the current pair, q is the SECOND node of that pair.
	h := new(ListNode)
	h.Next = head
	p,q := h,head.Next
	for q != nil {
		// exchange: first of pair points past the pair, second points
		// back at the first, predecessor points at the new pair head.
		p.Next.Next = q.Next
		q.Next = p.Next
		p.Next = q
		// next 2 nodes: p becomes the node now ending the swapped pair;
		// q becomes the second node of the following pair, if it exists.
		p = q.Next
		q = p.Next
		if q != nil {
			q = q.Next
		}
	}
	return h.Next
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
)
const registry_url = "https://cvs.khronos.org/svn/repos/ogl/trunk/doc/registry/public/api/gl.xml"
// DownloadRegistry downloads the Khronos XML registry file and parses it
// into a Registry. It panics if the file cannot be fetched or parsed.
func DownloadRegistry() *Registry {
	data := Download(registry_url)
	if len(data) == 0 {
		panic(fmt.Errorf("Can not download %s", registry_url))
	}
	registry := new(Registry)
	if err := xml.Unmarshal(data, registry); err != nil {
		panic(err.Error())
	}
	return registry
}
// KhronosAPI identifies an API name as used in the registry file
// (for example "gles1" or "gles2").
type KhronosAPI string

// Version is a GLES version number such as "2.0" or "3.1".
type Version string

// GLES1API selects the OpenGL ES 1.x API in the registry.
const GLES1API = KhronosAPI("gles1")

// GLES2API selects the OpenGL ES 2.0+ API in the registry.
const GLES2API = KhronosAPI("gles2") // Includes GLES 3.0 and later

// String implements fmt.Stringer. A plain conversion suffices;
// fmt.Sprintf("%s", ...) here was redundant (staticcheck S1025).
func (v Version) String() string { return string(v) }
// Registry mirrors the top-level structure of the Khronos gl.xml
// registry: enum groups, enum blocks, commands, feature (version)
// blocks and extensions.
type Registry struct {
	Group []*Group `xml:"groups>group"`
	Enums []*Enums `xml:"enums"`
	Command []*Command `xml:"commands>command"`
	Feature []*Feature `xml:"feature"`
	Extension []*ExtensionElement `xml:"extensions>extension"`
}

// NamedElementList is a list of elements identified only by name.
type NamedElementList []NamedElement

// NamedElement is an XML element carrying a "name" attribute.
type NamedElement struct {
	Name string `xml:"name,attr"`
}

// Group is a named collection of enum references.
type Group struct {
	NamedElement
	Enum NamedElementList `xml:"enum"`
}

// Enums is an <enums> block: a namespace of enum definitions.
type Enums struct {
	Namespace string `xml:"namespace,attr"`
	Group string `xml:"group,attr"`
	Type string `xml:"type,attr"` // "bitmask"
	Comment string `xml:"comment,attr"`
	Enum []Enum `xml:"enum"`
}

// Enum is a single enum definition with its literal value.
type Enum struct {
	NamedElement
	Value string `xml:"value,attr"`
	Type string `xml:"type,attr"` // "u" or "ull"
	API KhronosAPI `xml:"api,attr"`
	Alias string `xml:"alias,attr"`
}

// Command is a GL command (function): prototype, parameters, alias.
type Command struct {
	Proto ProtoOrParam `xml:"proto"`
	Param []ProtoOrParam `xml:"param"`
	Alias NamedElement `xml:"alias"`
}

// ProtoOrParam is a <proto> or <param> element; the raw inner XML is
// kept so the C type text can be extracted later (see Type()).
type ProtoOrParam struct {
	InnerXML string `xml:",innerxml"`
	Chardata string `xml:",chardata"`
	Group string `xml:"group,attr"`
	Length string `xml:"len,attr"`
	Ptype string `xml:"ptype"`
	Name string `xml:"name"`
}

// Feature is a versioned API feature block with require/remove lists.
type Feature struct {
	NamedElement
	API KhronosAPI `xml:"api,attr"`
	Number Version `xml:"number,attr"`
	Require RequireOrRemoveList `xml:"require"`
	Remove RequireOrRemoveList `xml:"remove"`
}

// ExtensionElement is an <extension> entry; Supported is a
// '|'-separated list of API names the extension applies to.
type ExtensionElement struct {
	NamedElement
	Supported string `xml:"supported,attr"`
	Require RequireOrRemoveList `xml:"require"`
	Remove RequireOrRemoveList `xml:"remove"`
}

// RequireOrRemoveList is a list of <require>/<remove> blocks.
type RequireOrRemoveList []RequireOrRemove

// RequireOrRemove lists the enums and commands a feature or extension
// requires (or removes).
type RequireOrRemove struct {
	API KhronosAPI `xml:"api,attr"` // for extensions only
	Profile string `xml:"profile,attr"`
	Comment string `xml:"comment,attr"`
	Enum NamedElementList `xml:"enum"`
	Command NamedElementList `xml:"command"`
}
// Contains reports whether the list holds an element with the given name.
func (l NamedElementList) Contains(name string) bool {
	for i := range l {
		if l[i].Name == name {
			return true
		}
	}
	return false
}
// Contains reports whether this require/remove block references the
// given name, either as an enum or as a command.
func (r *RequireOrRemove) Contains(name string) bool {
	if r.Enum.Contains(name) {
		return true
	}
	return r.Command.Contains(name)
}
// Contains reports whether any block in the list references the name.
func (l RequireOrRemoveList) Contains(name string) bool {
	for i := range l {
		if l[i].Contains(name) {
			return true
		}
	}
	return false
}
// IsSupported reports whether the extension's "supported" attribute
// (a '|'-separated list of API names) includes the given API.
func (e *ExtensionElement) IsSupported(api KhronosAPI) bool {
	candidates := strings.Split(e.Supported, "|")
	for _, candidate := range candidates {
		if api == KhronosAPI(candidate) {
			return true
		}
	}
	return false
}
// Name returns the command name declared inside its <proto> element.
func (c Command) Name() string {
	return c.Proto.Name
}
// Type extracts the C type of a proto/param element: everything in the
// inner XML before the <name> element, with the surrounding <ptype> tags
// and whitespace stripped (e.g. "const <ptype>GLchar</ptype> *<name>…").
func (p ProtoOrParam) Type() string {
	name := p.InnerXML
	// Guard against malformed input: when <name> is absent,
	// strings.Index returns -1 and the original unconditional slice
	// expression would panic with a slice-bounds error.
	if i := strings.Index(name, "<name>"); i >= 0 {
		name = name[:i]
	}
	name = strings.Replace(name, "<ptype>", "", 1)
	name = strings.Replace(name, "</ptype>", "", 1)
	return strings.TrimSpace(name)
}
// ParamsAndResult returns all parameters and return value as -1.
// The return value is the command's proto renamed to "result".
func (cmd *Command) ParamsAndResult() map[int]ProtoOrParam {
	out := make(map[int]ProtoOrParam, len(cmd.Param)+1)
	ret := cmd.Proto
	ret.Name = "result"
	out[-1] = ret
	for i := range cmd.Param {
		out[i] = cmd.Param[i]
	}
	return out
}
// GetVersions returns sorted list of versions which support the given
// symbol, or nil if no feature block requires it. It panics if the
// registry requires the symbol twice for the same API, or uses a
// <remove> tag (never used in GLES).
func (r *Registry) GetVersions(api KhronosAPI, name string) []Version {
	version, found := Version(""), false
	for _, feature := range r.Feature {
		if feature.API != api {
			continue
		}
		if feature.Require.Contains(name) {
			if found {
				panic(fmt.Errorf("redefinition of %s", name))
			}
			version, found = feature.Number, true
		}
		if feature.Remove != nil {
			// not used in GLES
			panic(fmt.Errorf("remove tag is not supported"))
		}
	}
	if !found {
		return nil
	}
	// Support is cumulative: a symbol introduced in version X exists in
	// every later version as well.
	switch version {
	case "1.0":
		return []Version{"1.0"}
	case "2.0":
		return []Version{"2.0", "3.0", "3.1", "3.2"}
	case "3.0":
		return []Version{"3.0", "3.1", "3.2"}
	case "3.1":
		return []Version{"3.1", "3.2"}
	case "3.2":
		return []Version{"3.2"}
	default:
		panic(fmt.Errorf("Unknown GLES version: %v", version))
	}
}
// GetExtensions returns extensions which define the given symbol.
// Only extensions supported on the given API are considered; a require
// block without an API qualifier applies to every API.
func (r *Registry) GetExtensions(api KhronosAPI, name string) []string {
	var extensions []string
	// Labeled loop so the extension is recorded at most once even when
	// it lists the symbol in several require blocks.
ExtensionLoop:
	for _, extension := range r.Extension {
		if extension.IsSupported(api) {
			for _, require := range extension.Require {
				if require.API == "" || require.API == api {
					if require.Contains(name) {
						extensions = append(extensions, extension.Name)
						// sometimes the extension repeats definition - ignore
						continue ExtensionLoop
					}
				}
			}
			if extension.Remove != nil {
				// not used in GLES
				panic(fmt.Errorf("remove tag is not supported"))
			}
		}
	}
	return extensions
}
// sufix_re matches the type/size suffix of a GL command name (e.g. the
// "4fv" in "glUniform4fv") so it can be stripped when locating manpages.
// NOTE(review): "sufix" is a typo for "suffix"; renaming would touch the
// uses below, so it is only flagged here.
var sufix_re = regexp.MustCompile("(64|)(i_|)(I|)([1-4]|[1-4]x[1-4]|)(x|ub|f|i|ui|fi|i64|)(v|)$")

// GetCoreManpage finds the Khronos reference page for a core GLES
// command: it picks the manpage base URL for the version, maps command
// families documented under a different name via the prefix table, and
// tries the name both without and with its type suffix. It panics when
// no URL yields content.
func GetCoreManpage(version Version, cmdName string) (url string, data []byte) {
	var urlFormat string
	switch version {
	case "1.0":
		urlFormat = "https://www.khronos.org/opengles/sdk/1.1/docs/man/%s.xml"
	case "2.0":
		urlFormat = "https://www.khronos.org/opengles/sdk/docs/man/xhtml/%s.xml"
	case "3.0":
		urlFormat = "https://www.khronos.org/opengles/sdk/docs/man3/html/%s.xhtml"
	case "3.1":
		urlFormat = "https://www.khronos.org/opengles/sdk/docs/man31/html/%s.xhtml"
	case "3.2":
		urlFormat = "https://www.khronos.org/opengles/sdk/docs/man32/html/%s.xhtml"
	default:
		panic(fmt.Errorf("Uknown api version: %v", version))
	}
	// Commands documented under a sibling command's page. The final
	// empty entry matches every name, so the original name is always
	// tried as-is last.
	for _, table := range []struct{ oldPrefix, newPrefix string }{
		{"glDisable", "glEnable"},
		{"glEnd", "glBegin"},
		{"glGetBoolean", "glGet"},
		{"glGetFixed", "glGet"},
		{"glGetFloat", "glGet"},
		{"glGetInteger", "glGet"},
		{"glGetnUniform", "glGetUniform"},
		{"glMemoryBarrierByRegion", "glMemoryBarrier"},
		{"glProgramUniformMatrix", "glProgramUniform"},
		{"glReadnPixels", "glReadPixels"},
		{"glUniformMatrix", "glUniform"},
		{"glUnmapBuffer", "glMapBufferRange"},
		{"glVertexAttribIFormat", "glVertexAttribFormat"},
		{"glVertexAttribIPointer", "glVertexAttribPointer"},
		{"", ""}, // no-op
	} {
		if strings.HasPrefix(cmdName, table.oldPrefix) {
			// Replace prefix. The := deliberately shadows cmdName so each
			// table entry is tried against the unmodified original name.
			cmdName := table.newPrefix + strings.TrimPrefix(cmdName, table.oldPrefix)
			// Try to download URL without suffix
			if sufix_re.MatchString(cmdName) {
				// Inner shadow again: strip the suffix only for this try.
				cmdName := sufix_re.ReplaceAllString(cmdName, "")
				url = fmt.Sprintf(urlFormat, cmdName)
				if data := Download(url); len(data) > 0 {
					return url, data
				}
			}
			// Try to download URL with suffix
			url = fmt.Sprintf(urlFormat, cmdName)
			if data := Download(url); len(data) > 0 {
				return url, data
			}
		}
	}
	panic(fmt.Errorf("Failed to find URL for %s", cmdName))
}
// GetExtensionManpage locates the specification text for the given
// extension name (e.g. "GL_EXT_foo_bar"): it first derives the standard
// registry URL from the vendor part, then falls back to a table of known
// special cases. It panics when no URL yields content.
func GetExtensionManpage(extension string) (url string, data []byte) {
	parts := strings.Split(extension, "_")
	if len(parts) < 2 {
		// A name without any '_' previously caused an index-out-of-range
		// panic on parts[1]; fail with the function's own clear message.
		panic(fmt.Errorf("Failed to find URL for %s", extension))
	}
	vendor := parts[1]
	page := strings.Join(parts[2:], "_")
	url = fmt.Sprintf("https://www.khronos.org/registry/gles/extensions/%s/%s_%s.txt", vendor, vendor, page)
	if data := Download(url); len(data) > 0 {
		return url, data
	}
	// Known extensions whose spec lives at a non-standard path.
	for _, table := range []struct{ extension, page string }{
		{"GL_NV_coverage_sample", "NV/EGL_NV_coverage_sample.txt"},
		{"GL_NV_depth_nonlinear", "NV/EGL_NV_depth_nonlinear.txt"},
		{"GL_NV_EGL_NV_coverage_sample", "NV/EGL_NV_coverage_sample.txt"},
		{"GL_EXT_separate_shader_objects", "EXT/EXT_separate_shader_objects.gles.txt"},
	} {
		if table.extension == extension {
			url = fmt.Sprintf("https://www.khronos.org/registry/gles/extensions/%s", table.page)
			if data := Download(url); len(data) > 0 {
				return url, data
			}
		}
	}
	panic(fmt.Errorf("Failed to find URL for %s", extension))
}
// Download the given URL. Returns empty slice if the page can not be found (404).
// Results are cached on disk under *cacheDir; a cached file (including a
// cached empty 404 result) is returned without touching the network.
// Any other failure panics.
func Download(url string) []byte {
	// Derive the cache file name: strip the scheme, flatten all but the
	// last '/' into '-', and root the result under the cache directory.
	filename := url
	filename = strings.TrimPrefix(filename, "https://")
	filename = strings.Replace(filename, "/", "-", strings.Count(filename, "/")-1)
	filename = strings.Replace(filename, "/", string(os.PathSeparator), 1)
	filename = *cacheDir + string(os.PathSeparator) + filename
	if bytes, err := ioutil.ReadFile(filename); err == nil {
		return bytes
	}
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	// Close the body on every exit path; previously the ReadAll-error and
	// unexpected-status panics below leaked the response body.
	defer resp.Body.Close()
	bytes := []byte{}
	if resp.StatusCode == 200 {
		bytes, err = ioutil.ReadAll(resp.Body)
		if err != nil {
			panic(err)
		}
	} else if resp.StatusCode != 404 {
		panic(fmt.Errorf("%s: %s", url, resp.Status))
	}
	// Cache the result (an empty file records a 404).
	dir := filename[0:strings.LastIndex(filename, string(os.PathSeparator))]
	if err := os.MkdirAll(dir, 0750); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(filename, bytes, 0666); err != nil {
		panic(err)
	}
	return bytes
}
|
// Copyright 2021 The Perses Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// TestKind_validateError verifies that validate rejects empty and
// unknown kind values with the expected error messages.
func TestKind_validateError(t *testing.T) {
	cases := []struct {
		title       string
		kind        Kind
		resultError error
	}{
		{
			title:       "empty kind",
			kind:        "",
			resultError: fmt.Errorf("kind cannot be empty"),
		},
		{
			title:       "unknown kind",
			kind:        "unknown",
			resultError: fmt.Errorf("unknown kind 'unknown' used"),
		},
	}
	for i := range cases {
		tc := cases[i]
		t.Run(tc.title, func(t *testing.T) {
			assert.Equal(t, tc.resultError, (&tc.kind).validate())
		})
	}
}
// TestKind_validate verifies that validate accepts known kind values.
func TestKind_validate(t *testing.T) {
	cases := []struct {
		title string
		kind  Kind
	}{
		{
			title: "project",
			kind:  KindProject,
		},
	}
	for i := range cases {
		tc := cases[i]
		t.Run(tc.title, func(t *testing.T) {
			assert.NoError(t, (&tc.kind).validate())
		})
	}
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package context defines an internal context type.
//
// The given Context conforms to the standard Go context, but mandates
// additional methods that are specific to the kernel internals. Note however,
// that the Context described by this package carries additional constraints
// regarding concurrent access and retaining beyond the scope of a call.
//
// See the Context type for complete details.
package context
import (
"context"
"errors"
"sync"
"time"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/waiter"
)
// Blocker represents an object with control flow hooks.
//
// These may be used to perform blocking operations, sleep or otherwise
// wait, since there may be asynchronous events that require processing.
type Blocker interface {
	// Interrupt interrupts any Block operations.
	Interrupt()

	// Interrupted notes whether this context is Interrupted.
	Interrupted() bool

	// BlockOn blocks until one of the previously registered events occurs,
	// or some external interrupt (cancellation).
	//
	// The return value should indicate whether the wake-up occurred as a
	// result of the requested event (versus an external interrupt).
	BlockOn(waiter.Waitable, waiter.EventMask) bool

	// Block blocks until an event is received from C, or some external
	// interrupt. It returns nil if an event is received from C and an err
	// if it is interrupted.
	Block(C <-chan struct{}) error

	// BlockWithTimeoutOn blocks until either the conditions of Block are
	// satisfied, or the timeout is hit. Note that deadlines are not supported
	// since the notion of "with respect to what clock" is not resolved.
	//
	// The return value is per BlockOn.
	BlockWithTimeoutOn(waiter.Waitable, waiter.EventMask, time.Duration) (time.Duration, bool)

	// UninterruptibleSleepStart indicates the beginning of an uninterruptible
	// sleep state (equivalent to Linux's TASK_UNINTERRUPTIBLE). If deactivate
	// is true and the Context represents a Task, the Task's AddressSpace is
	// deactivated.
	UninterruptibleSleepStart(deactivate bool)

	// UninterruptibleSleepFinish indicates the end of an uninterruptible sleep
	// state that was begun by a previous call to UninterruptibleSleepStart. If
	// activate is true and the Context represents a Task, the Task's
	// AddressSpace is activated. Normally activate is the same value as the
	// deactivate parameter passed to UninterruptibleSleepStart.
	UninterruptibleSleepFinish(activate bool)
}
// NoTask is an implementation of Blocker that does not block.
type NoTask struct {
	// cancel carries interrupt signals; it is created lazily (buffered,
	// capacity 1) by the first call to Block/BlockOn/BlockWithTimeoutOn.
	cancel chan struct{}
}
// Interrupt implements Blocker.Interrupt.
func (nt *NoTask) Interrupt() {
	// Non-blocking send: if the buffered slot is already occupied (or the
	// channel was never created), the interrupt is dropped.
	select {
	case nt.cancel <- struct{}{}:
	default:
	}
}
// Interrupted implements Blocker.Interrupted. It reports whether an
// interrupt is pending in the cancel channel.
func (nt *NoTask) Interrupted() bool {
	if nt.cancel == nil {
		return false
	}
	return len(nt.cancel) > 0
}
// Block implements Blocker.Block. It waits for an event on C or for an
// interrupt, whichever comes first.
func (nt *NoTask) Block(C <-chan struct{}) error {
	// Lazily create the interrupt channel; buffered so Interrupt never
	// blocks.
	if nt.cancel == nil {
		nt.cancel = make(chan struct{}, 1)
	}
	select {
	case <-C:
		return nil
	case <-nt.cancel:
		return errors.New("interrupted system call") // Interrupted.
	}
}
// BlockOn implements Blocker.BlockOn.
func (nt *NoTask) BlockOn(w waiter.Waitable, mask waiter.EventMask) bool {
	// Lazily create the interrupt channel; buffered so Interrupt never
	// blocks.
	if nt.cancel == nil {
		nt.cancel = make(chan struct{}, 1)
	}
	// Register for the requested events for the duration of the wait.
	e, ch := waiter.NewChannelEntry(mask)
	w.EventRegister(&e)
	defer w.EventUnregister(&e)
	select {
	case <-nt.cancel:
		return false // Interrupted.
	case _, ok := <-ch:
		return ok
	}
}
// BlockWithTimeoutOn implements Blocker.BlockWithTimeoutOn.
// The returned duration is the time spent waiting, or zero if the
// timeout fired.
func (nt *NoTask) BlockWithTimeoutOn(w waiter.Waitable, mask waiter.EventMask, duration time.Duration) (time.Duration, bool) {
	// Lazily create the interrupt channel; buffered so Interrupt never
	// blocks.
	if nt.cancel == nil {
		nt.cancel = make(chan struct{}, 1)
	}
	// Register for the requested events for the duration of the wait.
	e, ch := waiter.NewChannelEntry(mask)
	w.EventRegister(&e)
	defer w.EventUnregister(&e)
	start := time.Now() // In system time.
	// The timer shares ch with the waiter: a timeout is delivered as an
	// ordinary event, disambiguated below via t.Stop().
	t := time.AfterFunc(duration, func() { ch <- struct{}{} })
	select {
	case <-nt.cancel:
		return time.Since(start), false // Interrupted.
	case _, ok := <-ch:
		if ok && t.Stop() {
			// Timer never fired.
			return time.Since(start), ok
		}
		// Timer fired, remain is zero.
		return time.Duration(0), ok
	}
}
// UninterruptibleSleepStart implements Blocker.UninterruptibleSleepStart.
// It is a no-op: NoTask has no AddressSpace to deactivate.
func (*NoTask) UninterruptibleSleepStart(bool) {}

// UninterruptibleSleepFinish implements Blocker.UninterruptibleSleepFinish.
// It is a no-op: NoTask has no AddressSpace to activate.
func (*NoTask) UninterruptibleSleepFinish(bool) {}
// Context represents a thread of execution (hereafter "goroutine" to reflect
// Go idiosyncrasy). It carries state associated with the goroutine across API
// boundaries.
//
// While Context exists for essentially the same reasons as Go's standard
// context.Context, the standard type represents the state of an operation
// rather than that of a goroutine. This is a critical distinction:
//
// - Unlike context.Context, which "may be passed to functions running in
// different goroutines", it is *not safe* to use the same Context in multiple
// concurrent goroutines.
//
// - It is *not safe* to retain a Context passed to a function beyond the scope
// of that function call.
//
// In both cases, values extracted from the Context should be used instead.
//
// A Context is therefore the union of a standard context, a structured
// logger, and the Blocker control-flow hooks defined above.
type Context interface {
	context.Context
	log.Logger
	Blocker
}
// logContext implements basic logging. It satisfies Context by embedding
// a non-blocking NoTask, a logger and a standard context.
type logContext struct {
	NoTask
	log.Logger
	context.Context
}
// bgContext is the context returned by context.Background.
var bgContext Context

// bgOnce guards the one-time initialization of bgContext.
var bgOnce sync.Once

// Background returns an empty context using the default logger.
// Generally, one should use the Task as their context when available, or avoid
// having to use a context in places where a Task is unavailable.
//
// Using a Background context for tests is fine, as long as no values are
// needed from the context in the tested code paths.
//
// The global log.SetTarget() must be called before context.Background(),
// because the default logger is captured on the first call and reused
// for every subsequent one.
func Background() Context {
	bgOnce.Do(func() {
		bgContext = &logContext{
			Context: context.Background(),
			Logger:  log.Log(),
		}
	})
	return bgContext
}
// WithValue returns a copy of parent in which the value associated with key is
// val. Lookups for any other key are delegated to parent.
func WithValue(parent Context, key, val any) Context {
	wrapped := &withValue{
		Context: parent,
		key:     key,
		val:     val,
	}
	return wrapped
}
// withValue decorates a Context with a single additional key/value pair.
type withValue struct {
	Context
	key any
	val any
}

// Value implements Context.Value. It answers for the stored key and
// defers every other lookup to the wrapped Context.
func (ctx *withValue) Value(key any) any {
	if key != ctx.key {
		return ctx.Context.Value(key)
	}
	return ctx.val
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package datadogagent
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/DataDog/datadog-operator/apis/datadoghq/common"
datadoghqv1alpha1 "github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
cilium "github.com/DataDog/datadog-operator/pkg/cilium/v1"
"github.com/DataDog/datadog-operator/pkg/controller/utils"
"github.com/DataDog/datadog-operator/pkg/controller/utils/comparison"
"github.com/DataDog/datadog-operator/pkg/controller/utils/datadog"
"github.com/DataDog/datadog-operator/pkg/kubernetes"
"github.com/DataDog/datadog-operator/pkg/version"
"github.com/DataDog/datadog-operator/controllers/datadogagent/component"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/object"
edsdatadoghqv1alpha1 "github.com/DataDog/extendeddaemonset/api/v1alpha1"
)
// reconcileAgent drives the agent workload toward the desired state:
// it reconciles dependencies, then creates, updates or deletes the agent
// DaemonSet or ExtendedDaemonSet (EDS) depending on the spec and the
// operator's EDS option, updating newStatus.Agent along the way.
// A non-empty result or error from any step short-circuits the flow.
func (r *Reconciler) reconcileAgent(logger logr.Logger, features []feature.Feature, dda *datadoghqv1alpha1.DatadogAgent, newStatus *datadoghqv1alpha1.DatadogAgentStatus) (reconcile.Result, error) {
	result, err := r.manageAgentDependencies(logger, dda)
	if utils.ShouldReturn(result, err) {
		return result, err
	}
	// The DaemonSet name is derived from the spec and immutable once set.
	if newStatus.Agent != nil && newStatus.Agent.DaemonsetName != "" && newStatus.Agent.DaemonsetName != daemonsetName(dda) {
		return result, fmt.Errorf("the Datadog agent DaemonSet cannot be renamed once created")
	}
	nameNamespace := types.NamespacedName{
		Name:      daemonsetName(dda),
		Namespace: dda.ObjectMeta.Namespace,
	}
	// check if EDS or DS already exist
	// (a nil eds/ds below means "not present in the cluster")
	eds := &edsdatadoghqv1alpha1.ExtendedDaemonSet{}
	if r.options.ExtendedDaemonsetOptions.Enabled {
		if err2 := r.client.Get(context.TODO(), nameNamespace, eds); err2 != nil {
			if !errors.IsNotFound(err2) {
				return result, err2
			}
			eds = nil
		}
	} else {
		eds = nil
	}
	ds := &appsv1.DaemonSet{}
	if err2 := r.client.Get(context.TODO(), nameNamespace, ds); err2 != nil {
		if !errors.IsNotFound(err2) {
			return result, err2
		}
		ds = nil
	}
	// Agent disabled: remove whichever workload exists and clear status.
	if !apiutils.BoolValue(dda.Spec.Agent.Enabled) {
		if ds != nil {
			if err = r.deleteDaemonSet(logger, dda, ds); err != nil {
				return result, err
			}
		}
		if eds != nil {
			if err = r.deleteExtendedDaemonSet(logger, dda, eds); err != nil {
				return result, err
			}
		}
		newStatus.Agent = nil
		return result, err
	}
	// EDS requested: delete any plain DaemonSet first (requeue to let the
	// deletion settle), then create or update the EDS.
	if r.options.ExtendedDaemonsetOptions.Enabled && apiutils.BoolValue(dda.Spec.Agent.UseExtendedDaemonset) {
		if ds != nil {
			// TODO manage properly the migration from DS to EDS
			err = r.deleteDaemonSet(logger, dda, ds)
			if err != nil {
				return result, err
			}
			result.RequeueAfter = 5 * time.Second
			return result, nil
		}
		if eds == nil {
			return r.createNewExtendedDaemonSet(logger, dda, newStatus)
		}
		return r.updateExtendedDaemonSet(logger, dda, eds, newStatus)
	}
	// Case when Daemonset is requested
	if eds != nil && r.options.ExtendedDaemonsetOptions.Enabled {
		// if EDS exist delete before creating or updating the Daemonset
		err = r.deleteExtendedDaemonSet(logger, dda, eds)
		if err != nil {
			return result, err
		}
		result.RequeueAfter = 5 * time.Second
		return result, nil
	}
	if ds == nil {
		return r.createNewDaemonSet(logger, dda, newStatus)
	}
	return r.updateDaemonSet(logger, dda, ds, newStatus)
}
// deleteDaemonSet deletes the given agent DaemonSet, logs the deletion
// and emits a Datadog deletion event attached to the DatadogAgent.
func (r *Reconciler) deleteDaemonSet(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, ds *appsv1.DaemonSet) error {
	if err := r.client.Delete(context.TODO(), ds); err != nil {
		return err
	}
	logger.Info("Delete DaemonSet", "daemonSet.Namespace", ds.Namespace, "daemonSet.Name", ds.Name)
	event := buildEventInfo(ds.Name, ds.Namespace, daemonSetKind, datadog.DeletionEvent)
	r.recordEvent(dda, event)
	// err was provably nil past the guard above; return nil explicitly.
	return nil
}
// deleteExtendedDaemonSet deletes the given agent ExtendedDaemonSet,
// logs the deletion and emits a Datadog deletion event attached to the
// DatadogAgent.
func (r *Reconciler) deleteExtendedDaemonSet(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, eds *edsdatadoghqv1alpha1.ExtendedDaemonSet) error {
	if err := r.client.Delete(context.TODO(), eds); err != nil {
		return err
	}
	// The message previously read "Delete DaemonSet" — a copy-paste from
	// deleteDaemonSet; log the actual resource kind.
	logger.Info("Delete ExtendedDaemonSet", "extendedDaemonSet.Namespace", eds.Namespace, "extendedDaemonSet.Name", eds.Name)
	event := buildEventInfo(eds.Name, eds.Namespace, extendedDaemonSetKind, datadog.DeletionEvent)
	r.recordEvent(dda, event)
	return nil
}
// createNewExtendedDaemonSet builds a fresh ExtendedDaemonSet from the
// DatadogAgent spec, sets the owner reference, creates it in the
// cluster, emits a creation event and refreshes newStatus.Agent.
func (r *Reconciler) createNewExtendedDaemonSet(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, newStatus *datadoghqv1alpha1.DatadogAgentStatus) (reconcile.Result, error) {
	var err error
	// ExtendedDaemonSet up to date didn't exist yet, create a new one
	var newEDS *edsdatadoghqv1alpha1.ExtendedDaemonSet
	var hashEDS string
	// hashEDS is the MD5 of the generated spec, used later to detect drift.
	if newEDS, hashEDS, err = newExtendedDaemonSetFromInstance(logger, dda, nil); err != nil {
		return reconcile.Result{}, err
	}
	// Set ExtendedDaemonSet instance as the owner and controller
	if err = controllerutil.SetControllerReference(dda, newEDS, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	logger.Info("Creating a new ExtendedDaemonSet", "extendedDaemonSet.Namespace", newEDS.Namespace, "extendedDaemonSet.Name", newEDS.Name, "agentdeployment.Status.Agent.CurrentHash", hashEDS)
	err = r.client.Create(context.TODO(), newEDS)
	if err != nil {
		return reconcile.Result{}, err
	}
	event := buildEventInfo(newEDS.Name, newEDS.Namespace, extendedDaemonSetKind, datadog.CreationEvent)
	r.recordEvent(dda, event)
	now := metav1.NewTime(time.Now())
	newStatus.Agent = updateExtendedDaemonSetStatus(newEDS, newStatus.Agent, &now)
	return reconcile.Result{}, nil
}
// createNewDaemonSet builds a fresh DaemonSet from the DatadogAgent
// spec, sets the owner reference, creates it in the cluster, emits a
// creation event and refreshes newStatus.Agent.
func (r *Reconciler) createNewDaemonSet(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, newStatus *datadoghqv1alpha1.DatadogAgentStatus) (reconcile.Result, error) {
	var err error
	// DaemonSet up to date didn't exist yet, create a new one
	var newDS *appsv1.DaemonSet
	var hashDS string
	// hashDS is the MD5 of the generated spec, used later to detect drift.
	if newDS, hashDS, err = newDaemonSetFromInstance(logger, dda, nil); err != nil {
		return reconcile.Result{}, err
	}
	// Set DaemonSet instance as the owner and controller
	if err = controllerutil.SetControllerReference(dda, newDS, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	logger.Info("Creating a new DaemonSet", "daemonSet.Namespace", newDS.Namespace, "daemonSet.Name", newDS.Name, "agentdeployment.Status.Agent.CurrentHash", hashDS)
	err = r.client.Create(context.TODO(), newDS)
	if err != nil {
		return reconcile.Result{}, err
	}
	event := buildEventInfo(newDS.Name, newDS.Namespace, daemonSetKind, datadog.CreationEvent)
	r.recordEvent(dda, event)
	now := metav1.NewTime(time.Now())
	newStatus.Agent = updateDaemonSetStatus(newDS, newStatus.Agent, &now)
	return reconcile.Result{}, nil
}
// updateExtendedDaemonSet regenerates the desired ExtendedDaemonSet from
// the DatadogAgent spec and, when its spec hash differs from the live
// object's annotation, pushes the updated spec/labels/annotations to the
// cluster, emits an update event and requeues after 5s. When the hashes
// match it only refreshes the status.
func (r *Reconciler) updateExtendedDaemonSet(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, eds *edsdatadoghqv1alpha1.ExtendedDaemonSet, newStatus *datadoghqv1alpha1.DatadogAgentStatus) (reconcile.Result, error) {
	now := metav1.NewTime(time.Now())
	// Reuse the existing selector: it is immutable on a live workload.
	newEDS, newHashEDS, err := newExtendedDaemonSetFromInstance(logger, dda, eds.Spec.Selector)
	if err != nil {
		return reconcile.Result{}, err
	}
	if comparison.IsSameSpecMD5Hash(newHashEDS, eds.GetAnnotations()) {
		// no update needed so return, update the status and return
		newStatus.Agent = updateExtendedDaemonSetStatus(eds, newStatus.Agent, &now)
		return reconcile.Result{}, nil
	}
	// Set ExtendedDaemonSet instance as the owner and controller
	if err = controllerutil.SetControllerReference(dda, eds, r.scheme); err != nil {
		return reconcile.Result{}, err
	}
	logger.Info("Updating an existing ExtendedDaemonSet", "extendedDaemonSet.Namespace", newEDS.Namespace, "extendedDaemonSet.Name", newEDS.Name)
	// Copy possibly changed fields
	updatedEds := eds.DeepCopy()
	updatedEds.Spec = *newEDS.Spec.DeepCopy()
	updatedEds.Annotations = mergeAnnotationsLabels(logger, eds.GetAnnotations(), newEDS.GetAnnotations(), dda.Spec.Agent.KeepAnnotations)
	updatedEds.Labels = mergeAnnotationsLabels(logger, eds.GetLabels(), newEDS.GetLabels(), dda.Spec.Agent.KeepLabels)
	err = kubernetes.UpdateFromObject(context.TODO(), r.client, updatedEds, eds.ObjectMeta)
	if err != nil {
		return reconcile.Result{}, err
	}
	event := buildEventInfo(updatedEds.Name, updatedEds.Namespace, extendedDaemonSetKind, datadog.UpdateEvent)
	r.recordEvent(dda, event)
	newStatus.Agent = updateExtendedDaemonSetStatus(updatedEds, newStatus.Agent, &now)
	return reconcile.Result{RequeueAfter: 5 * time.Second}, nil
}
func getHashAnnotation(annotations map[string]string) string {
return annotations[common.MD5AgentDeploymentAnnotationKey]
}
func (r *Reconciler) updateDaemonSet(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, ds *appsv1.DaemonSet, newStatus *datadoghqv1alpha1.DatadogAgentStatus) (reconcile.Result, error) {
// Update values from current DS in any case
newStatus.Agent = updateDaemonSetStatus(ds, newStatus.Agent, nil)
newDS, newHashDS, err := newDaemonSetFromInstance(logger, dda, ds.Spec.Selector)
if err != nil {
return reconcile.Result{}, err
}
now := metav1.NewTime(time.Now())
if comparison.IsSameSpecMD5Hash(newHashDS, ds.GetAnnotations()) {
// no update needed so update the status and return
newStatus.Agent = updateDaemonSetStatus(ds, newStatus.Agent, &now)
return reconcile.Result{}, nil
}
// Set DaemonSet instance as the owner and controller
if err = controllerutil.SetControllerReference(dda, ds, r.scheme); err != nil {
return reconcile.Result{}, err
}
logger.Info("Updating an existing DaemonSet", "daemonSet.Namespace", newDS.Namespace, "daemonSet.Name", newDS.Name)
// Copy possibly changed fields
updatedDS := ds.DeepCopy()
updatedDS.Spec = *newDS.Spec.DeepCopy()
updatedDS.Annotations = mergeAnnotationsLabels(logger, ds.GetAnnotations(), newDS.GetAnnotations(), dda.Spec.Agent.KeepAnnotations)
updatedDS.Labels = mergeAnnotationsLabels(logger, ds.GetLabels(), newDS.GetLabels(), dda.Spec.Agent.KeepLabels)
err = kubernetes.UpdateFromObject(context.TODO(), r.client, updatedDS, ds.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
event := buildEventInfo(updatedDS.Name, updatedDS.Namespace, daemonSetKind, datadog.UpdateEvent)
r.recordEvent(dda, event)
newStatus.Agent = updateDaemonSetStatus(updatedDS, newStatus.Agent, &now)
return reconcile.Result{RequeueAfter: 5 * time.Second}, nil
}
func (r *Reconciler) manageAgentDependencies(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) {
result, err := r.manageAgentSecret(logger, dda)
if utils.ShouldReturn(result, err) {
return result, err
}
result, err = r.manageAgentRBACs(logger, dda)
if utils.ShouldReturn(result, err) {
return result, err
}
result, err = r.manageSystemProbeDependencies(logger, dda)
if utils.ShouldReturn(result, err) {
return result, err
}
result, err = r.manageConfigMap(logger, dda, getAgentCustomConfigConfigMapName(dda), buildAgentConfigurationConfigMap)
if utils.ShouldReturn(result, err) {
return result, err
}
result, err = r.manageConfigMap(logger, dda, component.GetInstallInfoConfigMapName(dda), buildInstallInfoConfigMap)
if utils.ShouldReturn(result, err) {
return result, err
}
result, err = r.manageAgentNetworkPolicy(logger, dda)
if utils.ShouldReturn(result, err) {
return result, err
}
result, err = r.manageAgentService(logger, dda)
if utils.ShouldReturn(result, err) {
return result, err
}
return reconcile.Result{}, nil
}
func (r *Reconciler) manageAgentNetworkPolicy(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent) (reconcile.Result, error) {
spec := dda.Spec.Agent
builder := agentNetworkPolicyBuilder{dda, spec.NetworkPolicy}
if !apiutils.BoolValue(spec.Enabled) || spec.NetworkPolicy == nil || !apiutils.BoolValue(spec.NetworkPolicy.Create) {
return r.cleanupNetworkPolicy(logger, dda, builder.Name())
}
return r.ensureNetworkPolicy(logger, dda, builder)
}
type agentNetworkPolicyBuilder struct {
dda *datadoghqv1alpha1.DatadogAgent
np *datadoghqv1alpha1.NetworkPolicySpec
}
func (b agentNetworkPolicyBuilder) Name() string {
return fmt.Sprintf("%s-%s", b.dda.Name, common.DefaultAgentResourceSuffix)
}
func (b agentNetworkPolicyBuilder) NetworkPolicySpec() *datadoghqv1alpha1.NetworkPolicySpec {
return b.np
}
func (b agentNetworkPolicyBuilder) BuildKubernetesPolicy() *networkingv1.NetworkPolicy {
dda := b.dda
name := b.Name()
egressRules := []networkingv1.NetworkPolicyEgressRule{
// Egress to datadog intake and
// kubeapi server
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 443,
},
},
},
},
// The agents are susceptible to connect to any pod that would
// be annotated with auto-discovery annotations.
//
// When a user wants to add a check on one of its pod, he needs
// to
// * annotate its pod
// * add an ingress policy from the agent on its own pod
// In order to not ask end-users to inject NetworkPolicy on the
// agent in the agent namespace, the agent must be allowed to
// probe any pod.
{},
}
protocolUDP := corev1.ProtocolUDP
protocolTCP := corev1.ProtocolTCP
ingressRules := []networkingv1.NetworkPolicyIngressRule{
// Ingress for dogstatsd
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: common.DefaultDogstatsdPort,
},
Protocol: &protocolUDP,
},
},
},
}
if isAPMEnabled(&dda.Spec) {
ingressRules = append(ingressRules, networkingv1.NetworkPolicyIngressRule{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{
Type: intstr.Int,
IntVal: *dda.Spec.Agent.Apm.HostPort,
},
Protocol: &protocolTCP,
},
},
})
}
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Labels: object.GetDefaultLabels(dda, name, getAgentVersion(dda)),
Name: name,
Namespace: dda.Namespace,
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: b.PodSelector(),
Ingress: ingressRules,
Egress: egressRules,
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
networkingv1.PolicyTypeEgress,
},
},
}
return policy
}
func (b agentNetworkPolicyBuilder) PodSelector() metav1.LabelSelector {
return metav1.LabelSelector{
MatchLabels: map[string]string{
kubernetes.AppKubernetesInstanceLabelKey: common.DefaultAgentResourceSuffix,
kubernetes.AppKubernetesPartOfLabelKey: object.NewPartOfLabelValue(b.dda).String(),
},
}
}
func (b agentNetworkPolicyBuilder) ddFQDNs() []cilium.FQDNSelector {
selectors := []cilium.FQDNSelector{}
ddURL := b.dda.Spec.Agent.Config.DDUrl
if ddURL != nil {
selectors = append(selectors, cilium.FQDNSelector{
MatchName: strings.TrimPrefix(*ddURL, "https://"),
})
}
var site string
if b.dda.Spec.Site != "" {
site = b.dda.Spec.Site
} else {
site = defaultSite
}
selectors = append(selectors, []cilium.FQDNSelector{
{
MatchPattern: fmt.Sprintf("*-app.agent.%s", site),
},
{
MatchName: fmt.Sprintf("api.%s", site),
},
{
MatchName: fmt.Sprintf("agent-intake.logs.%s", site),
},
{
MatchName: fmt.Sprintf("agent-http-intake.logs.%s", site),
},
{
MatchName: fmt.Sprintf("process.%s", site),
},
{
MatchName: fmt.Sprintf("orchestrator.%s", site),
},
}...)
return selectors
}
func (b agentNetworkPolicyBuilder) BuildCiliumPolicy() *cilium.NetworkPolicy {
specs := []cilium.NetworkPolicySpec{
{
Description: "Egress to ECS agent port 51678",
EndpointSelector: b.PodSelector(),
Egress: []cilium.EgressRule{
{
ToEntities: []cilium.Entity{cilium.EntityHost},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: "51678",
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
{
ToCIDR: []string{"169.254.0.0/16"},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: "51678",
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
{
Description: "Egress to ntp",
EndpointSelector: b.PodSelector(),
Egress: []cilium.EgressRule{
{
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: "123",
Protocol: cilium.ProtocolUDP,
},
},
},
},
ToFQDNs: []cilium.FQDNSelector{
{
MatchPattern: "*.datadog.pool.ntp.org",
},
},
},
},
},
ciliumEgressMetadataServerRule(b),
ciliumEgressDNS(b),
{
Description: "Egress to Datadog intake",
EndpointSelector: b.PodSelector(),
Egress: []cilium.EgressRule{
{
ToFQDNs: b.ddFQDNs(),
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: "443",
Protocol: cilium.ProtocolTCP,
},
{
Port: "10516",
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
{
Description: "Egress to kubelet",
EndpointSelector: b.PodSelector(),
Egress: []cilium.EgressRule{
{
ToEntities: []cilium.Entity{
cilium.EntityHost,
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: "10250",
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
},
{
Description: "Ingress for dogstatsd",
EndpointSelector: b.PodSelector(),
Ingress: []cilium.IngressRule{
{
FromEndpoints: []metav1.LabelSelector{
{},
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: strconv.Itoa(common.DefaultDogstatsdPort),
Protocol: cilium.ProtocolUDP,
},
},
},
},
},
},
},
ciliumEgressChecks(b),
}
if isAPMEnabled(&b.dda.Spec) {
specs = append(specs, cilium.NetworkPolicySpec{
Description: "Ingress for APM trace",
EndpointSelector: b.PodSelector(),
Ingress: []cilium.IngressRule{
{
FromEndpoints: []metav1.LabelSelector{
{},
},
ToPorts: []cilium.PortRule{
{
Ports: []cilium.PortProtocol{
{
Port: strconv.Itoa(int(*b.dda.Spec.Agent.Apm.HostPort)),
Protocol: cilium.ProtocolTCP,
},
},
},
},
},
},
})
}
return &cilium.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Labels: object.GetDefaultLabels(b.dda, b.Name(), getAgentVersion(b.dda)),
Name: b.Name(),
Namespace: b.dda.Namespace,
},
Specs: specs,
}
}
// newExtendedDaemonSetFromInstance creates an ExtendedDaemonSet from a given DatadogAgent
func newExtendedDaemonSetFromInstance(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, selector *metav1.LabelSelector) (*edsdatadoghqv1alpha1.ExtendedDaemonSet, string, error) {
template, err := newAgentPodTemplate(logger, dda, selector)
if err != nil {
return nil, "", fmt.Errorf("unable to get agent pod template when creating new EDS instance, err: %w", err)
}
strategy, err := getAgentDeploymentStrategy(dda)
if err != nil {
return nil, "", fmt.Errorf("unable to get Deployment strategy when creating new EDS instance, err: %w", err)
}
eds := &edsdatadoghqv1alpha1.ExtendedDaemonSet{
ObjectMeta: newDaemonsetObjectMetaData(dda),
Spec: edsdatadoghqv1alpha1.ExtendedDaemonSetSpec{
Selector: selector,
Template: *template,
Strategy: edsdatadoghqv1alpha1.ExtendedDaemonSetSpecStrategy{
Canary: strategy.Canary.DeepCopy(),
ReconcileFrequency: strategy.ReconcileFrequency.DeepCopy(),
RollingUpdate: edsdatadoghqv1alpha1.ExtendedDaemonSetSpecStrategyRollingUpdate{
MaxUnavailable: strategy.RollingUpdate.MaxUnavailable,
MaxPodSchedulerFailure: strategy.RollingUpdate.MaxPodSchedulerFailure,
MaxParallelPodCreation: strategy.RollingUpdate.MaxParallelPodCreation,
SlowStartIntervalDuration: strategy.RollingUpdate.SlowStartIntervalDuration,
SlowStartAdditiveIncrease: strategy.RollingUpdate.SlowStartAdditiveIncrease,
},
},
},
}
hash, err := comparison.SetMD5DatadogAgentGenerationAnnotation(&eds.ObjectMeta, eds.Spec)
if err != nil {
return nil, "", err
}
return eds, hash, nil
}
// newDaemonSetFromInstance creates a DaemonSet from a given DatadogAgent
func newDaemonSetFromInstance(logger logr.Logger, dda *datadoghqv1alpha1.DatadogAgent, selector *metav1.LabelSelector) (*appsv1.DaemonSet, string, error) {
template, err := newAgentPodTemplate(logger, dda, selector)
if err != nil {
return nil, "", err
}
if selector == nil {
selector = &metav1.LabelSelector{
MatchLabels: template.Labels,
}
}
strategy, err := getAgentDeploymentStrategy(dda)
if err != nil {
return nil, "", err
}
ds := &appsv1.DaemonSet{
ObjectMeta: newDaemonsetObjectMetaData(dda),
Spec: appsv1.DaemonSetSpec{
Selector: selector,
Template: *template,
UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
Type: *strategy.UpdateStrategyType,
RollingUpdate: &appsv1.RollingUpdateDaemonSet{
MaxUnavailable: strategy.RollingUpdate.MaxUnavailable,
},
},
},
}
hashDS, err := comparison.SetMD5DatadogAgentGenerationAnnotation(&ds.ObjectMeta, dda.Spec)
if err != nil {
return nil, "", err
}
return ds, hashDS, nil
}
func daemonsetName(dda *datadoghqv1alpha1.DatadogAgent) string {
if apiutils.BoolValue(dda.Spec.Agent.Enabled) && dda.Spec.Agent.DaemonsetName != "" {
return dda.Spec.Agent.DaemonsetName
}
return fmt.Sprintf("%s-%s", dda.Name, "agent")
}
func newDaemonsetObjectMetaData(dda *datadoghqv1alpha1.DatadogAgent) metav1.ObjectMeta {
labels := object.GetDefaultLabels(dda, common.DefaultAgentResourceSuffix, getAgentVersion(dda))
labels[common.AgentDeploymentNameLabelKey] = dda.Name
labels[common.AgentDeploymentComponentLabelKey] = common.DefaultAgentResourceSuffix
annotations := object.GetDefaultAnnotations(dda)
return metav1.ObjectMeta{
Name: daemonsetName(dda),
Namespace: dda.Namespace,
Labels: labels,
Annotations: annotations,
}
}
func getAgentCustomConfigConfigMapName(dda *datadoghqv1alpha1.DatadogAgent) string {
return fmt.Sprintf("%s-datadog-yaml", dda.Name)
}
func buildAgentConfigurationConfigMap(dda *datadoghqv1alpha1.DatadogAgent) (*corev1.ConfigMap, error) {
if !apiutils.BoolValue(dda.Spec.Agent.Enabled) {
return nil, nil
}
return buildConfigurationConfigMap(dda, datadoghqv1alpha1.ConvertCustomConfig(dda.Spec.Agent.CustomConfig), getAgentCustomConfigConfigMapName(dda), common.AgentCustomConfigVolumeSubPath)
}
const installInfoDataTmpl = `---
install_method:
tool: datadog-operator
tool_version: datadog-operator
installer_version: %s
`
func buildInstallInfoConfigMap(dda *datadoghqv1alpha1.DatadogAgent) (*corev1.ConfigMap, error) {
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: component.GetInstallInfoConfigMapName(dda),
Namespace: dda.Namespace,
Labels: object.GetDefaultLabels(dda, dda.Name, getAgentVersion(dda)),
Annotations: object.GetDefaultAnnotations(dda),
},
Data: map[string]string{
"install_info": fmt.Sprintf(installInfoDataTmpl, version.Version),
},
}
return configMap, nil
}
|
// mksyscall.pl syscall_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package syscall
import "unsafe"
import "syscall"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fcntl(fd uintptr, cmd int, arg int) (val int, err error) {
r0, _, e1 := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
val = int(r0)
if e1 != 0 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func IoctlTermios(fd uintptr, action int, t *Termios) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(action), uintptr(unsafe.Pointer(t)))
if e1 != 0 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func IoctlModem(fd uintptr, action int, flags *Int) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(action), uintptr(unsafe.Pointer(flags)))
if e1 != 0 {
err = e1
}
return
}
|
package recursion
var Position [9]int
var SubNum = []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
var ZhiNum = []int{1, 2, 3, 5, 7, 11, 13, 17, 19}
func IsZhi(n int) bool {
for i := 0; i < 9; i++ {
if n == ZhiNum[i] {
return true
}
}
return false
}
func CheckB(i, n int) bool {
//纵
if i-3 >= 0 { // 跳过0,1,2
if IsZhi(Position[i]+Position[i-3]) == false {
return false
}
}
//横
if i%3 != 0 { // 跳过0,3,6
if IsZhi(Position[i]+Position[i-1]) == false {
return false
}
}
return true
}
func FillBox(i,n,r int, count *int) {
}
|
//go:build integration
// +build integration
package integration
import (
"bytes"
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestOneUp(t *testing.T) {
f := newK8sFixture(t, "oneup")
f.SetRestrictedCredentials()
f.TiltUp("oneup")
// ForwardPort will fail if all the pods are not ready.
//
// We can't use the normal Tilt-managed forwards here because
// Tilt doesn't setup forwards when --watch=false.
ctx, cancel := context.WithTimeout(f.ctx, time.Minute)
defer cancel()
f.WaitForAllPodsReady(ctx, "app=oneup")
f.ForwardPort("deployment/oneup", "31234:8000")
ctx, cancel = context.WithTimeout(f.ctx, time.Minute)
defer cancel()
f.CurlUntil(ctx, "http://localhost:31234", "🍄 One-Up! 🍄")
// minimal sanity check that the engine dump works - this really just ensures that there's no egregious
// serialization issues
var b bytes.Buffer
assert.NoErrorf(t, f.tilt.DumpEngine(f.ctx, &b), "Failed to dump engine state, command output:\n%s", b.String())
}
|
package p_00001_00100
// 17. Letter Combinations of a Phone Number, https://leetcode.com/problems/letter-combinations-of-a-phone-number/
func letterCombinations(digits string) []string {
var result []string
if len(digits) == 0 {
return result
}
lettersByDigit := map[byte]string{
'2': "abc",
'3': "def",
'4': "ghi",
'5': "jkl",
'6': "mno",
'7': "pqrs",
'8': "tuv",
'9': "wxyz",
}
solve(digits, 0, "", &result, lettersByDigit)
return result
}
func solve(digits string, currentPos int, combination string, result *[]string, lettersByDigit map[byte]string) {
if len(digits) == currentPos {
*result = append(*result, combination)
} else {
digit := digits[currentPos]
letters := lettersByDigit[digit]
for i := 0; i < len(letters); i++ {
solve(digits, currentPos+1, combination+string(letters[i]), result, lettersByDigit)
}
}
}
|
package testutil
// GifType is a gif's mime type
const GifType = "image/gif"
// GifBin is the binary of a tiny gif
var GifBin = []byte{
0x47, 0x49, 0x46, 0x38, 0x37, 0x61, 0x01, 0x00, 0x01, 0x00, 0x80,
0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2c, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x02, 0x02, 0x44, 0x01,
0x00, 0x3b,
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package gpucuj tests GPU CUJ tests on lacros Chrome and ChromeOS Chrome.
package gpucuj
import (
"context"
"fmt"
"math"
"os"
"path/filepath"
"sort"
"android.googlesource.com/platform/external/perfetto/protos/perfetto/trace/github.com/google/perfetto/perfetto_proto"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/metrics"
"chromiumos/tast/local/power"
"chromiumos/tast/testing"
)
type metricInfo struct {
unit string
direction perf.Direction
uma bool
}
var metricMap = map[string]metricInfo{
"Graphics.Smoothness.Checkerboarding.TouchScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Checkerboarding.WheelScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.AllAnimations": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.AllInteractions": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.AllSequences": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.CompositorAnimation": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.MainThreadAnimation": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.PinchZoom": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.RAF": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.ScrollbarScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.TouchScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.Video": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.CompositorThread.WheelScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.CanvasAnimation": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.CompositorAnimation": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.JSAnimation": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.MainThreadAnimation": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.PinchZoom": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.RAF": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.ScrollbarScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.TouchScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.Video": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.MainThread.WheelScroll": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.PercentDroppedFrames.SlowerThread.PinchZoom": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.AllAnimations": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.AllInteractions": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.AllSequences": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.CompositorAnimation": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.PinchZoom": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.RAF": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.ScrollbarScroll": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.TouchScroll": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.Video": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Compositor.WheelScroll": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.CanvasAnimation": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.CompositorAnimation": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.JSAnimation": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.MainThreadAnimation": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.PinchZoom": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.RAF": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.ScrollbarScroll": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.TouchScroll": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.Video": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Graphics.Smoothness.Jank.Main.WheelScroll": {
unit: "count",
direction: perf.SmallerIsBetter,
uma: true,
},
"Compositing.Display.DrawToSwapUs": {
unit: "us",
direction: perf.SmallerIsBetter,
uma: true,
},
"total_power": {
unit: "joules",
direction: perf.SmallerIsBetter,
uma: false,
},
"gpu_power": {
unit: "joules",
direction: perf.SmallerIsBetter,
uma: false,
},
"nongpu_power": {
unit: "joules",
direction: perf.SmallerIsBetter,
uma: false,
},
"rapl_duration": {
unit: "seconds",
direction: perf.SmallerIsBetter,
uma: false,
},
"cpu_power": {
unit: "joules",
direction: perf.SmallerIsBetter,
uma: false,
},
"dram_power": {
unit: "joules",
direction: perf.SmallerIsBetter,
uma: false,
},
"trace_percent_dropped": {
unit: "percent",
direction: perf.SmallerIsBetter,
uma: false,
},
"trace_fps": {
unit: "count",
direction: perf.BiggerIsBetter,
uma: false,
},
"trace_num_frames": {
unit: "count",
direction: perf.BiggerIsBetter,
uma: false,
},
}
// These are the default categories for 'UI Rendering' in chrome://tracing plus 'exo' and 'wayland'.
var tracingCategories = []string{"benchmark", "cc", "exo", "gpu", "input", "toplevel", "ui", "views", "viz", "wayland"}
type statType string
const (
meanStat = "mean"
valueStat = "value"
)
type statBucketKey struct {
metric string
stat statType
bt browser.Type
}
type metricsRecorder struct {
buckets map[statBucketKey][]float64
metricMap map[string]metricInfo
}
func (m *metricsRecorder) record(ctx context.Context, invoc *testInvocation, minfo metricInfo, key statBucketKey, value float64) error {
name := fmt.Sprintf("%s.%s.%s.%s", invoc.page.name, key.metric, string(key.stat), string(key.bt))
testing.ContextLog(ctx, name, ": ", value, " ", minfo.unit)
invoc.pv.Set(perf.Metric{
Name: name,
Unit: minfo.unit,
Direction: minfo.direction,
}, value)
m.buckets[key] = append(m.buckets[key], value)
m.metricMap[key.metric] = minfo
return nil
}
func (m *metricsRecorder) recordHistogram(ctx context.Context, invoc *testInvocation, h *metrics.Histogram) error {
// Ignore empty histograms. It's hard to define what the mean should be in this case.
if h.TotalCount() == 0 {
return nil
}
mean, err := h.Mean()
if err != nil {
return errors.Wrapf(err, "failed to get mean for histogram: %s", h.Name)
}
key := statBucketKey{
metric: h.Name,
stat: meanStat,
bt: invoc.bt,
}
minfo, ok := metricMap[key.metric]
if !ok {
return errors.Errorf("failed to lookup metric info: %s", key.metric)
}
testing.ContextLog(ctx, h)
return m.record(ctx, invoc, minfo, key, mean)
}
func (m *metricsRecorder) recordValue(ctx context.Context, invoc *testInvocation, name string, value float64) error {
key := statBucketKey{
metric: name,
stat: valueStat,
bt: invoc.bt,
}
minfo, ok := metricMap[key.metric]
if !ok {
return errors.Errorf("failed to lookup metric info: %s", key.metric)
}
return m.record(ctx, invoc, minfo, key, value)
}
func (m *metricsRecorder) recordMetric(ctx context.Context, invoc *testInvocation, metric perf.Metric, value float64) error {
key := statBucketKey{
metric: metric.Name,
stat: valueStat,
bt: invoc.bt,
}
minfo := metricInfo{
unit: metric.Unit,
direction: metric.Direction,
uma: false,
}
return m.record(ctx, invoc, minfo, key, value)
}
func (m *metricsRecorder) computeStatistics(ctx context.Context, pv *perf.Values) error {
// Collect means and standard deviations for each bucket. Each bucket contains results from several different pages.
// We define the population as the set of all pages (another option would be to define the population as the
// metric itself). For histograms (meanStat), we take a single sample which contains the means for each page.
// For single values (valueStat), we take as single sample which just consists of those values.
// We estimate the following quantities:
// page_mean:
// Meaning: The mean for all pages. (e.g. mean of histogram means)
// Estimator: sample mean
// page_stddev:
// Meaning: Variance over all pages. (e.g. variance of histogram means)
// Estimator: unbiased sample variance
// N.B. we report standard deviation not variance so even though we use Bessel's correction the standard deviation
// is still biased.
// TODO: Consider extending this to also provide data where the population is the metric itself.
// e.g. metric_stddev, metric_mean - statistics on the metric overall not per-page.
var logs []string
for k, bucket := range m.buckets {
minfo, ok := m.metricMap[k.metric]
if !ok {
return errors.Errorf("failed to lookup metric info: %s", k.metric)
}
var sum float64
for _, value := range bucket {
sum += value
}
n := float64(len(bucket))
mean := sum / n
var variance float64
for _, value := range bucket {
variance += (value - mean) * (value - mean)
}
variance /= float64(len(bucket) - 1) // Bessel's correction.
stddev := math.Sqrt(variance)
m := perf.Metric{
Name: fmt.Sprintf("all.%s.%s.%s", k.metric, "page_mean", string(k.bt)),
Unit: minfo.unit,
Direction: minfo.direction,
}
s := perf.Metric{
Name: fmt.Sprintf("all.%s.%s.%s", k.metric, "page_stddev", string(k.bt)),
Unit: minfo.unit,
Direction: perf.SmallerIsBetter, // In general, it's better if standard deviation is less.
}
logs = append(logs, fmt.Sprint(m.Name, ": ", mean, " ", m.Unit), fmt.Sprint(s.Name, ": ", stddev, " ", s.Unit))
pv.Set(m, mean)
// Standard deviation can be NaN if there weren't enough points to properly calculate it,
// including Bessel's correction. Don't report it in this case.
if !math.IsNaN(stddev) && !math.IsInf(stddev, 0) {
pv.Set(s, stddev)
}
}
// Print logs in order.
sort.Strings(logs)
for _, log := range logs {
testing.ContextLog(ctx, log)
}
return nil
}
type traceable interface {
StartTracing(ctx context.Context, categories []string, opts ...browser.TraceOption) error
StopTracing(ctx context.Context) (*perfetto_proto.Trace, error)
}
func runHistogram(ctx context.Context, tconn *chrome.TestConn, tracer traceable,
invoc *testInvocation, perfFn func(ctx context.Context) error) error {
if s, err := os.Stat(invoc.traceDir); err != nil || !s.IsDir() {
return errors.Wrap(err, "given trace directory does not appear to be a directory")
}
var keys []string
for k, v := range metricMap {
if v.uma {
keys = append(keys, k)
}
}
sort.Strings(keys)
thermal := power.NewSysfsThermalMetrics()
thermal.Setup(ctx, "") // No prefix, we use our own naming scheme.
rapl, err := power.NewRAPLSnapshot()
if err != nil {
return errors.Wrap(err, "failed to get RAPL snapshot")
}
// TODO(https://crbug.com/1162385, b/177636800): Enable systrace again
if err := tracer.StartTracing(ctx, tracingCategories, browser.DisableSystrace()); err != nil {
return err
}
histograms, err := metrics.Run(ctx, tconn, perfFn, keys...)
if err != nil {
if _, err := tracer.StopTracing(ctx); err != nil {
testing.ContextLog(ctx, "Failed to stop tracing: ", err)
}
return errors.Wrap(err, "failed to get histograms")
}
// Collect temperature first in case it decreases after the test finishes.
temps, err := thermal.SnapshotValues(ctx)
if err != nil {
if _, err := tracer.StopTracing(ctx); err != nil {
testing.ContextLog(ctx, "Failed to stop tracing: ", err)
}
return errors.Wrap(err, "failed to get temperature data")
}
// `rapl` could be nil when not supported.
var raplv *power.RAPLValues
if rapl != nil {
rd, err := rapl.DiffWithCurrentRAPL()
if err != nil {
if _, err := tracer.StopTracing(ctx); err != nil {
testing.ContextLog(ctx, "Failed to stop tracing: ", err)
}
return errors.Wrap(err, "failed to compute RAPL diffs")
}
testing.ContextLog(ctx, "RAPL duration seconds ", rd.Duration().Seconds())
raplv = rd
}
tr, err := tracer.StopTracing(ctx)
if err != nil {
return err
}
filename := fmt.Sprintf("%s-%s-trace.data.gz", string(invoc.bt), invoc.page.name)
filename = filepath.Join(invoc.traceDir, filename)
if err := chrome.SaveTraceToFile(ctx, tr, filename); err != nil {
return err
}
// Store metrics in the form: Scenario.PageSet.UMA metric name.statistic.{chromeos, lacros}.
// For example, maximized.Compositing.Display.DrawToSwapUs.mean.chromeos. In crosbolt, for each
// scenario (e.g. three-dot menu), we can then easily compare between chromeos and lacros
// for the same metric, in the same scenario.
for _, h := range histograms {
if err := invoc.metrics.recordHistogram(ctx, invoc, h); err != nil {
return err
}
}
for metric, value := range temps {
if err := invoc.metrics.recordMetric(ctx, invoc, metric, value); err != nil {
return err
}
}
if raplv != nil {
nongpuPower := raplv.Total() - raplv.Uncore()
if err := invoc.metrics.recordValue(ctx, invoc, "total_power", raplv.Total()); err != nil {
return err
}
if err := invoc.metrics.recordValue(ctx, invoc, "nongpu_power", nongpuPower); err != nil {
return err
}
if err := invoc.metrics.recordValue(ctx, invoc, "cpu_power", raplv.Core()); err != nil {
return err
}
if err := invoc.metrics.recordValue(ctx, invoc, "dram_power", raplv.DRAM()); err != nil {
return err
}
if err := invoc.metrics.recordValue(ctx, invoc, "gpu_power", raplv.Uncore()); err != nil {
return err
}
if err := invoc.metrics.recordValue(ctx, invoc, "rapl_duration", raplv.Duration().Seconds()); err != nil {
return err
}
}
return nil
}
|
package api
import (
"BiliBili.com/pkg/utils"
"BiliBili.com/service"
"github.com/gin-gonic/gin"
)
// UserRegister handles the user registration endpoint: it binds the
// request payload into a service.UserRegister and responds with the
// result of the registration.
func UserRegister(c *gin.Context) {
	var srv service.UserRegister
	_ = c.ShouldBind(&srv)
	c.JSON(200, srv.Register())
}
// UserLogin handles the login endpoint: it binds the request payload
// into a service.UserLogin and responds with the login result.
func UserLogin(c *gin.Context) {
	var srv service.UserLogin
	_ = c.ShouldBind(&srv)
	c.JSON(200, srv.Login())
}
// UserUpdate handles the profile-update endpoint. The target user id is
// taken from the JWT in the Authorization header.
func UserUpdate(c *gin.Context) {
	var userUpdateService service.UserUpdate
	_ = c.ShouldBind(&userUpdateService)
	// BUG FIX: the token-parse error was ignored, so a missing or invalid
	// token dereferenced nil claims below and panicked.
	// NOTE(review): error response shape assumed — align with the
	// project's serializer if one exists.
	_, claims, err := utils.ParseUserToken(c.GetHeader("Authorization"))
	if err != nil {
		c.JSON(401, gin.H{"status": 401, "msg": "invalid token"})
		return
	}
	res := userUpdateService.Update(claims.UserId)
	c.JSON(200, res)
}
// UserInfo handles the profile-show endpoint. The target user id is
// taken from the JWT in the Authorization header.
func UserInfo(c *gin.Context) {
	var userInfoService service.UserInfo
	_ = c.ShouldBind(&userInfoService)
	// BUG FIX: the token-parse error was ignored, so a missing or invalid
	// token dereferenced nil claims below and panicked.
	// NOTE(review): error response shape assumed — align with the
	// project's serializer if one exists.
	_, claims, err := utils.ParseUserToken(c.GetHeader("Authorization"))
	if err != nil {
		c.JSON(401, gin.H{"status": 401, "msg": "invalid token"})
		return
	}
	res := userInfoService.Show(claims.UserId)
	c.JSON(200, res)
}
// UserSearch handles the user search endpoint: it binds the request
// payload into a service.UserSearch and responds with the search result.
func UserSearch(c *gin.Context) {
	var srv service.UserSearch
	_ = c.ShouldBind(&srv)
	c.JSON(200, srv.Search())
}
|
package v1alpha2
import (
"encoding/json"
"errors"
)
// DefaultPlatformArchitecture defines the default
// architecture used by mirroring platform
// release payloads.
const DefaultPlatformArchitecture = "amd64"
// PlatformType defines the content type for platforms
type PlatformType int
// TypeOCP is default
const (
TypeOCP PlatformType = iota
TypeOKD
)
var platformTypeStrings = map[PlatformType]string{
TypeOCP: "ocp",
TypeOKD: "okd",
}
var platformStringsType = map[string]PlatformType{
"ocp": TypeOCP,
"okd": TypeOKD,
}
// String returns the string representation
// of an PlatformType
func (pt PlatformType) String() string {
return platformTypeStrings[pt]
}
// MarshalJSON marshals the PlatformType as a quoted json string
func (pt PlatformType) MarshalJSON() ([]byte, error) {
if err := pt.validate(); err != nil {
return nil, err
}
return json.Marshal(pt.String())
}
// UnmarshalJSON unmarshals a quoted json string to the PlatformType
func (pt *PlatformType) UnmarshalJSON(b []byte) error {
var j string
if err := json.Unmarshal(b, &j); err != nil {
return err
}
*pt = platformStringsType[j]
return nil
}
func (pt PlatformType) validate() error {
if _, ok := platformTypeStrings[pt]; !ok {
return errors.New("unknown platform type")
}
return nil
}
|
package main
import (
"fmt"
"github.com/kormat/adventofcode/util"
"log"
"os"
"sort"
"strconv"
"strings"
)
// main reads the puzzle input, sums wrapping-paper and ribbon
// requirements over every present, and prints the totals.
func main() {
	lines, ok := util.ReadFileArg(os.Args[1:])
	if !ok {
		os.Exit(1)
	}
	var totalPaper, totalRibbon int
	for _, l := range lines {
		d, bad := parseDims(l)
		if bad {
			os.Exit(1)
		}
		totalPaper += calcPaper(d)
		totalRibbon += calcRibbon(d)
	}
	fmt.Printf("The elves need %d sqft of wrapping paper, and %d feet of ribbon.\n", totalPaper, totalRibbon)
}
// parseDims parses an "LxWxH" spec into a slice of three ints sorted
// ascending. The boolean result reports failure (true means invalid input).
func parseDims(arg string) ([]int, bool) {
	var dims []int
	parts := strings.Split(arg, "x")
	if len(parts) != 3 {
		log.Printf("Dimensions not in expected NxNxN format: %s", arg)
		return dims, true
	}
	for _, part := range parts {
		n, err := strconv.Atoi(part)
		if err != nil {
			log.Printf("Unable to parse '%s' as integer", part)
			return dims, true
		}
		dims = append(dims, n)
	}
	sort.Ints(dims)
	return dims, false
}
// calcPaper returns the wrapping paper needed: twice the area of every
// pair of sides, plus slack equal to the area of the smallest side
// (dims is sorted ascending, so that is dims[0]*dims[1]).
func calcPaper(dims []int) int {
	sqft := 0
	// BUG FIX: the original had a dead `if i == len(dims) { break }`
	// guard inside the range loop; i can never equal len(dims) there.
	for i, a := range dims {
		for _, b := range dims[i+1:] {
			sqft += 2 * a * b
		}
	}
	return sqft + dims[0]*dims[1]
}
// calcRibbon returns the ribbon length needed: the smallest perimeter
// around the box (dims is sorted ascending, so 2*(dims[0]+dims[1]))
// plus a bow equal to the box's volume.
func calcRibbon(dims []int) int {
	perimeter := 2 * (dims[0] + dims[1])
	bow := dims[0] * dims[1] * dims[2]
	return perimeter + bow
}
|
package oonimkall_test
import (
"testing"
"github.com/ooni/probe-cli/v3/pkg/oonimkall"
)
// TestSessionWebConnectivity runs a real Web Connectivity measurement
// against https://www.google.com through a live session. It performs
// network I/O, so it is skipped under -short.
func TestSessionWebConnectivity(t *testing.T) {
	if testing.Short() {
		t.Skip("skip test in short mode")
	}
	// NewSessionForTesting is provided by a sibling _test file in this package.
	sess, err := NewSessionForTesting()
	if err != nil {
		t.Fatal(err)
	}
	ctx := sess.NewContext()
	config := &oonimkall.WebConnectivityConfig{
		Input: "https://www.google.com",
	}
	results, err := sess.WebConnectivity(ctx, config)
	if err != nil {
		t.Fatal(err)
	}
	// Log traffic stats and measurement size for manual inspection.
	t.Logf("bytes received: %f", results.KibiBytesReceived)
	t.Logf("bytes sent: %f", results.KibiBytesSent)
	t.Logf("measurement: %d bytes", len(results.Measurement))
}
|
package unimatrix
// Response wraps a Parser and exposes the parsed fields through
// accessor methods.
type Response struct {
	parser *Parser
}
// NewResponse parses the given response body and wraps the resulting
// Parser in a Response. It returns an error when the body cannot be parsed.
func NewResponse(body []byte) (*Response, error) {
	// IDIOM FIX: the local was previously named `error`, shadowing the
	// predeclared error type identifier; renamed to the conventional err.
	parser, err := NewParser(body)
	if err != nil {
		return nil, err
	}
	return &Response{parser: parser}, nil
}
// Name returns the name reported by the parsed response.
func (r *Response) Name() (string, error) {
	return r.parser.Name, nil
}

// TypeName returns the type name reported by the parsed response.
func (r *Response) TypeName() (string, error) {
	return r.parser.TypeName, nil
}

// Ids returns the resource keys reported by the parsed response.
func (r *Response) Ids() ([]string, error) {
	return r.parser.Keys, nil
}

// Resources returns the resources contained in the parsed response.
func (r *Response) Resources() ([]Resource, error) {
	return r.parser.Resources, nil
}

// Count returns the number of resources in the parsed response.
func (r *Response) Count() (int, error) {
	return r.parser.Count, nil
}

// UnlimitedCount returns the total count ignoring pagination limits.
func (r *Response) UnlimitedCount() (int, error) {
	return r.parser.UnlimitedCount, nil
}

// Offset returns the pagination offset of the parsed response.
func (r *Response) Offset() (int, error) {
	return r.parser.Offset, nil
}
|
package main
import (
"context"
"flag"
"github.com/real-nil/grpc-pubsub/proto/pubsub"
"google.golang.org/grpc"
"io"
"log"
"sync/atomic"
"time"
)
// Command-line flags controlling the subscriber benchmark.
var (
	pubsubAddr = flag.String(`pubsub`, `:3456`, `set pubsub address`)
	topic = flag.String(`topic`, `hellogrpc`, `set topic for subscribe`)
	// NOTE(review): flag description reads garbled — likely meant
	// "set how long to run, as an alternative to a message count".
	timeout = flag.Duration(`timeout`, 1*time.Minute, `set how log do it, other side of count`)
	parallel = flag.Int(`parallel`, 16, `set parallel subscribers`)
)

// Q carries subscribe jobs to the workers started by runWorkers.
// It is unbuffered, so sends block until a worker is free.
var Q chan func()
// main dials the pub/sub server, starts a pool of subscriber workers and
// feeds them subscribe jobs until the context expires. The total number
// of received messages is printed on exit.
func main() {
	flag.Parse()
	var counter int64
	defer func() {
		println(`total recv msg count = `, atomic.LoadInt64(&counter))
	}()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if *timeout > 0 {
		print(`with timeout`)
		// BUG FIX: reassigning cancel here used to drop the parent
		// context's cancel func (go vet "lostcancel"); keep and defer both.
		var cancelTimeout context.CancelFunc
		ctx, cancelTimeout = context.WithTimeout(ctx, *timeout)
		defer cancelTimeout()
	}
	conn, err := grpc.DialContext(ctx, *pubsubAddr, grpc.WithInsecure(), grpc.WithBackoffMaxDelay(*timeout), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	Q = make(chan func(), 0)
	runWorkers(ctx, *parallel)
	pubsubCli := pubsub.NewPubSubClient(conn)
	// BUG FIX: the original format string had four verbs for only three
	// arguments (printing "%!s(MISSING)") and a literal `\n` inside a
	// raw string; both corrected.
	log.Printf(`start subscriber to %s/%s with timeout %s`, *pubsubAddr, *topic, *timeout)
	log.Printf(`parallel =%d`, *parallel)
	job := func() {
		stream, err := pubsubCli.Subscribe(ctx, &pubsub.Topic{Name: *topic})
		if err != nil {
			log.Printf(`can't subscribe to topic %s: %s`, *topic, err)
			// BUG FIX: falling through with a nil stream panicked on Recv.
			return
		}
		for {
			_, err := stream.Recv()
			if err == io.EOF {
				log.Printf(`exit`)
				return
			}
			if err != nil {
				log.Printf(`can't resv data: %s`, err)
				return
			}
			println(`recv`, atomic.AddInt64(&counter, 1))
		}
	}
	// Stop feeding jobs once the context is done instead of blocking
	// forever on a channel nobody reads anymore.
	for {
		select {
		case Q <- job:
		case <-ctx.Done():
			return
		}
	}
}
// runWorkers starts size worker goroutines. Each worker repeatedly takes
// jobs from Q and runs them until ctx is cancelled.
//
// BUG FIX: the original worker ran the select exactly once and exited,
// so each worker could ever process a single job; once a job returned
// (e.g. on a stream error) the worker was gone and parallelism degraded
// permanently. The select is now wrapped in a loop.
func runWorkers(ctx context.Context, size int) {
	for i := 0; i < size; i++ {
		go func() {
			for {
				select {
				case job := <-Q:
					job()
				case <-ctx.Done():
					return
				}
			}
		}()
	}
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
"github.com/atomicjolt/string_utils"
)
// GetSingleCourseCourses Return information on a single course.
//
// Accepts the same include[] parameters as the list action plus:
// https://canvas.instructure.com/doc/api/courses.html
//
// Path Parameters:
// # Path.ID (Required) ID
//
// Query Parameters:
// # Query.Include (Optional) . Must be one of needs_grading_count, syllabus_body, public_description, total_scores, current_grading_period_scores, term, account, course_progress, sections, storage_quota_used_mb, total_students, passback_status, favorites, teachers, observed_users, all_courses, permissions, observed_users, course_image, concluded- "all_courses": Also search recently deleted courses.
// - "permissions": Include permissions the current user has
// for the course.
// - "observed_users": include observed users in the enrollments
// - "course_image": Optional course image data for when there is a course image
// and the course image feature flag has been enabled
// - "concluded": Optional information to include with each Course. Indicates whether
// the course has been concluded, taking course and term dates into account.
// # Query.TeacherLimit (Optional) The maximum number of teacher enrollments to show.
// If the course contains more teachers than this, instead of giving the teacher
// enrollments, the count of teachers will be given under a _teacher_count_ key.
//
type GetSingleCourseCourses struct {
	// Path holds parameters substituted into the request URL.
	Path struct {
		ID string `json:"id" url:"id,omitempty"` // (Required)
	} `json:"path"`
	// Query holds parameters encoded into the request query string.
	Query struct {
		Include []string `json:"include" url:"include,omitempty"` // (Optional) . Must be one of needs_grading_count, syllabus_body, public_description, total_scores, current_grading_period_scores, term, account, course_progress, sections, storage_quota_used_mb, total_students, passback_status, favorites, teachers, observed_users, all_courses, permissions, observed_users, course_image, concluded
		TeacherLimit int64 `json:"teacher_limit" url:"teacher_limit,omitempty"` // (Optional)
	} `json:"query"`
}
// GetMethod returns the HTTP method used for this request.
func (t *GetSingleCourseCourses) GetMethod() string {
	return "GET"
}
// GetURLPath builds the relative API path for this request by filling
// the {id} placeholder with the configured course ID.
func (t *GetSingleCourseCourses) GetURLPath() string {
	return strings.ReplaceAll("courses/{id}", "{id}", fmt.Sprintf("%v", t.Path.ID))
}
// GetQuery URL-encodes the query parameters for this request.
func (t *GetSingleCourseCourses) GetQuery() (string, error) {
	values, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return values.Encode(), nil
}
// GetBody returns no form body; this request carries no form parameters.
func (t *GetSingleCourseCourses) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns no JSON body; this request carries no JSON payload.
func (t *GetSingleCourseCourses) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the request parameters, returning a combined error
// describing every violation, or nil when the request is valid.
func (t *GetSingleCourseCourses) HasErrors() error {
	errs := []string{}
	if t.Path.ID == "" {
		errs = append(errs, "'Path.ID' is required")
	}
	for _, v := range t.Query.Include {
		if v != "" && !string_utils.Include([]string{"needs_grading_count", "syllabus_body", "public_description", "total_scores", "current_grading_period_scores", "term", "account", "course_progress", "sections", "storage_quota_used_mb", "total_students", "passback_status", "favorites", "teachers", "observed_users", "all_courses", "permissions", "observed_users", "course_image", "concluded"}, v) {
			errs = append(errs, "Include must be one of needs_grading_count, syllabus_body, public_description, total_scores, current_grading_period_scores, term, account, course_progress, sections, storage_quota_used_mb, total_students, passback_status, favorites, teachers, observed_users, all_courses, permissions, observed_users, course_image, concluded")
		}
	}
	if len(errs) > 0 {
		// BUG FIX: the joined message was previously passed to fmt.Errorf as
		// the format string itself (go vet "printf"), so any '%' in it would
		// be misinterpreted; format it through %s instead.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas client and decodes
// the response body into a models.Course.
// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16 in favor of
// io.ReadAll; kept here to avoid import churn across the generated files.
func (t *GetSingleCourseCourses) Do(c *canvasapi.Canvas) (*models.Course, error) {
	response, err := c.SendRequest(t)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(response.Body)
	// Close before the error check: the body must be closed on every path.
	response.Body.Close()
	if err != nil {
		return nil, err
	}
	ret := models.Course{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, err
	}
	return &ret, nil
}
|
package image
import (
"context"
"os"
"path/filepath"
"github.com/Dynatrace/dynatrace-operator/src/controllers/csi/metadata"
"github.com/Dynatrace/dynatrace-operator/src/dockerconfig"
dtypes "github.com/Dynatrace/dynatrace-operator/src/dtclient"
"github.com/Dynatrace/dynatrace-operator/src/installer/common"
"github.com/Dynatrace/dynatrace-operator/src/installer/symlink"
"github.com/Dynatrace/dynatrace-operator/src/installer/zip"
"github.com/Dynatrace/dynatrace-operator/src/processmoduleconfig"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/spf13/afero"
)
// Properties bundles the inputs needed to pull an agent image and records
// the digest of the image once pulled.
type Properties struct {
	ImageUri string // full URI of the image to pull
	DockerConfig dockerconfig.DockerConfig // registry access configuration
	PathResolver metadata.PathResolver // resolves shared install paths
	Metadata metadata.Access // CSI metadata store
	// imageDigest is set by installAgentFromImage after the digest is resolved.
	imageDigest string
}
// NewImageInstaller returns an Installer that extracts the agent binaries
// from a container image onto the given filesystem.
func NewImageInstaller(fs afero.Fs, props *Properties) *Installer {
	return &Installer{
		fs: fs,
		extractor: zip.NewOneAgentExtractor(fs, props.PathResolver),
		props: props,
	}
}
// Installer installs the agent by pulling a container image and
// extracting its binaries into a shared directory.
type Installer struct {
	fs afero.Fs
	extractor zip.Extractor
	props *Properties
}
// ImageDigest returns the digest of the most recently installed image;
// it is empty until installAgentFromImage has resolved one.
func (installer Installer) ImageDigest() string {
	return installer.props.imageDigest
}
// InstallAgent pulls the configured image, places the agent binaries under
// the shared binary directory, and creates the "current version" symlink.
// On failure the partially written directories are removed so a later
// retry starts clean. It returns true when an installation is in place.
func (installer *Installer) InstallAgent(targetDir string) (bool, error) {
	log.Info("installing agent from image")
	err := installer.fs.MkdirAll(installer.props.PathResolver.AgentSharedBinaryDirBase(), common.MkDirFileMode)
	if err != nil {
		log.Info("failed to create the base shared agent directory", "err", err)
		return false, errors.WithStack(err)
	}
	if err := installer.installAgentFromImage(); err != nil {
		// Best-effort cleanup; the install error is what gets reported.
		_ = installer.fs.RemoveAll(targetDir)
		log.Info("failed to install agent from image", "err", err)
		return false, errors.WithStack(err)
	}
	sharedDir := installer.props.PathResolver.AgentSharedBinaryDirForImage(installer.ImageDigest())
	if err := symlink.CreateSymlinkForCurrentVersionIfNotExists(installer.fs, sharedDir); err != nil {
		_ = installer.fs.RemoveAll(targetDir)
		_ = installer.fs.RemoveAll(sharedDir)
		log.Info("failed to create symlink for agent installation", "err", err)
		return false, errors.WithStack(err)
	}
	return true, nil
}
// UpdateProcessModuleConfig writes an agent config dir under targetDir
// based on the currently installed image's shared binaries.
func (installer Installer) UpdateProcessModuleConfig(targetDir string, processModuleConfig *dtypes.ProcessModuleConfig) error {
	sourceDir := installer.props.PathResolver.AgentSharedBinaryDirForImage(installer.ImageDigest())
	return processmoduleconfig.CreateAgentConfigDir(installer.fs, targetDir, sourceDir, processModuleConfig)
}
// installAgentFromImage resolves the digest of the configured image,
// short-circuits if that digest is already installed, and otherwise pulls
// the image into a temporary cache dir and extracts the agent binaries.
// The cache dir is removed when the function returns.
func (installer *Installer) installAgentFromImage() error {
	defer installer.fs.RemoveAll(CacheDir)
	err := installer.fs.MkdirAll(CacheDir, common.MkDirFileMode)
	if err != nil {
		log.Info("failed to create cache dir", "err", err)
		return errors.WithStack(err)
	}
	image := installer.props.ImageUri
	sourceCtx, sourceRef, err := getSourceInfo(CacheDir, *installer.props)
	if err != nil {
		log.Info("failed to get source information", "image", image)
		return errors.WithStack(err)
	}
	imageDigest, err := getImageDigest(sourceCtx, sourceRef)
	if err != nil {
		log.Info("failed to get image digest", "image", image)
		return errors.WithStack(err)
	}
	imageDigestEncoded := imageDigest.Encoded()
	// Same digest already extracted: record it and skip the pull entirely.
	if installer.isAlreadyDownloaded(imageDigestEncoded) {
		log.Info("image is already installed", "image", image, "digest", imageDigestEncoded)
		installer.props.imageDigest = imageDigestEncoded
		return nil
	}
	imageCacheDir := getCacheDirPath(imageDigestEncoded)
	destinationCtx, destinationRef, err := getDestinationInfo(imageCacheDir)
	if err != nil {
		log.Info("failed to get destination information", "image", image, "imageCacheDir", imageCacheDir)
		return errors.WithStack(err)
	}
	err = installer.extractAgentBinariesFromImage(
		imagePullInfo{
			imageCacheDir: imageCacheDir,
			targetDir: installer.props.PathResolver.AgentSharedBinaryDirForImage(imageDigestEncoded),
			sourceCtx: sourceCtx,
			destinationCtx: destinationCtx,
			sourceRef: sourceRef,
			destinationRef: destinationRef,
		},
	)
	if err != nil {
		log.Info("failed to extract agent binaries from image", "image", image, "imageCacheDir", imageCacheDir)
		return errors.WithStack(err)
	}
	// Only record the digest after a fully successful extraction.
	installer.props.imageDigest = imageDigestEncoded
	return nil
}
// isAlreadyDownloaded reports whether the shared binary dir for the given
// digest already exists.
// NOTE(review): any Stat error other than "does not exist" (e.g. a
// permission error) also reports true — confirm that is intended.
func (installer Installer) isAlreadyDownloaded(imageDigestEncoded string) bool {
	sharedDir := installer.props.PathResolver.AgentSharedBinaryDirForImage(imageDigestEncoded)
	_, err := installer.fs.Stat(sharedDir)
	return !os.IsNotExist(err)
}
// getImageDigest asks the registry for the digest of the referenced image.
func getImageDigest(systemContext *types.SystemContext, imageReference *types.ImageReference) (digest.Digest, error) {
	return docker.GetDigest(context.TODO(), systemContext, *imageReference)
}
// getCacheDirPath returns the per-digest subdirectory inside the cache dir.
func getCacheDirPath(digest string) string {
	return filepath.Join(CacheDir, digest)
}
|
package services
import (
"github.com/aswinda/notifyme/application/api/interfaces"
)
// UserService exposes user-related operations backed by the embedded
// repository implementation.
type UserService struct {
	interfaces.IUserRepository
}
// GetUserDetail returns the detail string for the given user id by
// delegating to the embedded repository.
func (service *UserService) GetUserDetail(userId int) (string, error) {
	// BUG FIX: this previously called service.GetUserDetail — i.e. itself —
	// because the method shadows the embedded interface's method of the
	// same name, recursing until stack overflow. Call the embedded
	// repository explicitly.
	result, err := service.IUserRepository.GetUserDetail(userId)
	if err != nil {
		// BUG FIX: the error was previously swallowed (empty branch) and a
		// success result returned regardless; propagate it to the caller.
		return "", err
	}
	return result, nil
}
|
package main
import (
"fmt"
"net"
"bufio"
"strings"
)
// game runs a lockstep relay between two connected players: each frame it
// reads one line from conn1, then one line from conn2, and broadcasts the
// combined "line1#line2" frame back to both. It returns when either side
// fails to deliver a line.
// NOTE(review): write errors are not checked; a broken writer only
// surfaces on the next read.
func game(conn1 net.Conn, conn2 net.Conn) {
	writer1 := bufio.NewWriter(conn1)
	reader1 := bufio.NewReader(conn1)
	writer2 := bufio.NewWriter(conn2)
	reader2 := bufio.NewReader(conn2)
	for {
		// Wait for input from conn1.
		line1, err1 := reader1.ReadString('\n')
		if err1 != nil {
			println("conn1 read error")
			return
		}
		line1 = strings.TrimRight(line1, "\r\n")
		println("conn1 message:", line1)
		// Wait for input from conn2.
		line2, err2 := reader2.ReadString('\n')
		if err2 != nil {
			println("conn2 read error")
			return
		}
		line2 = strings.TrimRight(line2, "\r\n")
		println("conn2 message:", line2)
		// Send the combined frame back to conn1.
		var message string = line1 + "#" + line2 + "\r\n"
		writer1.WriteString(message)
		writer1.Flush()
		println("conn1 send:" + message)
		// Send the same frame to conn2.
		writer2.WriteString(message)
		writer2.Flush()
		println("conn2 send:" + message)
	}
}
// main listens on :8080 and pairs up incoming connections two at a time,
// starting a game goroutine per pair. The accept loop never terminates.
// NOTE(review): the ">>> game start" Printf has no trailing newline, and
// the listener is never closed — acceptable for a demo server.
func main() {
	ln, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	for {
		// First player of the pair.
		conn1, err1 := ln.Accept()
		if err1 != nil {
			panic("conn1 accept error")
		}
		fmt.Printf("Accept %v\n", conn1.RemoteAddr())
		// Second player; the pair plays against each other.
		conn2, err2 := ln.Accept()
		if err2 != nil {
			panic("conn2 accept error")
		}
		fmt.Printf("Accept %v\n", conn2.RemoteAddr())
		fmt.Printf(">>> game start")
		go game(conn1, conn2)
	}
}
|
package main
import (
"fmt"
"regexp"
"strconv"
"strings"
"github.com/igvaquero18/hermezon/scraper"
)
type price struct{} // price is the job that scans products for price drops.
// Run checks every product stored in the database against its store,
// looking for price drops. For each hit it notifies the stored channel
// and removes the entry; checks run concurrently in goroutines.
// NOTE(review): Run returns without waiting for those goroutines, and
// sugar.Fatalw inside one terminates the whole process — confirm intended.
func (p price) Run() {
	sugar.Debug("checking all products for price drop")
	results, err := db.GetAll(priceAction)
	if err != nil {
		sugar.Fatalw("error when reading the database", "msg", err.Error())
	}
	// PERF FIX: compile the price-matching pattern once, not once per product.
	reg := regexp.MustCompile(`\d+[\.\,]?\d*`)
	for k, v := range results {
		databaseKey := k
		keys := strings.Split(databaseKey, "|")
		values := strings.Split(v, "|")
		if len(keys) < 2 || len(values) < 2 {
			sugar.Fatalw("invalid keys and/or values returned from database.", "keys", keys, "values", values)
		}
		channel := keys[0]
		url := keys[1]
		selector := values[0]
		targetPriceStr := values[1]
		// Normalize "1.234,56"-style decimals before parsing.
		targetPrice, err := strconv.ParseFloat(strings.ReplaceAll(reg.FindString(targetPriceStr), ",", "."), 64)
		if err != nil {
			sugar.Fatalw("invalid target price retrieved from database: %s", err.Error())
		}
		sugar.Debugw("checking product price for customer",
			"channel", channel,
			"url", url,
			"selector", selector,
			"target_price", targetPriceStr,
		)
		// Build the scraper
		scr := scraper.NewScraper(
			scraper.SetExpectedStatusCode(expectedStatusCode),
			scraper.SetLogger(sugar),
			scraper.SetMaxRetries(maxRetries),
			scraper.SetRetrySeconds(retrySeconds),
			scraper.SetSelector(selector),
			scraper.SetTargetPrice(targetPrice),
			scraper.SetURL(url),
		)
		go func() {
			priceBelow, err := scr.IsPriceBelow()
			if err != nil {
				sugar.Errorw("error when checking price", "channel", channel, "url", url, "msg", err.Error())
			}
			if priceBelow {
				sugar.Debugw("Price is below!", "channel", channel, "url", url, "desired_price", targetPriceStr)
				err = messagingClient.SendMessage(
					"Product is below desired price!",
					fmt.Sprintf("URL: %s\nDesired price: %s", url, targetPriceStr),
					twilioPhone,
					channel,
				)
				if err != nil {
					sugar.Errorw("error when sending message", "msg", err.Error())
					return
				}
				// Drop the entry so the customer is notified only once.
				err = db.Delete(databaseKey, priceAction)
				if err != nil {
					sugar.Fatalw("error when reading the database", "msg", err.Error())
				}
				sugar.Debugw("deleted key from bucket", "key", databaseKey, "bucket", priceAction)
				return
			}
			sugar.Debugw("Price is not below...", "channel", channel, "url", url, "desired_price", targetPriceStr)
		}()
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package memory
const (
	// KiB is the number of bytes in a kibibyte.
	KiB = 1024
	// MiB is the number of bytes in a mebibyte.
	MiB = KiB * 1024
	// GiB is the number of bytes in a gibibyte.
	GiB = MiB * 1024
	// TiB is the number of bytes in a tebibyte.
	TiB = GiB * 1024
	// PageBytes is the number of bytes in a page.
	PageBytes = 4096
	// KiBInMiB is the denominator used to convert a KiB count to MiB.
	KiBInMiB = 1024
)
|
package symmetrictree
import (
"github.com/ovsoil/leetcode/framework/structures"
)
// TreeNode is an alias for structures.TreeNode, the shared binary tree node.
type TreeNode = structures.TreeNode
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// isSymmetric reports whether the binary tree rooted at root is a mirror
// image of itself around its center.
func isSymmetric(root *TreeNode) bool {
	return relove(root, root)
}
// relove iteratively checks that the subtrees rooted at l and r are
// mirror images of each other, processing mirrored node pairs from a queue.
func relove(l *TreeNode, r *TreeNode) bool {
	queue := []*TreeNode{l, r}
	for len(queue) != 0 {
		a, b := queue[0], queue[1]
		queue = queue[2:]
		switch {
		case a == nil && b == nil:
			continue
		case a == nil || b == nil:
			return false
		case a.Val != b.Val:
			return false
		}
		// Enqueue the two mirrored pairs of children.
		queue = append(queue, a.Left, b.Right, a.Right, b.Left)
	}
	return true
}
|
package main
import (
"fmt"
// "net/http"
"os"
"runtime"
"sync"
"sync/atomic"
"time"
)
var (
	startIncr int32 // count of goroutines that have entered their work loop
	endIncr int32 // count of goroutines that have finished their first pass
)
const goCount = 100000 // goroutines to launch
const plusCount = 10000 // local additions per pass
const callCount = 1000 // NOTE(review): unused in this file
// IncrCounter atomically increments the counter that num points to.
func IncrCounter(num *int32) {
	atomic.AddInt32(num, +1)
}
// LoadCounter atomically reads the counter that num points to.
func LoadCounter(num *int32) int32 {
	return atomic.LoadInt32(num)
}
// DecrCounter atomically decrements the counter that num points to.
func DecrCounter(num *int32) {
	atomic.AddInt32(num, -1)
}
// main launches goCount worker goroutines plus one detect goroutine that
// polls the shared counters; the process exits via detect's os.Exit once
// every worker has completed its first pass.
// NOTE(review): wg.Add is never called for the detect goroutine even
// though detect defers wg.Done(); and the workers loop forever, so
// wg.Wait() never returns — the process only ends via os.Exit in detect.
func main() {
	runtime.GOMAXPROCS(3)
	var wg sync.WaitGroup
	startT := time.Now()
	go detect(&wg)
	for i := 0; i < goCount; i++ {
		wg.Add(1)
		go work(i, &wg)
	}
	wg.Wait()
	// Unreachable in practice (see note above); kept for symmetry.
	elapsed := time.Since(startT)
	fmt.Println("all exit, time cost: ", elapsed)
}
// detect polls the start/end counters once per millisecond from a locked
// OS thread and terminates the whole process once every worker has both
// started and finished its first pass.
// NOTE(review): os.Exit(0) fires before the break, so the break, the
// deferred wg.Done() and UnlockOSThread never actually run.
func detect(wg *sync.WaitGroup) {
	defer wg.Done()
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	startT := time.Now()
	for {
		fmt.Println("time since: ", time.Since(startT))
		fmt.Println("start incr: ", LoadCounter(&startIncr))
		fmt.Println("end incr: ", LoadCounter(&endIncr))
		fmt.Println()
		if LoadCounter(&startIncr) == goCount && LoadCounter(&endIncr) == goCount {
			fmt.Println("finish detect")
			os.Exit(0)
			break
		}
		time.Sleep(1 * time.Millisecond)
	}
}
// work bumps startIncr once on its first pass, burns CPU with plusCount
// local additions per pass, and bumps endIncr after the first pass.
// It then keeps spinning forever, so the deferred wg.Done() never runs;
// the process is terminated externally by detect.
func work(gid int, wg *sync.WaitGroup) {
	defer wg.Done()
	var first = true
	var localCounter = 0
	for {
		if first == true {
			IncrCounter(&startIncr)
			// fmt.Printf("gid:%d#\n", gid)
		}
		// Busy work to keep the scheduler occupied.
		for i := 0; i < plusCount; i++ {
			localCounter += 1
		}
		if first == true {
			IncrCounter(&endIncr)
			// fmt.Printf("gid:%d#\n", gid)
			first = false
		}
	}
}
|
package main
import (
"fmt"
"github.com/jinzhu/gorm"
"github.com/spf13/viper"
)
// DbConnection initializes a GORM DB handle from the database settings in
// the given viper configuration and returns it together with any open error.
func DbConnection(c *viper.Viper) (*gorm.DB, error) {
	db, err := gorm.Open("postgres", fmt.Sprintf("host=%s port=%s user=%s dbname=%s sslmode=disable", c.GetString("config.database.host"), c.GetString("config.database.port"), c.GetString("config.database.user"), c.GetString("config.database.dbname")))
	if err != nil {
		// BUG FIX: this previously panicked, which made the error return
		// value unreachable and prevented callers from handling failures.
		return nil, err
	}
	return db, nil
}
|
package main
import (
"bytes"
"math"
)
// Statistics accumulates per-character frequency stats for one scored text.
type Statistics struct {
	letters []Letter
}

// Letter holds the observed and expected frequency data for one character.
type Letter struct {
	lower []byte // the character itself (lower-case form for letters)
	upper []byte // the byte 32 below it (upper-case form for letters)
	count int // occurrences of either form in the text
	expected float64 // expected percentage frequency in English
	freq float64 // observed percentage frequency in the text
}

// percent returns a as a percentage of b.
func percent(a, b int) float64 {
	ratio := float64(a) / float64(b)
	return ratio * 100
}

// englishFreq maps a character to its expected percentage frequency in
// typical English text. Letter keys are lower case; Score also counts
// the corresponding upper-case form.
// PERF FIX: hoisted to package level — it was previously rebuilt on
// every Score call.
var englishFreq = map[byte]float64{
	'a': 8.167,
	'b': 1.492,
	'c': 2.782,
	'd': 4.253,
	'e': 12.702,
	'f': 2.228,
	'g': 2.015,
	'h': 6.094,
	'i': 6.966,
	'j': 0.153,
	'k': 0.772,
	'l': 4.025,
	'm': 2.406,
	'n': 6.749,
	'o': 7.507,
	'p': 1.929,
	'q': 0.095,
	'r': 5.987,
	's': 6.327,
	't': 9.056,
	'u': 2.758,
	'v': 0.978,
	'w': 2.361,
	'x': 0.150,
	'y': 1.974,
	'z': 0.074,
	'1': 0.02,
	'2': 0.01,
	'3': 0.01,
	'4': 0.01,
	'5': 0.01,
	'6': 0.01,
	'7': 0.01,
	'8': 0.01,
	'9': 0.01,
	'0': 0.01,
	' ': 18.74,
	'.': 1.31,
	',': 1.24,
	'"': 0.67,
	'\'': 0.44,
	'-': 0.26,
	'?': 0.12,
	';': 0.08,
	'!': 0.08,
	':': 0.03,
	')': 0.01,
	'_': 0.01,
	'(': 0.01,
}

// Score rates how likely p is to be English text via a chi-squared test
// of observed vs expected character frequencies; LOWER scores indicate a
// higher probability of English. A penalty divisor is applied for bytes
// outside the expected ASCII range.
func Score(p []byte) float64 {
	// ROBUSTNESS FIX: an empty input previously divided by zero in
	// percent(); report the worst possible score instead.
	if len(p) == 0 {
		return math.MaxFloat64
	}
	var score float64
	s := Statistics{}
	for key, value := range englishFreq {
		c := Letter{}
		c.lower = []byte{key}
		// NOTE(review): key-32 maps letters to their upper-case form, but
		// for digit/punctuation keys it yields unrelated control bytes that
		// are then counted too — preserved from the original; confirm intent.
		c.upper = []byte{key - 32}
		a := bytes.Count(p, c.lower)
		b := bytes.Count(p, c.upper)
		c.count = a + b
		c.expected = value
		c.freq = percent(c.count, len(p))
		s.letters = append(s.letters, c)
	}
	// Chi-squared: sum of squared frequency deviations over expectation.
	for i := range s.letters {
		d := s.letters[i].expected - s.letters[i].freq
		chi := math.Pow(d, 2) / s.letters[i].expected
		score += chi
	}
	// Penalize bytes outside the printable range (LF and CR excepted).
	// NOTE(review): 20 looks like it was meant to be 32 (0x20, start of
	// printable ASCII); bytes 20-31 currently escape the penalty. Kept
	// as-is to preserve scoring behavior.
	var c int
	for key := range p {
		if (p[key] < 20 || p[key] > 126) && p[key] != 10 && p[key] != 13 {
			c++
		}
	}
	score /= (1 - (percent(c, len(p)) / 100))
	return score
}
|
package parking
import (
"reflect"
"testing"
)
// Test_110 checks a parking system with one big and one medium slot:
// the small car and the second big car must be rejected.
func Test_110(t *testing.T) {
	ps := Constructor(1, 1, 0)
	got := []bool{ps.AddCar(1), ps.AddCar(2), ps.AddCar(3), ps.AddCar(1)}
	want := []bool{true, true, false, false}
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("\nwant:%v\ngot :%v\n", want, got)
	}
}
// Test_000 checks that a parking system with zero slots rejects every car.
func Test_000(t *testing.T) {
	ps := Constructor(0, 0, 0)
	got := []bool{ps.AddCar(1), ps.AddCar(2), ps.AddCar(3)}
	want := []bool{false, false, false}
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("\nwant:%v\ngot :%v\n", want, got)
	}
}
// Test_200 checks that two big slots admit exactly two big cars.
func Test_200(t *testing.T) {
	ps := Constructor(2, 0, 0)
	got := []bool{ps.AddCar(1), ps.AddCar(1), ps.AddCar(1)}
	want := []bool{true, true, false}
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("\nwant:%v\ngot :%v\n", want, got)
	}
}
|
package main
import (
"context"
"fmt"
"net/http"
"time"
"golang.org/x/sync/singleflight"
)
// gt deduplicates concurrent greetTimer computations so overlapping
// requests share a single in-flight call.
var gt singleflight.Group
// greetTimer handles /greetTimer. Concurrent requests share one simulated
// 3-second computation via singleflight; each individual request gives up
// after one second with a 500.
func greetTimer(w http.ResponseWriter, r *http.Request) {
	var results singleflight.Result
	fmt.Println("Greet", time.Now())
	defer fmt.Println("Greet finished", time.Now())
	// BUG FIX: the cancel function was discarded (go vet "lostcancel"),
	// leaking the timeout context's resources until the timer expired.
	ctx, cancel := context.WithTimeout(r.Context(), time.Second)
	defer cancel()
	ch := gt.DoChan("greetTimer", func() (interface{}, error) {
		time.Sleep(3 * time.Second)
		t := time.Now()
		fmt.Println("Simulate results", t)
		return t, nil
	})
	select {
	case <-ctx.Done():
		// Simplified: ctx.Err() was previously called twice in the condition.
		if err := ctx.Err(); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	case results = <-ch:
	}
	fmt.Fprintf(w, "Hello World! %s", results.Val)
}
// main registers the handler and serves on :8080.
// NOTE(review): the ListenAndServe error is ignored; a failed bind exits
// silently — consider wrapping it in log.Fatal.
func main() {
	http.HandleFunc("/greetTimer", greetTimer)
	http.ListenAndServe(":8080", nil)
}
|
package server
import (
"net/http"
"github.com/gorilla/mux"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine"
"strconv"
"io/ioutil"
"encoding/json"
)
type Wallet struct {
Id int64 `json:"id"datastore:"-"`
Name string `json:"name"`
Balance float64 `json:"balance"`
}
// GetWallet loads one wallet, addressed by the user name and wallet id
// taken from the route variables.
func GetWallet(w http.ResponseWriter, r *http.Request) (interface{}, *HandlerError) {
	// BUG FIX: the original overwrote the "name" route variable with the
	// "id" variable, so the ancestor User key was built from the wallet id
	// instead of the user name and lookups missed.
	name := mux.Vars(r)["name"]
	idParam := mux.Vars(r)["id"]
	id, err := strconv.ParseInt(idParam, 10, 64)
	if err != nil {
		return nil, &HandlerError{err, "id must be an integer", http.StatusBadRequest}
	}
	c := appengine.NewContext(r)
	k := datastore.NewKey(c, "User", name, 0, nil)
	walKey := datastore.NewKey(c, "Wallet", "", id, k)
	var wal Wallet
	err = datastore.Get(c, walKey, &wal)
	if err != nil {
		return nil, &HandlerError{err, "wallet not found", http.StatusBadRequest}
	}
	// Consistency with GetWallets/NewWallet: expose the key id on the entity.
	wal.Id = id
	return wal, nil
}
// GetWallets loads every wallet belonging to the user named in the route,
// filling each Id from its datastore key.
func GetWallets(w http.ResponseWriter, r *http.Request) (interface{}, *HandlerError) {
	param := mux.Vars(r)["name"]
	c := appengine.NewContext(r)
	k := datastore.NewKey(c, "User", param, 0, nil)
	// Ancestor query scopes results to this user's entity group.
	q := datastore.NewQuery("Wallet").Ancestor(k)
	var ws []Wallet
	keys, err := q.GetAll(c, &ws)
	if err != nil {
		return nil, &HandlerError{err, "wallets not found", http.StatusBadRequest}
	}
	for index, k := range keys {
		ws[index].Id = k.IntID()
	}
	return ws, nil
}
// NewWallet decodes a wallet from the request body and stores it under
// the named user with a freshly allocated key; the new id is returned on
// the entity.
func NewWallet(w http.ResponseWriter, r *http.Request) (interface{}, *HandlerError) {
	param := mux.Vars(r)["name"]
	c := appengine.NewContext(r)
	k := datastore.NewKey(c, "User", param, 0, nil)
	// Incomplete key: the datastore assigns the numeric id on Put.
	walKey := datastore.NewIncompleteKey(c, "Wallet", k)
	wal, e := decodeWallet(r)
	if e != nil {
		return nil, &HandlerError{e, "failed to decode wallet as json", http.StatusBadRequest}
	}
	k, e3 := datastore.Put(c, walKey, &wal)
	if e3 != nil {
		return nil, &HandlerError{e3, "datastore put failed", http.StatusInternalServerError}
	}
	wal.Id = k.IntID()
	return wal, nil
}
// decodeWallet reads and JSON-decodes the request body into a Wallet.
func decodeWallet(r *http.Request) (Wallet, error) {
	var w Wallet
	defer r.Body.Close()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// BUG FIX: the read error was previously discarded with `_`,
		// letting a truncated body surface as a confusing JSON error.
		return w, err
	}
	if err := json.Unmarshal(body, &w); err != nil {
		return w, err
	}
	return w, nil
}
|
package models
import "github.com/astaxie/beego/orm"
// init registers the UserSkills model with the beego ORM at package load.
func init() {
	orm.RegisterModel(&UserSkills{})
}
// UserSkills links a user to a skill with a proficiency level
// (many-to-many join table with a payload column).
type UserSkills struct {
	Id int
	User *User `orm:"rel(fk)"`
	Skill *Skill `orm:"rel(fk)"`
	Level int
}
// TableIndex declares a composite index over Id and User for the ORM.
func (u *UserSkills) TableIndex() [][]string {
	return [][]string{{"Id", "User"}}
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wholepkg
// Struct_Empty is the trivial case: no fields at all.
type Struct_Empty struct{}
// Struct_Primitives contains only primitive fields.
type Struct_Primitives struct {
	BoolField bool
	IntField int
	StringField string
	FloatField float64
}
// Struct_Primitives_Alias exercises deep-copy of an aliased struct type.
type Struct_Primitives_Alias Struct_Primitives
// Struct_Embed_Struct_Primitives embeds a struct of primitives.
type Struct_Embed_Struct_Primitives struct {
	Struct_Primitives
}
// Struct_Embed_Int embeds an unexported primitive type.
type Struct_Embed_Int struct {
	int
}
// Struct_Struct_Primitives nests a struct of primitives as a named field.
type Struct_Struct_Primitives struct {
	StructField Struct_Primitives
}
// ManualStruct carries a hand-written DeepCopy method; the generator
// must call it rather than generating its own copy logic.
type ManualStruct struct {
	StringField string
}
// DeepCopy returns a copy of m (value semantics suffice here).
func (m ManualStruct) DeepCopy() ManualStruct {
	return m
}
// Struct_PrimitivePointers contains only pointers to primitives.
type Struct_PrimitivePointers struct {
	BoolPtrField *bool
	IntPtrField *int
	StringPtrField *string
	FloatPtrField *float64
}
// Struct_PrimitivePointers_Alias aliases the pointer-field struct.
type Struct_PrimitivePointers_Alias Struct_PrimitivePointers
// Struct_Embed_Struct_PrimitivePointers embeds the pointer-field struct.
type Struct_Embed_Struct_PrimitivePointers struct {
	Struct_PrimitivePointers
}
// Struct_Embed_Pointer embeds a bare pointer type.
type Struct_Embed_Pointer struct {
	*int
}
// Struct_Struct_PrimitivePointers nests the pointer-field struct.
type Struct_Struct_PrimitivePointers struct {
	StructField Struct_PrimitivePointers
}
// Struct_Everything combines all of the above shapes in one struct.
type Struct_Everything struct {
	BoolField bool
	IntField int
	StringField string
	FloatField float64
	StructField Struct_Primitives
	EmptyStructField Struct_Empty
	ManualStructField ManualStruct
	BoolPtrField *bool
	IntPtrField *int
	StringPtrField *string
	FloatPtrField *float64
	PrimitivePointersField Struct_PrimitivePointers
}
|
// Package locker provides locker interface
//
package locker
//go:generate mockgen -source=main.go -destination=./mock_locker/main.go
import (
"context"
"io"
"time"
)
type (
	// Locker combines context-aware lock acquisition with file-like
	// read/write/seek/close access to the locked resource.
	Locker interface {
		// LockContext acquires the lock, giving up when ctx is done or
		// after the given duration.
		LockContext(context.Context, time.Duration) error
		// UnLock releases a held lock.
		UnLock() error
		// TryLock attempts to acquire the lock without blocking.
		TryLock() error
		io.ReadWriteSeeker
		io.Closer
	}
)
|
package task107
import (
"math"
)
// FindNum returns the largest exponent i (1 <= i < k) such that 4^i < k,
// or 0 when k <= 1 or when no such exponent exists.
func FindNum(k int) int {
	result := 0
	if k > 1 {
		for i := 1; i < k; i++ {
			// PERF FIX: 4^i is monotonically increasing, so once it reaches
			// k no later exponent can qualify; the original kept scanning
			// all the way to k-1.
			if math.Pow(4, float64(i)) >= float64(k) {
				break
			}
			result = i
		}
		return result
	}
	return 0
}
|
package main
import (
"fmt"
"sync"
"sync/atomic"
)
var incr uint64 // incrementor shared across goroutines; mutated only via sync/atomic
// main launches 100 goroutines that each atomically bump the shared
// counter, waits for all of them, and reports the final value.
func main() {
	var wg sync.WaitGroup
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			defer wg.Done()
			atomic.AddUint64(&incr, 1)
			fmt.Println("Increment is:", atomic.LoadUint64(&incr))
		}()
	}
	wg.Wait()
	println("All routines are done")
	println("Value of incrementor is:", incr, "and should be 100 if no race conditions")
	println("Value is 100 therefor there is no race condition. Run 'go run -race main.go' to test for race condition.")
}
|
package gocosmos
import (
"fmt"
"reflect"
"testing"
)
// TestStmt_NumInput checks that NumInput reports the number of bound
// parameters each statement expects. DDL statements take none; SELECT
// counts its $N/:N/@N placeholders; INSERT and DELETE expect one extra
// input beyond the placeholders for the partition key value.
func TestStmt_NumInput(t *testing.T) {
	name := "TestStmt_NumInput"
	// query -> expected NumInput() result
	testData := map[string]int{
		"CREATE DATABASE dbtemp":                             0,
		"DROP DATABASE dbtemp":                               0,
		"CREATE DATABASE IF NOT EXISTS dbtemp":               0,
		"DROP DATABASE IF EXISTS dbtemp":                     0,
		"CREATE TABLE db.tbltemp WITH pk=/id":                0,
		"DROP TABLE db.tbltemp":                              0,
		"CREATE TABLE IF NOT EXISTS db.tbltemp WITH pk=/id":  0,
		"DROP TABLE IF EXISTS db.tbltemp":                    0,
		"CREATE COLLECTION db.tbltemp WITH pk=/id":           0,
		"DROP COLLECTION db.tbltemp":                         0,
		"CREATE COLLECTION IF NOT EXISTS db.tbltemp WITH pk=/id": 0,
		"DROP COLLECTION IF EXISTS db.tbltemp":                   0,
		"SELECT * FROM tbltemp WHERE id=@1 AND email=$2 OR username=:3 WITH db=mydb": 3,
		"INSERT INTO db.tbltemp (id, name, email) VALUES ($1, :2, @3)":              3 + 1, // need one extra input for partition key
		"DELETE FROM db.tbltemp WHERE id=$1":                                        1 + 1, // need one extra input for partition key
	}
	for query, numInput := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if v := stmt.NumInput(); v != numInput {
			t.Fatalf("%s failed: expected %#v but received %#v", name+"/"+query, numInput, v)
		}
	}
}
// Test_parseQuery_CreateDatabase verifies parsing of CREATE DATABASE
// statements: database name, the optional IF NOT EXISTS modifier and the
// WITH ru=/maxru= throughput options, in mixed case and with mixed
// whitespace. It also checks that conflicting or negative throughput
// options are rejected.
func Test_parseQuery_CreateDatabase(t *testing.T) {
	name := "Test_parseQuery_CreateDatabase"
	type testStruct struct {
		dbName      string
		ifNotExists bool
		ru, maxru   int
	}
	// query -> expected fields of the parsed *StmtCreateDatabase
	testData := map[string]testStruct{
		"CREATE DATABASE db1":                                  {dbName: "db1", ifNotExists: false, ru: 0, maxru: 0},
		"create database\ndb-2\r\nWITH ru=100":                 {dbName: "db-2", ifNotExists: false, ru: 100, maxru: 0},
		"CREATE\nDATABASE\r\ndb_3\nwith\r\nmaxru=100":          {dbName: "db_3", ifNotExists: false, ru: 0, maxru: 100},
		"CREATE DATABASE\r\nIF NOT EXISTS\ndb-4-0":             {dbName: "db-4-0", ifNotExists: true, ru: 0, maxru: 0},
		"create\ndatabase IF NOT EXISTS db-5_0 with\r\nru=100": {dbName: "db-5_0", ifNotExists: true, ru: 100, maxru: 0},
		"CREATE DATABASE if not exists db_6-0 WITH maxru=100":  {dbName: "db_6-0", ifNotExists: true, ru: 0, maxru: 100},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtCreateDatabase); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtCreateDatabase", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.ifNotExists != data.ifNotExists {
			t.Fatalf("%s failed: <if-not-exists> expected %#v but received %#v", name+"/"+query, data.ifNotExists, dbstmt.ifNotExists)
		} else if dbstmt.ru != data.ru {
			t.Fatalf("%s failed: <ru> expected %#v but received %#v", name+"/"+query, data.ru, dbstmt.ru)
		} else if dbstmt.maxru != data.maxru {
			t.Fatalf("%s failed: <maxru> expected %#v but received %#v", name+"/"+query, data.maxru, dbstmt.maxru)
		}
	}
	// ru and maxru are mutually exclusive and must be non-negative.
	invalidQueries := []string{
		"CREATE DATABASE dbtemp WITH ru=400 WITH maxru=1000",
		"CREATE DATABASE dbtemp WITH ru=-1 WITH maxru=1000",
		"CREATE DATABASE dbtemp WITH ru=400 WITH maxru=-1",
		"CREATE DATABASE dbtemp WITH ru=-1",
		"CREATE DATABASE dbtemp WITH maxru=-1",
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_AlterDatabase verifies parsing of ALTER DATABASE
// statements with WITH ru=/maxru= options, and that statements missing
// an option, combining both options, or using negative values are
// rejected.
func Test_parseQuery_AlterDatabase(t *testing.T) {
	name := "Test_parseQuery_AlterDatabase"
	type testStruct struct {
		dbName    string
		ru, maxru int
	}
	// query -> expected fields of the parsed *StmtAlterDatabase
	testData := map[string]testStruct{
		"ALTER database db1 WITH ru=400":      {dbName: "db1", ru: 400, maxru: 0},
		"alter DATABASE db-1 with maxru=4000": {dbName: "db-1", ru: 0, maxru: 4000},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtAlterDatabase); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtAlterDatabase", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.ru != data.ru {
			t.Fatalf("%s failed: <ru> expected %#v but received %#v", name+"/"+query, data.ru, dbstmt.ru)
		} else if dbstmt.maxru != data.maxru {
			t.Fatalf("%s failed: <maxru> expected %#v but received %#v", name+"/"+query, data.maxru, dbstmt.maxru)
		}
	}
	invalidQueries := []string{
		"ALTER DATABASE dbtemp",
		"ALTER DATABASE dbtemp WITH ru=400 WITH maxru=4000",
		"ALTER DATABASE dbtemp WITH ru=-1",
		"ALTER DATABASE dbtemp WITH maxru=-1",
	}
	for _, query := range invalidQueries {
		// Reuse stmt from the single parse to show what was wrongly
		// accepted; the original re-parsed the query a second time
		// just to print it.
		if stmt, err := parseQuery(nil, query); err == nil {
			fmt.Printf("%#v\n", stmt)
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_DropDatabase verifies parsing of DROP DATABASE
// statements, including the optional IF EXISTS modifier, with mixed case
// and whitespace.
func Test_parseQuery_DropDatabase(t *testing.T) {
	name := "Test_parseQuery_DropDatabase"
	type testStruct struct {
		dbName   string
		ifExists bool
	}
	// query -> expected fields of the parsed *StmtDropDatabase
	testData := map[string]testStruct{
		"DROP DATABASE db1":                {dbName: "db1", ifExists: false},
		"DROP\ndatabase\r\ndb-2":           {dbName: "db-2", ifExists: false},
		"drop database\r\nIF\nEXISTS db_3": {dbName: "db_3", ifExists: true},
		"Drop Database If Exists db-4_0":   {dbName: "db-4_0", ifExists: true},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtDropDatabase); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtDropDatabase", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.ifExists != data.ifExists {
			t.Fatalf("%s failed: <if-exists> expected %#v but received %#v", name+"/"+query, data.ifExists, dbstmt.ifExists)
		}
	}
}
// Test_parseQuery_ListDatabases verifies that LIST DATABASES/DATABASE
// statements (any case, any whitespace) parse into *StmtListDatabases.
func Test_parseQuery_ListDatabases(t *testing.T) {
	name := "Test_parseQuery_ListDatabases"
	for _, query := range []string{"LIST\nDATABASES", "list\r\n database"} {
		stmt, err := parseQuery(nil, query)
		if err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		}
		if _, ok := stmt.(*StmtListDatabases); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtListDatabases", name+"/"+query)
		}
	}
}
/*----------------------------------------------------------------------*/
// Test_parseQuery_CreateCollection verifies parsing of CREATE
// COLLECTION/TABLE statements: db and collection names, IF NOT EXISTS,
// pk/largePK partition keys, ru/maxru options and uk unique-key groups
// (colon-separated groups of comma/semicolon-separated paths).
// NOTE(review): the fixtures populate isLargePk but the loop never
// asserts dbstmt.isLargePk — the large-pk flag is effectively untested
// here (and the last fixture sets largePK yet expects isLargePk: false).
func Test_parseQuery_CreateCollection(t *testing.T) {
	name := "Test_parseQuery_CreateCollection"
	type testStruct struct {
		dbName      string
		collName    string
		ifNotExists bool
		ru, maxru   int
		pk          string
		isLargePk   bool
		uk          [][]string
	}
	// query -> expected fields of the parsed *StmtCreateCollection
	testData := map[string]testStruct{
		"CREATE COLLECTION db1.table1 WITH pk=/id":                                                   {dbName: "db1", collName: "table1", ifNotExists: false, ru: 0, maxru: 0, pk: "/id", isLargePk: false, uk: nil},
		"create\ntable\r\ndb-2.table_2 WITH\r\nPK=/email WITH\nru=100":                               {dbName: "db-2", collName: "table_2", ifNotExists: false, ru: 100, maxru: 0, pk: "/email", isLargePk: false, uk: nil},
		"CREATE collection\nIF\nNOT\t\nEXISTS\r\n\tdb_3.table-3 with largePK=/id WITH\tmaxru=100":    {dbName: "db_3", collName: "table-3", ifNotExists: true, ru: 0, maxru: 100, pk: "/id", isLargePk: true, uk: nil},
		"create TABLE if not exists db-0_1.table_0-1 WITH LARGEpk=/a/b/c with uk=/a:/b,/c/d;/e/f/g":  {dbName: "db-0_1", collName: "table_0-1", ifNotExists: true, ru: 0, maxru: 0, pk: "/a/b/c", isLargePk: false, uk: [][]string{{"/a"}, {"/b", "/c/d"}, {"/e/f/g"}}},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtCreateCollection); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtCreateCollection", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.ifNotExists != data.ifNotExists {
			t.Fatalf("%s failed: <if-not-exists> expected %#v but received %#v", name+"/"+query, data.ifNotExists, dbstmt.ifNotExists)
		} else if dbstmt.ru != data.ru {
			t.Fatalf("%s failed: <ru> expected %#v but received %#v", name+"/"+query, data.ru, dbstmt.ru)
		} else if dbstmt.maxru != data.maxru {
			t.Fatalf("%s failed: <maxru> expected %#v but received %#v", name+"/"+query, data.maxru, dbstmt.maxru)
		} else if dbstmt.pk != data.pk {
			t.Fatalf("%s failed: <pk> expected %#v but received %#v", name+"/"+query, data.pk, dbstmt.pk)
		} else if !reflect.DeepEqual(dbstmt.uk, data.uk) {
			t.Fatalf("%s failed: <uk> expected %#v but received %#v", name+"/"+query, data.uk, dbstmt.uk)
		}
	}
	// Missing pk, duplicate pk specs, conflicting or negative throughput,
	// and a missing collection name must all fail to parse.
	invalidQueries := []string{
		"CREATE collection db.coll",
		"CREATE collection db.coll WITH pk=/a WITH largepk=/b",
		"CREATE collection db.coll WITH pk=",
		"CREATE collection db.coll WITH largepk=",
		"CREATE collection db.coll WITH pk=/id WITH ru=400 WITH maxru=1000",
		"create TABLE db.coll WITH pk=/id WITH ru=-1 WITH maxru=1000",
		"CREATE COLLECTION db.coll WITH pk=/id WITH ru=400 WITH maxru=-1",
		"CREATE TABLE db.table WITH pk=/id WITH ru=-1",
		"CREATE COLLECTION db.table WITH pk=/id WITH ru=-1",
		"CREATE TABLE db WITH pk=/id", // no collection name
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_CreateCollectionDefaultDb verifies CREATE
// COLLECTION/TABLE parsing when a default database is supplied: an
// unqualified collection name falls back to the default db, while a
// db.coll qualified name overrides it. A leading-dot name must fail.
// NOTE(review): as in Test_parseQuery_CreateCollection, isLargePk is
// populated in the fixtures but never asserted.
func Test_parseQuery_CreateCollectionDefaultDb(t *testing.T) {
	name := "Test_parseQuery_CreateCollectionDefaultDb"
	dbName := "mydb"
	type testStruct struct {
		dbName      string
		collName    string
		ifNotExists bool
		ru, maxru   int
		pk          string
		isLargePk   bool
		uk          [][]string
	}
	// query -> expected fields of the parsed *StmtCreateCollection
	testData := map[string]testStruct{
		"CREATE COLLECTION table1 WITH pk=/id":                                                      {dbName: dbName, collName: "table1", ifNotExists: false, ru: 0, maxru: 0, pk: "/id", isLargePk: false, uk: nil},
		"create\ntable\r\ndb2.table_2 WITH\r\nPK=/email WITH\nru=100":                               {dbName: "db2", collName: "table_2", ifNotExists: false, ru: 100, maxru: 0, pk: "/email", isLargePk: false, uk: nil},
		"CREATE collection\nIF\nNOT\t\nEXISTS\r\n\ttable-3 with largePK=/id WITH\tmaxru=100":        {dbName: dbName, collName: "table-3", ifNotExists: true, ru: 0, maxru: 100, pk: "/id", isLargePk: true, uk: nil},
		"create TABLE if not exists db3.table_0-1 WITH LARGEpk=/a/b/c with uk=/a:/b,/c/d;/e/f/g":    {dbName: "db3", collName: "table_0-1", ifNotExists: true, ru: 0, maxru: 0, pk: "/a/b/c", isLargePk: false, uk: [][]string{{"/a"}, {"/b", "/c/d"}, {"/e/f/g"}}},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtCreateCollection); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtCreateCollection", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.ifNotExists != data.ifNotExists {
			t.Fatalf("%s failed: <if-not-exists> expected %#v but received %#v", name+"/"+query, data.ifNotExists, dbstmt.ifNotExists)
		} else if dbstmt.ru != data.ru {
			t.Fatalf("%s failed: <ru> expected %#v but received %#v", name+"/"+query, data.ru, dbstmt.ru)
		} else if dbstmt.maxru != data.maxru {
			t.Fatalf("%s failed: <maxru> expected %#v but received %#v", name+"/"+query, data.maxru, dbstmt.maxru)
		} else if dbstmt.pk != data.pk {
			t.Fatalf("%s failed: <pk> expected %#v but received %#v", name+"/"+query, data.pk, dbstmt.pk)
		} else if !reflect.DeepEqual(dbstmt.uk, data.uk) {
			t.Fatalf("%s failed: <uk> expected %#v but received %#v", name+"/"+query, data.uk, dbstmt.uk)
		}
	}
	invalidQueries := []string{
		"CREATE TABLE .mytable WITH pk=/id",
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_AlterCollection verifies parsing of ALTER
// COLLECTION/TABLE statements with WITH ru=/maxru= options, and that
// statements missing an option, missing a db name, combining both
// options, or using negative values are rejected.
func Test_parseQuery_AlterCollection(t *testing.T) {
	name := "Test_parseQuery_AlterCollection"
	type testStruct struct {
		dbName    string
		collName  string
		ru, maxru int
	}
	// query -> expected fields of the parsed *StmtAlterCollection
	testData := map[string]testStruct{
		"ALTER collection db1.table1 WITH ru=400":      {dbName: "db1", collName: "table1", ru: 400, maxru: 0},
		"alter\nTABLE\r\ndb-2.table_2 WITH\r\nmaxru=40000": {dbName: "db-2", collName: "table_2", ru: 0, maxru: 40000},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtAlterCollection); !ok {
			// Fixed: the message previously named *StmtCreateCollection.
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtAlterCollection", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.ru != data.ru {
			t.Fatalf("%s failed: <ru> expected %#v but received %#v", name+"/"+query, data.ru, dbstmt.ru)
		} else if dbstmt.maxru != data.maxru {
			t.Fatalf("%s failed: <maxru> expected %#v but received %#v", name+"/"+query, data.maxru, dbstmt.maxru)
		}
	}
	invalidQueries := []string{
		"ALTER collection db.coll",
		"ALTER collection coll WITH ru=400",
		"ALTER collection .coll WITH maxru=4000",
		"alter TABLE db.coll WITH ru=400 WITH maxru=4000",
		"alter TABLE db.coll WITH ru=-1",
		"alter TABLE db.coll WITH maxru=-1",
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_AlterCollectionDefaultDb verifies ALTER
// COLLECTION/TABLE parsing with a default database: unqualified
// collection names fall back to the default db, qualified names
// override it. Statements with a leading-dot or missing name must fail.
func Test_parseQuery_AlterCollectionDefaultDb(t *testing.T) {
	name := "Test_parseQuery_AlterCollectionDefaultDb"
	dbName := "mydb"
	type testStruct struct {
		dbName    string
		collName  string
		ru, maxru int
	}
	// query -> expected fields of the parsed *StmtAlterCollection
	testData := map[string]testStruct{
		"ALTER collection db1.table1 WITH ru=400":          {dbName: "db1", collName: "table1", ru: 400, maxru: 0},
		"alter\nTABLE\r\ndb-2.table_2 WITH\r\nmaxru=40000": {dbName: "db-2", collName: "table_2", ru: 0, maxru: 40000},
		"ALTER collection table1 WITH ru=400":              {dbName: dbName, collName: "table1", ru: 400, maxru: 0},
		"alter\nTABLE\r\ntable_2 WITH\r\nmaxru=40000":      {dbName: dbName, collName: "table_2", ru: 0, maxru: 40000},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtAlterCollection); !ok {
			// Fixed: the message previously named *StmtCreateCollection.
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtAlterCollection", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.ru != data.ru {
			t.Fatalf("%s failed: <ru> expected %#v but received %#v", name+"/"+query, data.ru, dbstmt.ru)
		} else if dbstmt.maxru != data.maxru {
			t.Fatalf("%s failed: <maxru> expected %#v but received %#v", name+"/"+query, data.maxru, dbstmt.maxru)
		}
	}
	invalidQueries := []string{
		"ALTER COLLECTION .mytable WITH ru=400",
		"ALTER COLLECTION WITH ru=400",
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_DropCollection verifies parsing of DROP
// COLLECTION/TABLE statements, including the optional IF EXISTS modifier
// and flexible whitespace/case, and that statements missing a collection
// name are rejected.
func Test_parseQuery_DropCollection(t *testing.T) {
	name := "Test_parseQuery_DropCollection"
	type testStruct struct {
		dbName   string
		collName string
		ifExists bool
	}
	// query -> expected fields of the parsed *StmtDropCollection
	testData := map[string]testStruct{
		"DROP COLLECTION db1.table1":               {dbName: "db1", collName: "table1", ifExists: false},
		"DROP\t\ntable\r\n\tdb-2.table_2":          {dbName: "db-2", collName: "table_2", ifExists: false},
		"drop collection\nIF EXISTS\tdb_3.table-3": {dbName: "db_3", collName: "table-3", ifExists: true},
		"Drop Table If Exists db-4_0.table_4-0":    {dbName: "db-4_0", collName: "table_4-0", ifExists: true},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtDropCollection); !ok {
			// Fixed: the message previously named *StmtDropDatabase.
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtDropCollection", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			// Fixed: this message previously printed the dbName values
			// instead of the mismatching collName values.
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.ifExists != data.ifExists {
			t.Fatalf("%s failed: <if-exists> expected %#v but received %#v", name+"/"+query, data.ifExists, dbstmt.ifExists)
		}
	}
	invalidQueries := []string{
		"DROP collection db", // no collection name
		"drop TABLE db",      // no collection name
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_DropCollectionDefaultDb verifies DROP COLLECTION/TABLE
// parsing with a default database: unqualified names fall back to the
// default db, qualified names override it, and a leading-dot name fails.
func Test_parseQuery_DropCollectionDefaultDb(t *testing.T) {
	name := "Test_parseQuery_DropCollectionDefaultDb"
	dbName := "mydb"
	type testStruct struct {
		dbName   string
		collName string
		ifExists bool
	}
	// query -> expected fields of the parsed *StmtDropCollection
	testData := map[string]testStruct{
		"DROP COLLECTION table1":                {dbName: dbName, collName: "table1", ifExists: false},
		"DROP\t\ntable\r\n\tdb-2.table_2":       {dbName: "db-2", collName: "table_2", ifExists: false},
		"drop collection\nIF EXISTS\ttable-3":   {dbName: dbName, collName: "table-3", ifExists: true},
		"Drop Table If Exists db-4_0.table_4-0": {dbName: "db-4_0", collName: "table_4-0", ifExists: true},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtDropCollection); !ok {
			// Fixed: the message previously named *StmtDropDatabase.
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtDropCollection", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			// Fixed: this message previously printed the dbName values
			// instead of the mismatching collName values.
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.ifExists != data.ifExists {
			t.Fatalf("%s failed: <if-exists> expected %#v but received %#v", name+"/"+query, data.ifExists, dbstmt.ifExists)
		}
	}
	invalidQueries := []string{
		"DROP collection .mytable",
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_ListCollections verifies parsing of LIST
// COLLECTIONS/COLLECTION/TABLES/TABLE FROM <db> statements, and that
// every variant without a FROM clause or database name is rejected.
func Test_parseQuery_ListCollections(t *testing.T) {
	name := "Test_parseQuery_ListCollections"
	// query -> expected dbName of the parsed *StmtListCollections
	testData := map[string]string{
		"LIST COLLECTIONS from db1":          "db1",
		"list\n\tcollection FROM\r\n db-2":   "db-2",
		"LIST tables\r\n\tFROM\tdb_3":        "db_3",
		"list TABLE from db-4_0":             "db-4_0",
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtListCollections); !ok {
			// Fixed: the message previously named *StmtListDatabases.
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtListCollections", name+"/"+query)
		} else if dbstmt.dbName != data {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data, dbstmt.dbName)
		}
	}
	invalidQueries := []string{
		"LIST COLLECTIONS",
		"LIST TABLES",
		"LIST COLLECTION",
		"LIST TABLE",
		"LIST COLLECTIONS FROM",
		"LIST TABLES FROM",
		"LIST COLLECTION FROM",
		"LIST TABLE FROM",
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_ListCollectionsDefaultDb verifies LIST
// COLLECTIONS/TABLES parsing with a default database: omitting the FROM
// clause falls back to the default db, an explicit FROM overrides it.
func Test_parseQuery_ListCollectionsDefaultDb(t *testing.T) {
	name := "Test_parseQuery_ListCollectionsDefaultDb"
	dbName := "mydb"
	// query -> expected dbName of the parsed *StmtListCollections
	testData := map[string]string{
		"LIST COLLECTIONS":                 dbName,
		"list\n\tcollection FROM\r\n db-2": "db-2",
		"LIST tables":                      dbName,
		"list TABLE from db-4_0":           "db-4_0",
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtListCollections); !ok {
			// Fixed: the message previously named *StmtListDatabases.
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtListCollections", name+"/"+query)
		} else if dbstmt.dbName != data {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data, dbstmt.dbName)
		}
	}
}
// Test_parseQuery_Insert verifies parsing of INSERT INTO statements:
// field lists, literal values (null/number/bool/JSON-quoted string/
// object/array) and $N/:N/@N placeholders (each parsed into a
// placeholder carrying its ordinal), plus rejection of malformed
// statements. isUpsert must be false for plain INSERT.
func Test_parseQuery_Insert(t *testing.T) {
	name := "Test_parseQuery_Insert"
	type testStruct struct {
		dbName   string
		collName string
		fields   []string
		values   []interface{}
	}
	// query -> expected fields/values of the parsed *StmtInsert
	testData := map[string]testStruct{
		`INSERT INTO
db1.table1 (a, b, c, d, e,
f) VALUES
(null, 1.0,
true, "\"a string 'with' \\\"quote\\\"\"", "{\"key\":\"value\"}", "[2.0,null,false,\"a string 'with' \\\"quote\\\"\"]")`: {
			dbName: "db1", collName: "table1", fields: []string{"a", "b", "c", "d", "e", "f"}, values: []interface{}{
				nil, 1.0, true, `a string 'with' "quote"`, map[string]interface{}{"key": "value"}, []interface{}{2.0, nil, false, `a string 'with' "quote"`},
			},
		},
		`INSERT
INTO db-2.table_2 (
a,b,c) VALUES (
$1, :3, @2)`: {
			dbName: "db-2", collName: "table_2", fields: []string{"a", "b", "c"}, values: []interface{}{
				placeholder{1}, placeholder{3}, placeholder{2},
			},
		},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtInsert); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtInsert", name+"/"+query)
		} else if dbstmt.isUpsert {
			t.Fatalf("%s failed: is-upsert must be disabled", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if !reflect.DeepEqual(dbstmt.fields, data.fields) {
			t.Fatalf("%s failed: <fields> expected %#v but received %#v", name+"/"+query, data.fields, dbstmt.fields)
		} else if !reflect.DeepEqual(dbstmt.values, data.values) {
			t.Fatalf("%s failed: <values> expected %#v but received %#v", name+"/"+query, data.values, dbstmt.values)
		}
	}
	invalidQueries := []string{
		`INSERT INTO db (a,b,c) VALUES (1,2,3)`,                    // no collection name
		`INSERT INTO db.table (a,b,c)`,                             // no VALUES part
		`INSERT INTO db.table VALUES (1,2,3)`,                      // no column list
		`INSERT INTO db.table (a) VALUES ('a string')`,             // invalid string literature
		`INSERT INTO db.table (a) VALUES ("a string")`,             // should be "\"a string\""
		`INSERT INTO db.table (a) VALUES ("{key:value}")`,          // should be "{\"key\:\"value\"}"
		`INSERT INTO db.table (a,b) VALUES (1,2,3)`,                // number of field and value mismatch
		`INSERT INTO db.table (a,b) VALUES (0x1qa,2)`,              // invalid number
		`INSERT INTO db.table (a,b) VALUES ("cannot \\"unquote",2)`, // invalid string
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_InsertDefaultDb verifies INSERT INTO parsing with a
// default database: an unqualified collection name falls back to the
// default db, a db.coll qualified name overrides it, and a leading-dot
// name fails.
func Test_parseQuery_InsertDefaultDb(t *testing.T) {
	name := "Test_parseQuery_InsertDefaultDb"
	dbName := "mydb"
	type testStruct struct {
		dbName   string
		collName string
		fields   []string
		values   []interface{}
	}
	// query -> expected fields/values of the parsed *StmtInsert
	testData := map[string]testStruct{
		`INSERT INTO
table1 (a, b, c, d, e,
f) VALUES
(null, 1.0,
true, "\"a string 'with' \\\"quote\\\"\"", "{\"key\":\"value\"}", "[2.0,null,false,\"a string 'with' \\\"quote\\\"\"]")`: {
			dbName: dbName, collName: "table1", fields: []string{"a", "b", "c", "d", "e", "f"}, values: []interface{}{
				nil, 1.0, true, `a string 'with' "quote"`, map[string]interface{}{"key": "value"}, []interface{}{2.0, nil, false, `a string 'with' "quote"`},
			},
		},
		`INSERT
INTO db-2.table_2 (
a,b,c) VALUES (
$1, :3, @2)`: {
			dbName: "db-2", collName: "table_2", fields: []string{"a", "b", "c"}, values: []interface{}{
				placeholder{1}, placeholder{3}, placeholder{2},
			},
		},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtInsert); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtInsert", name+"/"+query)
		} else if dbstmt.isUpsert {
			t.Fatalf("%s failed: is-upsert must be disabled", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if !reflect.DeepEqual(dbstmt.fields, data.fields) {
			t.Fatalf("%s failed: <fields> expected %#v but received %#v", name+"/"+query, data.fields, dbstmt.fields)
		} else if !reflect.DeepEqual(dbstmt.values, data.values) {
			t.Fatalf("%s failed: <values> expected %#v but received %#v", name+"/"+query, data.values, dbstmt.values)
		}
	}
	invalidQueries := []string{
		`INSERT INTO .table (a,b) VALUES (1,2)`,
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_Upsert mirrors Test_parseQuery_Insert for UPSERT INTO
// statements: same field/value/placeholder parsing, but the resulting
// *StmtInsert must have isUpsert enabled.
func Test_parseQuery_Upsert(t *testing.T) {
	name := "Test_parseQuery_Upsert"
	type testStruct struct {
		dbName   string
		collName string
		fields   []string
		values   []interface{}
	}
	// query -> expected fields/values of the parsed *StmtInsert
	testData := map[string]testStruct{
		`UPSERT INTO
db1.table1 (a,
b, c, d, e,
f) VALUES
(null, 1.0, true,
"\"a string 'with' \\\"quote\\\"\"", "{\"key\":\"value\"}", "[2.0,null,false,\"a string 'with' \\\"quote\\\"\"]")`: {
			dbName: "db1", collName: "table1", fields: []string{"a", "b", "c", "d", "e", "f"}, values: []interface{}{
				nil, 1.0, true, `a string 'with' "quote"`, map[string]interface{}{"key": "value"}, []interface{}{2.0, nil, false, `a string 'with' "quote"`},
			},
		},
		`UPSERT
INTO db-2.table_2 (
a,b,c) VALUES ($1,
:3, @2)`: {
			dbName: "db-2", collName: "table_2", fields: []string{"a", "b", "c"}, values: []interface{}{
				placeholder{1}, placeholder{3}, placeholder{2},
			},
		},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtInsert); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtInsert", name+"/"+query)
		} else if !dbstmt.isUpsert {
			t.Fatalf("%s failed: is-upsert must be enabled", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if !reflect.DeepEqual(dbstmt.fields, data.fields) {
			t.Fatalf("%s failed: <fields> expected %#v but received %#v", name+"/"+query, data.fields, dbstmt.fields)
		} else if !reflect.DeepEqual(dbstmt.values, data.values) {
			t.Fatalf("%s failed: <values> expected %#v but received %#v", name+"/"+query, data.values, dbstmt.values)
		}
	}
	invalidQueries := []string{
		`UPSERT INTO db (a,b,c) VALUES (1,2,3)`,           // no collection name
		`UPSERT INTO db.table (a,b,c)`,                    // no VALUES part
		`UPSERT INTO db.table VALUES (1,2,3)`,             // no column list
		`UPSERT INTO db.table (a) VALUES ('a string')`,    // invalid string literature
		`UPSERT INTO db.table (a) VALUES ("a string")`,    // should be "\"a string\""
		`UPSERT INTO db.table (a) VALUES ("{key:value}")`, // should be "{\"key\:\"value\"}"
		`UPSERT INTO db.table (a,b) VALUES (1,2,3)`,       // number of field and value mismatch
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_UpsertDefaultDb verifies UPSERT INTO parsing with a
// default database: unqualified collection names fall back to the
// default db, qualified names override it, leading-dot names fail, and
// isUpsert must be enabled on the parsed *StmtInsert.
func Test_parseQuery_UpsertDefaultDb(t *testing.T) {
	name := "Test_parseQuery_UpsertDefaultDb"
	dbName := "mydb"
	type testStruct struct {
		dbName   string
		collName string
		fields   []string
		values   []interface{}
	}
	// query -> expected fields/values of the parsed *StmtInsert
	testData := map[string]testStruct{
		`UPSERT INTO
table1 (a,
b, c, d, e,
f) VALUES
(null, 1.0, true,
"\"a string 'with' \\\"quote\\\"\"", "{\"key\":\"value\"}", "[2.0,null,false,\"a string 'with' \\\"quote\\\"\"]")`: {
			dbName: dbName, collName: "table1", fields: []string{"a", "b", "c", "d", "e", "f"}, values: []interface{}{
				nil, 1.0, true, `a string 'with' "quote"`, map[string]interface{}{"key": "value"}, []interface{}{2.0, nil, false, `a string 'with' "quote"`},
			},
		},
		`UPSERT
INTO db-2.table_2 (
a,b,c) VALUES ($1,
:3, @2)`: {
			dbName: "db-2", collName: "table_2", fields: []string{"a", "b", "c"}, values: []interface{}{
				placeholder{1}, placeholder{3}, placeholder{2},
			},
		},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtInsert); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtInsert", name+"/"+query)
		} else if !dbstmt.isUpsert {
			t.Fatalf("%s failed: is-upsert must be enabled", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if !reflect.DeepEqual(dbstmt.fields, data.fields) {
			t.Fatalf("%s failed: <fields> expected %#v but received %#v", name+"/"+query, data.fields, dbstmt.fields)
		} else if !reflect.DeepEqual(dbstmt.values, data.values) {
			t.Fatalf("%s failed: <values> expected %#v but received %#v", name+"/"+query, data.values, dbstmt.values)
		}
	}
	invalidQueries := []string{
		`UPSERT INTO .table (a,b,c) VALUES (1,2,3)`,
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_Delete verifies parsing of DELETE FROM ... WHERE id=...
// statements. The id may be a bare literal, a quoted literal (quotes
// stripped into idStr) or a $N/:N/@N placeholder (captured in id);
// for non-placeholder ids the parsed id field stays nil.
func Test_parseQuery_Delete(t *testing.T) {
	name := "Test_parseQuery_Delete"
	type testStruct struct {
		dbName   string
		collName string
		idStr    string
		id       interface{}
	}
	// query -> expected fields of the parsed *StmtDelete
	testData := map[string]testStruct{
		`DELETE FROM
db1.table1 WHERE
id=abc`: {dbName: "db1", collName: "table1", idStr: "abc", id: nil},
		`
DELETE
FROM db-2.table_2
WHERE id="def"`: {dbName: "db-2", collName: "table_2", idStr: "def", id: nil},
		`DELETE FROM
db_3-0.table-3_0 WHERE
id=@2`: {dbName: "db_3-0", collName: "table-3_0", idStr: "@2", id: placeholder{2}},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtDelete); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtDelete", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.idStr != data.idStr {
			t.Fatalf("%s failed: <id-str> expected %#v but received %#v", name+"/"+query, data.idStr, dbstmt.idStr)
		} else if !reflect.DeepEqual(dbstmt.id, data.id) {
			t.Fatalf("%s failed: <id> expected %#v but received %#v", name+"/"+query, data.id, dbstmt.id)
		}
	}
	// Missing WHERE, empty/unbalanced-quote ids and trailing junk after
	// the id must all fail to parse.
	invalidQueries := []string{
		`DELETE FROM db WHERE id=1`,    // no collection name
		`DELETE FROM db.table`,         // no WHERE part
		`DELETE FROM db.table WHERE id=`, // id is empty
		`DELETE FROM db.table WHERE id="1`,
		`DELETE FROM db.table WHERE id=2"`,
		`DELETE FROM db.table WHERE id=@1 a`,
		`DELETE FROM db.table WHERE id=b $2`,
		`DELETE FROM db.table WHERE id=c :3 d`,
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_DeleteDefaultDb verifies DELETE parsing when a default
// database is supplied: a query without a <db-name>. prefix falls back to the
// default db, while an explicit db.collection still takes precedence.
func Test_parseQuery_DeleteDefaultDb(t *testing.T) {
	name := "Test_parseQuery_DeleteDefaultDb"
	dbName := "mydb" // default database passed to parseQueryWithDefaultDb
	type testStruct struct {
		dbName   string
		collName string
		idStr    string
		id       interface{} // nil for literal ids, placeholder{n} for parameters
	}
	testData := map[string]testStruct{
		// No db prefix: default db is used.
		`DELETE FROM
table1 WHERE
	id=abc`: {dbName: dbName, collName: "table1", idStr: "abc", id: nil},
		// Explicit db prefix overrides the default.
		`
	DELETE
FROM db-2.table_2
WHERE id="def"`: {dbName: "db-2", collName: "table_2", idStr: "def", id: nil},
		`DELETE FROM
db_3-0.table-3_0 WHERE
	id=@2`: {dbName: "db_3-0", collName: "table-3_0", idStr: "@2", id: placeholder{2}},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtDelete); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtDelete", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.idStr != data.idStr {
			t.Fatalf("%s failed: <id-str> expected %#v but received %#v", name+"/"+query, data.idStr, dbstmt.idStr)
		} else if !reflect.DeepEqual(dbstmt.id, data.id) {
			t.Fatalf("%s failed: <id> expected %#v but received %#v", name+"/"+query, data.id, dbstmt.id)
		}
	}
	invalidQueries := []string{
		`DELETE FROM .table WHERE id=1`, // no collection name
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_Select verifies that SELECT queries are parsed into
// *StmtSelect: database and collection come from WITH clauses (database/db,
// collection/table aliases), cross-partition mode is detected either from the
// CROSS PARTITION keywords or WITH cross_partition=true, and numbered
// placeholders (@n, $n, :n) are rewritten to the @_n form in the forwarded
// select query.
func Test_parseQuery_Select(t *testing.T) {
	name := "Test_parseQuery_Select"
	type testStruct struct {
		dbName           string
		collName         string
		isCrossPartition bool
		selectQuery      string // the rewritten query after WITH clauses are stripped
	}
	testData := map[string]testStruct{
		`SELECT * FROM c WITH database=db WITH collection=tbl`: {
			dbName: "db", collName: "tbl", isCrossPartition: false, selectQuery: `SELECT * FROM c`},
		`SELECT CROSS PARTITION * FROM c WHERE id="1" WITH db=db-1 WITH table=tbl_1`: {
			dbName: "db-1", collName: "tbl_1", isCrossPartition: true, selectQuery: `SELECT * FROM c WHERE id="1"`},
		`SELECT id,username,email FROM c WHERE username!=@1 AND (id>:2 OR email=$3) WITH CROSS_PARTITION=true WITH database=db WITH table=tbl`: {
			dbName: "db", collName: "tbl", isCrossPartition: true, selectQuery: `SELECT id,username,email FROM c WHERE username!=@_1 AND (id>@_2 OR email=@_3)`},
		// Collection name may be inferred from the FROM clause when no WITH collection is given.
		`SELECT a,b,c FROM user u WHERE u.id="1" WITH db=dbtemp`: {
			dbName: "dbtemp", collName: "user", isCrossPartition: false, selectQuery: `SELECT a,b,c FROM user u WHERE u.id="1"`},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtSelect); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtSelect", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.isCrossPartition != data.isCrossPartition {
			t.Fatalf("%s failed: <cross-partition> expected %#v but received %#v", name+"/"+query, data.isCrossPartition, dbstmt.isCrossPartition)
		} else if dbstmt.selectQuery != data.selectQuery {
			t.Fatalf("%s failed: <select-query> expected %#v but received %#v", name+"/"+query, data.selectQuery, dbstmt.selectQuery)
		}
	}
	invalidQueries := []string{
		`SELECT * FROM db.table`, // database and collection must be specified by WITH database=<dbname> and WITH collection=<collname>
		`SELECT * WITH db=dbname`, // no collection
		`SELECT * FROM c WITH collection=collname`, // no database
		`SELECT * FROM c WITH db=dbname WITH collection=collname WITH cross_partition=false`, // the only valid value for cross_partition is true
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_SelectDefaultDb verifies SELECT parsing with a default
// database: when no WITH database/db clause is present the default db is
// used, while an explicit WITH db=... still takes precedence.
func Test_parseQuery_SelectDefaultDb(t *testing.T) {
	name := "Test_parseQuery_SelectDefaultDb"
	dbName := "mydb" // default database passed to parseQueryWithDefaultDb
	type testStruct struct {
		dbName           string
		collName         string
		isCrossPartition bool
		selectQuery      string // the rewritten query after WITH clauses are stripped
	}
	testData := map[string]testStruct{
		// No WITH db clause: default db is used.
		`SELECT * FROM c WITH collection=tbl`: {
			dbName: dbName, collName: "tbl", isCrossPartition: false, selectQuery: `SELECT * FROM c`},
		// Explicit WITH db overrides the default.
		`SELECT CROSS PARTITION * FROM c WHERE id="1" WITH db=db-1 WITH table=tbl_1`: {
			dbName: "db-1", collName: "tbl_1", isCrossPartition: true, selectQuery: `SELECT * FROM c WHERE id="1"`},
		`SELECT id,username,email FROM c WHERE username!=@1 AND (id>:2 OR email=$3) WITH CROSS_PARTITION=true WITH table=tbl`: {
			dbName: dbName, collName: "tbl", isCrossPartition: true, selectQuery: `SELECT id,username,email FROM c WHERE username!=@_1 AND (id>@_2 OR email=@_3)`},
		// Collection inferred from FROM clause, db from the default.
		`SELECT a,b,c FROM user u WHERE u.id="1"`: {
			dbName: dbName, collName: "user", isCrossPartition: false, selectQuery: `SELECT a,b,c FROM user u WHERE u.id="1"`},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtSelect); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtSelect", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.isCrossPartition != data.isCrossPartition {
			t.Fatalf("%s failed: <cross-partition> expected %#v but received %#v", name+"/"+query, data.isCrossPartition, dbstmt.isCrossPartition)
		} else if dbstmt.selectQuery != data.selectQuery {
			t.Fatalf("%s failed: <select-query> expected %#v but received %#v", name+"/"+query, data.selectQuery, dbstmt.selectQuery)
		}
	}
}
// Test_parseQuery_Update verifies that UPDATE queries are parsed into
// *StmtUpdate: db/collection names, the ordered SET field list, the decoded
// values (JSON-style literals: null, numbers, booleans, quoted strings,
// objects, arrays, and @n/$n/:n placeholders) and the WHERE id are all
// extracted correctly; malformed UPDATEs must be rejected.
func Test_parseQuery_Update(t *testing.T) {
	name := "Test_parseQuery_Update"
	type testStruct struct {
		dbName   string
		collName string
		idStr    string
		id       interface{}   // nil for literal ids, placeholder{n} for parameters
		fields   []string      // SET column names, in declaration order
		values   []interface{} // decoded SET values, parallel to fields
	}
	testData := map[string]testStruct{
		`UPDATE db1.table1
SET a=null, b=
1.0, c=true,
d="\"a string 'with' \\\"quote\\\"\"", e="{\"key\":\"value\"}"
,f="[2.0,null,false,\"a string 'with' \\\"quote\\\"\"]" WHERE
	id="abc"`: {
			dbName: "db1", collName: "table1", fields: []string{"a", "b", "c", "d", "e", "f"}, values: []interface{}{
				nil, 1.0, true, `a string 'with' "quote"`, map[string]interface{}{"key": "value"}, []interface{}{2.0, nil, false, `a string 'with' "quote"`},
			}, idStr: "abc", id: nil},
		`UPDATE db-1.table_1
SET a=$1, b=
$2, c=:3, d=0 WHERE
	id=@4`: {
			dbName: "db-1", collName: "table_1", fields: []string{"a", "b", "c", "d"}, values: []interface{}{placeholder{1}, placeholder{2}, placeholder{3}, 0.0},
			idStr: "@4", id: placeholder{4}},
	}
	for query, data := range testData {
		if stmt, err := parseQuery(nil, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtUpdate); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtUpdate", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.idStr != data.idStr {
			t.Fatalf("%s failed: <id-str> expected %#v but received %#v", name+"/"+query, data.idStr, dbstmt.idStr)
		} else if dbstmt.id != data.id {
			// Direct interface comparison: ids here are nil or comparable placeholder values.
			t.Fatalf("%s failed: <id> expected %#v but received %#v", name+"/"+query, data.id, dbstmt.id)
		} else if !reflect.DeepEqual(dbstmt.fields, data.fields) {
			t.Fatalf("%s failed: <fields> expected %#v but received %#v", name+"/"+query, data.fields, dbstmt.fields)
		} else if !reflect.DeepEqual(dbstmt.values, data.values) {
			t.Fatalf("%s failed: <values> expected %#v but received %#v", name+"/"+query, data.values, dbstmt.values)
		}
	}
	invalidQueries := []string{
		`UPDATE db SET a=1,b=2,c=3 WHERE id=4`, // no collection name
		`UPDATE db.table SET a=1,b=2,c=3 WHERE username=4`, // only WHERE id... is accepted
		`UPDATE db.table SET a=1,b=2,c=3`, // no WHERE clause
		`UPDATE db.table WHERE id=1`,      // no SET clause
		`UPDATE db.table SET WHERE id=1`,  // SET clause is empty
		`UPDATE db.table SET a="{key:value}" WHERE id=1`, // should be "{\"key\:\"value\"}"
		`UPDATE db.table SET =1 WHERE id=2`, // invalid SET clause
		`UPDATE db.table SET a=1 WHERE id= `, // empty id
		`UPDATE db.table SET a=1,b=2,c=3 WHERE id="4`, // invalid id literate
	}
	for _, query := range invalidQueries {
		if _, err := parseQuery(nil, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
// Test_parseQuery_UpdateDefaultDb verifies UPDATE parsing with a default
// database: a query without a <db-name>. prefix falls back to the default db,
// while an explicit db.collection still takes precedence.
func Test_parseQuery_UpdateDefaultDb(t *testing.T) {
	name := "Test_parseQuery_UpdateDefaultDb"
	dbName := "mydb" // default database passed to parseQueryWithDefaultDb
	type testStruct struct {
		dbName   string
		collName string
		idStr    string
		id       interface{}   // nil for literal ids, placeholder{n} for parameters
		fields   []string      // SET column names, in declaration order
		values   []interface{} // decoded SET values, parallel to fields
	}
	testData := map[string]testStruct{
		// No db prefix: default db is used.
		`UPDATE table1
SET a=null, b=
1.0, c=true,
d="\"a string 'with' \\\"quote\\\"\"", e="{\"key\":\"value\"}"
,f="[2.0,null,false,\"a string 'with' \\\"quote\\\"\"]" WHERE
	id="abc"`: {
			dbName: dbName, collName: "table1", fields: []string{"a", "b", "c", "d", "e", "f"}, values: []interface{}{
				nil, 1.0, true, `a string 'with' "quote"`, map[string]interface{}{"key": "value"}, []interface{}{2.0, nil, false, `a string 'with' "quote"`},
			}, idStr: "abc", id: nil},
		// Explicit db prefix overrides the default.
		`UPDATE db-1.table_1
SET a=$1, b=
$2, c=:3, d=0 WHERE
	id=@4`: {
			dbName: "db-1", collName: "table_1", fields: []string{"a", "b", "c", "d"}, values: []interface{}{placeholder{1}, placeholder{2}, placeholder{3}, 0.0},
			idStr: "@4", id: placeholder{4}},
	}
	for query, data := range testData {
		if stmt, err := parseQueryWithDefaultDb(nil, dbName, query); err != nil {
			t.Fatalf("%s failed: %s", name+"/"+query, err)
		} else if dbstmt, ok := stmt.(*StmtUpdate); !ok {
			t.Fatalf("%s failed: the parsed stmt must be of type *StmtUpdate", name+"/"+query)
		} else if dbstmt.dbName != data.dbName {
			t.Fatalf("%s failed: <db-name> expected %#v but received %#v", name+"/"+query, data.dbName, dbstmt.dbName)
		} else if dbstmt.collName != data.collName {
			t.Fatalf("%s failed: <collection-name> expected %#v but received %#v", name+"/"+query, data.collName, dbstmt.collName)
		} else if dbstmt.idStr != data.idStr {
			t.Fatalf("%s failed: <id-str> expected %#v but received %#v", name+"/"+query, data.idStr, dbstmt.idStr)
		} else if dbstmt.id != data.id {
			t.Fatalf("%s failed: <id> expected %#v but received %#v", name+"/"+query, data.id, dbstmt.id)
		} else if !reflect.DeepEqual(dbstmt.fields, data.fields) {
			t.Fatalf("%s failed: <fields> expected %#v but received %#v", name+"/"+query, data.fields, dbstmt.fields)
		} else if !reflect.DeepEqual(dbstmt.values, data.values) {
			t.Fatalf("%s failed: <values> expected %#v but received %#v", name+"/"+query, data.values, dbstmt.values)
		}
	}
	invalidQueries := []string{
		`UPDATE .table SET a=1,b=2,c=3 WHERE id=4`, // empty db name is not valid
	}
	for _, query := range invalidQueries {
		if _, err := parseQueryWithDefaultDb(nil, dbName, query); err == nil {
			t.Fatalf("%s failed: query must not be parsed/validated successfully", name+"/"+query)
		}
	}
}
|
package main
import (
"github.com/stretchr/testify/assert"
"math/big"
"testing"
)
// Pair couples a Fibonacci index (a) with its expected value (b).
type Pair struct {
	a int   // index n
	b int64 // expected value of fibonacci(n)
}
// TestFib checks fibonacci against a table of known index/value pairs,
// covering every 7th index from 1 up to 92 (the largest index whose value
// fits in an int64 literal).
func TestFib(t *testing.T) {
	initdp()
	expected := [14]Pair{{1, 1}, {8, 21}, {15, 610}, {22, 17711}, {29, 514229}, {36, 14930352}, {43, 433494437},
		{50, 12586269025}, {57, 365435296162}, {64, 10610209857723}, {71, 308061521170129}, {78, 8944394323791464},
		{85, 259695496911122585}, {92, 7540113804746346429}}
	for _, pair := range expected {
		assert.Equal(t, big.NewInt(pair.b), fibonacci(pair.a), "Fibonacci not match", pair)
	}
}
|
package logging
// Config holds the logging configuration: where the log file is written and
// the log level used for each destination.
type Config struct {
	filePath     string // path of the log file location
	fileName     string // name of the log file
	levelFile    Level  // level applied to file output
	levelConsole Level  // level applied to console output
}
// NewConfig creates a logging Config from the given file location and the
// file/console log levels.
func NewConfig(filePath, fileName string, levelFile, levelConsole Level) *Config {
	cfg := Config{
		filePath:     filePath,
		fileName:     fileName,
		levelFile:    levelFile,
		levelConsole: levelConsole,
	}
	return &cfg
}
|
package main
import (
"fmt"
"github.com/micro/go-config"
)
// main loads config.json and prints the "mongo" section of the configuration.
func main() {
	// BUG FIX: config.LoadFile returns an error that was silently ignored;
	// a missing or unparsable file would previously just print a nil entry.
	if err := config.LoadFile("config.json"); err != nil {
		fmt.Println("cannot load config.json:", err)
		return
	}
	fmt.Println(config.Map()["mongo"])
}
|
package dlink
import (
"log"
)
// ConfigureDHCPAuto connects to the switch at destination over telnet, logs
// in as user, enables dhcp_auto, saves the configuration and reboots the
// device. The prompt string suggests a D-Link DGS-3100 target. The third
// parameter is accepted for signature compatibility but unused. A connection
// failure terminates the process via log.Fatalln.
func ConfigureDHCPAuto(destination string, user string, _ string) {
	t, err := NewTelnet(destination)
	if err != nil {
		log.Fatalln(err)
	}
	Login(t, user)
	t.Sendln("config dhcp_auto enable")
	t.Expect("DGS-3100# ") // wait for the switch prompt before saving
	WriteConfig(t)
	Reboot(t)
}
// func configure(destination string, user string) {
// t, err := NewTelnet(destination)
// t.CheckErr(err)
//
// // var data []byte
// t.Expect("UserName:")
// t.Sendln(user)
// t.Expect("DGS-3100# ")
// t.Sendln("config dhcp_auto enable")
// t.Expect("The configuration will take place on the next time the device will get DHCP address.")
// t.Expect("Success.")
// t.Expect("DGS-3100# ")
// t.Sendln("save")
// t.Expect("Overwrite file [startup-config] ?[Yes/press any key for no]....")
// t.Sendln("yes")
// t.Expect("Success.")
// t.Expect("DGS-3100# ")
// t.Sendln("reboot")
// t.Expect("This action may take a few minutes")
// // t.Expect(t, "Are you sure you want to proceed with system reboot now? (Y/N)[N] ")
// t.Sendln("Y")
// t.Expect("Shutting down ...")
// fmt.Println("Switch configuration done, rebooting...")
// fmt.Println("Please disconnect and move on to the next switch")
// time.Sleep(time.Second * 30)
// fmt.Println("Looking for new switch...")
// // data, err = t.ReadBytes('>')
// // checkErr(err)
// // os.Stdout.Write(data)
// // os.Stdout.WriteString("\n")
// }
|
package main
import (
"sort"
"testing"
)
// TestAlphabetic verifies that the alphabetic sort.Interface implementation
// orders strings lexicographically, including key=value style entries.
func TestAlphabetic(t *testing.T) {
	for _, tc := range []struct {
		name    string
		strings alphabetic
		want    []string
	}{
		{
			name:    "basic",
			strings: alphabetic{"a.b", "d.c", "b.e"},
			want:    []string{"a.b", "b.e", "d.c"},
		},
		{
			name:    "withKeysAndValues",
			strings: alphabetic{"bKey.subKey=value", "aKey.sub=value", "cKey.subKey=value"},
			want:    []string{"aKey.sub=value", "bKey.subKey=value", "cKey.subKey=value"},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			sort.Sort(tc.strings)
			for idx := range tc.strings {
				if tc.strings[idx] != tc.want[idx] {
					// BUG FIX: the original passed the actual value as
					// "expected" and the expected value as "got"; the
					// arguments now match their labels.
					t.Fatalf("incorrect sort\n expected %s\n got %s", tc.want[idx], tc.strings[idx])
				}
			}
		})
	}
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"context"
"fmt"
"sort"
"strings"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/utils"
moby "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
)
// List returns the compose projects (stacks) known to the engine, derived
// from containers carrying both the compose project label and the
// config-hash label. When opts.All is set, non-running containers are
// included as well.
func (s *composeService) List(ctx context.Context, opts api.ListOptions) ([]api.Stack, error) {
	list, err := s.apiClient().ContainerList(ctx, moby.ContainerListOptions{
		Filters: filters.NewArgs(hasProjectLabelFilter(), hasConfigHashLabel()),
		All:     opts.All,
	})
	if err != nil {
		return nil, err
	}
	return containersToStacks(list)
}
// containersToStacks groups containers by their compose project label and
// summarizes each group into an api.Stack with a combined status string and
// the union of config files. A group whose containers lack the config-files
// label only degrades ConfigFiles to "N/A" (with a warning) instead of
// failing the whole listing; a missing project label fails the call.
func containersToStacks(containers []moby.Container) ([]api.Stack, error) {
	containersByLabel, keys, err := groupContainerByLabel(containers, api.ProjectLabel)
	if err != nil {
		return nil, err
	}
	var projects []api.Stack
	for _, project := range keys {
		configFiles, err := combinedConfigFiles(containersByLabel[project])
		if err != nil {
			logrus.Warn(err.Error())
			configFiles = "N/A"
		}
		projects = append(projects, api.Stack{
			ID:          project,
			Name:        project,
			Status:      combinedStatus(containerToState(containersByLabel[project])),
			ConfigFiles: configFiles,
		})
	}
	return projects, nil
}
// combinedConfigFiles merges the comma-separated config-files label of every
// container into a single deduplicated, comma-joined string (first-seen
// order). It returns an error if any container is missing the label.
func combinedConfigFiles(containers []moby.Container) (string, error) {
	configFiles := []string{}
	for _, c := range containers {
		files, ok := c.Labels[api.ConfigFilesLabel]
		if !ok {
			return "", fmt.Errorf("No label %q set on container %q of compose project", api.ConfigFilesLabel, c.ID)
		}
		for _, f := range strings.Split(files, ",") {
			if !utils.StringContains(configFiles, f) {
				configFiles = append(configFiles, f)
			}
		}
	}
	return strings.Join(configFiles, ","), nil
}
// containerToState collects the State field of each container, preserving order.
func containerToState(containers []moby.Container) []string {
	states := make([]string, 0, len(containers))
	for _, container := range containers {
		states = append(states, container.State)
	}
	return states
}
// combinedStatus summarizes a list of container states into a string such as
// "exited(1), running(2)": one "state(count)" entry per distinct state,
// sorted alphabetically and joined by ", ". An empty input yields "".
func combinedStatus(statuses []string) string {
	counts := map[string]int{}
	for _, status := range statuses {
		counts[status]++
	}
	names := make([]string, 0, len(counts))
	for name := range counts {
		names = append(names, name)
	}
	sort.Strings(names)
	parts := make([]string, 0, len(names))
	for _, name := range names {
		parts = append(parts, fmt.Sprintf("%s(%d)", name, counts[name]))
	}
	return strings.Join(parts, ", ")
}
// groupContainerByLabel partitions containers by the value of labelName and
// returns the groups plus the sorted list of distinct label values. It
// errors if any container is missing the label.
func groupContainerByLabel(containers []moby.Container, labelName string) (map[string][]moby.Container, []string, error) {
	containersByLabel := map[string][]moby.Container{}
	keys := []string{}
	for _, c := range containers {
		label, ok := c.Labels[labelName]
		if !ok {
			return nil, nil, fmt.Errorf("No label %q set on container %q of compose project", labelName, c.ID)
		}
		labelContainers, ok := containersByLabel[label]
		if !ok {
			// First occurrence of this label value: record the key once.
			labelContainers = []moby.Container{}
			keys = append(keys, label)
		}
		labelContainers = append(labelContainers, c)
		containersByLabel[label] = labelContainers
	}
	sort.Strings(keys)
	return containersByLabel, keys, nil
}
|
package tempconv
// CToF converts a Celsius temperature to a Fahrenheit temperature.
func CToF(c Celsius) Fahrenheit { return Fahrenheit(c*9/5 + 32) }
// FToC converts a Fahrenheit temperature to a Celsius temperature.
func FToC(f Fahrenheit) Celsius { return Celsius((f - 32) * 5 / 9) }
// KToC converts a Kelvin temperature to a Celsius temperature.
func KToC(k Kelvin) Celsius { return Celsius(k - 273.15) }
// CToK converts a Celsius temperature to a Kelvin temperature.
func CToK(c Celsius) Kelvin { return Kelvin(c + 273.15) }
|
package subscraping
import (
"context"
"crypto/tls"
"net/http"
"time"
)
// NewSession creates a new session object for a domain.
//
// The HTTP client skips TLS certificate verification (deliberate: the tool
// probes arbitrary, often misconfigured hosts) and applies the given timeout,
// in seconds, to every request.
func NewSession(domain string, keys Keys, timeout int) (*Session, error) {
	client := &http.Client{
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true, // intentional for reconnaissance targets
			},
		},
		Timeout: time.Duration(timeout) * time.Second,
	}
	// Create a new extractor object for the current domain.
	extractor, err := NewSubdomainExtractor(domain)
	if err != nil {
		// BUG FIX: the original returned a half-initialized *Session
		// together with the error; return nil so callers cannot misuse it.
		return nil, err
	}
	return &Session{
		Client:    client,
		Keys:      keys,
		Extractor: extractor,
	}, nil
}
// NormalGetWithContext issues a plain GET request for url, honoring ctx for
// cancellation, with a fixed browser-like set of headers.
func (s *Session) NormalGetWithContext(ctx context.Context, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
	// Don't randomize user agents, as they cause issues sometimes.
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36")
	req.Header.Set("Accept", "*/*")
	req.Header.Set("Accept-Language", "en")
	return s.Client.Do(req)
}
// Get issues a GET request for url with optional cookies and extra headers,
// honoring ctx for cancellation. Extra headers may override the defaults set
// below since they are applied last.
func (s *Session) Get(ctx context.Context, url string, cookies string, headers map[string]string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36")
	req.Header.Set("Accept", "*/*")
	req.Header.Set("Accept-Language", "en")
	if cookies != "" {
		req.Header.Set("Cookie", cookies)
	}
	// Ranging over a nil map is a no-op, so no explicit nil check is needed.
	for key, value := range headers {
		req.Header.Set(key, value)
	}
	return s.Client.Do(req)
}
|
package client_test
import (
"context"
"crypto/x509"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
"github.com/EventStore/EventStore-Client-Go/stream_position"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/EventStore/EventStore-Client-Go/client"
"github.com/EventStore/EventStore-Client-Go/direction"
)
// TestTLSDefaults verifies that with default settings (TLS on, no trusted
// roots configured) a read fails with a certificate-authority error.
func TestTLSDefaults(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "certificate signed by unknown authority")
}
// TestTLSDefaultsWithCertificate verifies that a default (TLS-on) connection
// succeeds once the node's certificate is added to the trusted root pool.
func TestTLSDefaultsWithCertificate(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	b, err := ioutil.ReadFile("../certs/node/node.crt")
	if err != nil {
		t.Fatalf("failed to read node certificate ../certs/node/node.crt: %s", err.Error())
	}
	cp := x509.NewCertPool()
	if !cp.AppendCertsFromPEM(b) {
		// BUG FIX: err is nil here (ReadFile succeeded just above), so the
		// original err.Error() call would panic. AppendCertsFromPEM reports
		// failure only through its boolean result.
		t.Fatal("failed to append node certificates")
	}
	config.RootCAs = cp
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.NoError(t, err)
}
// TestTLSWithoutCertificateAndVerify verifies that TLS with certificate
// verification disabled (tlsverifycert=false) succeeds without trusted roots.
func TestTLSWithoutCertificateAndVerify(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s?tls=true&tlsverifycert=false", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.NoError(t, err)
}
// TestTLSWithoutCertificate verifies that verification enabled
// (tlsverifycert=true) without any trusted roots fails with a
// certificate-authority error.
func TestTLSWithoutCertificate(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s?tls=true&tlsverifycert=true", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "certificate signed by unknown authority")
}
// TestTLSWithCertificate verifies that verification enabled
// (tlsverifycert=true) succeeds when the node certificate is loaded into the
// trusted root pool programmatically.
func TestTLSWithCertificate(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s?tls=true&tlsverifycert=true", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	b, err := ioutil.ReadFile("../certs/node/node.crt")
	if err != nil {
		t.Fatalf("failed to read node certificate ../certs/node/node.crt: %s", err.Error())
	}
	cp := x509.NewCertPool()
	if !cp.AppendCertsFromPEM(b) {
		// BUG FIX: err is nil here (ReadFile succeeded just above), so the
		// original err.Error() call would panic. AppendCertsFromPEM reports
		// failure only through its boolean result.
		t.Fatal("failed to append node certificates")
	}
	config.RootCAs = cp
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.NoError(t, err)
}
// TestTLSWithCertificateFromAbsoluteFile verifies that the trusted root can
// be supplied via the tlsCAFile connection-string parameter using an
// absolute path.
func TestTLSWithCertificateFromAbsoluteFile(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	absPath, err := filepath.Abs("../certs/node/node.crt")
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	s := fmt.Sprintf("esdb://admin:changeit@%s?tls=true&tlsverifycert=true&tlsCAFile=%s", container.Endpoint, absPath)
	config, err := client.ParseConnectionString(s)
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.NoError(t, err)
}
// TestTLSWithCertificateFromRelativeFile verifies that the trusted root can
// be supplied via the tlsCAFile connection-string parameter using a relative
// path.
func TestTLSWithCertificateFromRelativeFile(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s?tls=true&tlsverifycert=true&tlsCAFile=../certs/node/node.crt", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.NoError(t, err)
}
// TestTLSWithInvalidCertificate verifies that trusting only an unrelated CA
// still yields a certificate-authority error when verification is enabled.
func TestTLSWithInvalidCertificate(t *testing.T) {
	container := GetEmptyDatabase()
	defer container.Close()
	config, err := client.ParseConnectionString(fmt.Sprintf("esdb://admin:changeit@%s?tls=true&tlsverifycert=true", container.Endpoint))
	if err != nil {
		t.Fatalf("Unexpected configuration error: %s", err.Error())
	}
	b, err := ioutil.ReadFile("../certs/untrusted-ca/ca.crt")
	if err != nil {
		t.Fatalf("failed to read node certificate ../certs/untrusted-ca/ca.crt: %s", err.Error())
	}
	cp := x509.NewCertPool()
	if !cp.AppendCertsFromPEM(b) {
		// BUG FIX: err is nil here (ReadFile succeeded just above), so the
		// original err.Error() call would panic. AppendCertsFromPEM reports
		// failure only through its boolean result.
		t.Fatal("failed to append node certificates")
	}
	config.RootCAs = cp
	c, err := client.NewClient(config)
	if err != nil {
		t.Fatalf("Unexpected error: %s", err.Error())
	}
	numberOfEventsToRead := 1
	numberOfEvents := uint64(numberOfEventsToRead)
	_, err = c.ReadAllEvents(context.Background(), direction.Backwards, stream_position.Start{}, numberOfEvents, true)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "certificate signed by unknown authority")
}
|
package app
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestIsInvalidRequestError(t *testing.T) {
stdErr := errors.New("simple error")
assert.False(t, IsInvalidRequestError(stdErr))
irErr := InvalidRequestError("invalid request")
assert.True(t, IsInvalidRequestError(irErr))
wrapperErr := fmt.Errorf("wrapping message: %w", irErr)
assert.True(t, IsInvalidRequestError(wrapperErr))
}
|
package main
import (
"os"
"log"
"encoding/csv"
"io"
"fmt"
)
// main reads readingcsv/data/iris.csv into memory (allowing records of
// varying length) and prints the parsed rows.
func main() {
	irisCsv, err := os.Open("readingcsv/data/iris.csv")
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the file handle was never closed.
	defer irisCsv.Close()

	reader := csv.NewReader(irisCsv)
	// -1 disables the fixed-field-count check, so ragged rows are accepted.
	reader.FieldsPerRecord = -1

	var irisData [][]string
	for {
		record, err := reader.Read()
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		irisData = append(irisData, record)
	}
	fmt.Println(irisData)
}
|
package client
type TankMessage map[string]interface{}
// Id returns the tank's numeric identifier. JSON numbers decode as float64,
// hence the conversion; the type assertion panics if the field is missing or
// not a number.
func (m TankMessage) Id() int64 {
	raw := m.Tank()["Id"].(float64)
	return int64(raw)
}
// Tank returns the nested "Tank" object as a Message; the type assertion
// panics if the entry is absent or not an object.
func (m TankMessage) Tank() Message {
	inner := m["Tank"].(map[string]interface{})
	return Message(inner)
}
|
package main
import (
"fmt"
"log"
"time"
)
// main2 demonstrates the QueuedScheduler: ten workers repeatedly register
// themselves and process dispatched requests (one second each), while the
// main loop submits batches of 14 requests every ten seconds.
func main2() {
	model := QueuedScheduler{}
	model.Run()
	for i := 0; i < 10; i++ {
		worker := make(chan int)
		go func(i int) {
			for {
				model.WorkerReady(worker) // register this worker so a request can be dispatched to it
				request := <-worker       // receive the dispatched request
				time.Sleep(time.Second)
				log.Printf("worker%d: result_ %v", i, request)
			}
		}(i)
	}
	in := 0
	for {
		for i := 0; i < 14; i++ {
			model.Submit(in)
		}
		time.Sleep(time.Second * 10)
		in++
	}
}
// main demonstrates that assigning a channel copies a reference: sending on
// the alias delivers to the original channel. Prints 22.
func main() {
	ch := make(chan int, 1)
	alias := ch // alias and ch refer to the same underlying channel
	go func() {
		alias <- 22
	}()
	fmt.Println(<-alias)
}
// WorkerReady registers w with the scheduler; the dispatch loop will later
// send a request on w when one is available.
func (s *QueuedScheduler) WorkerReady(w chan int) {
	s.workerChan <- w // hand the worker's channel to the dispatch loop
}
// Run starts the scheduler's dispatch goroutine. The goroutine keeps local
// queues of pending requests and idle worker channels; whenever both are
// non-empty it sends the head request to the head worker. It relies on the
// nil-channel trick: while either queue is empty, activeWorker stays nil and
// the dispatch case of the select blocks forever, so only the two enqueue
// cases can fire.
func (s *QueuedScheduler) Run() {
	s.requestChan = make(chan int, 1)
	s.workerChan = make(chan chan int, 1)
	go func() {
		var requestQ []int
		var workerQ []chan int // queue of idle worker channels
		for {
			var activeRequest int
			var activeWorker chan int
			//fmt.Println(len(requestQ),len(workerQ));
			if len(requestQ) > 0 && len(workerQ) > 0 { // both a request and a worker are available
				fmt.Println(len(requestQ), len(workerQ));
				activeRequest = requestQ[0]
				activeWorker = workerQ[0] // channels are reference values: activeWorker aliases the worker's own channel
				//fmt.Println(1111,activeWorker); //
			}
			select {
			case r := <-s.requestChan:
				requestQ = append(requestQ, r)
			case w := <-s.workerChan: // a worker registered itself via WorkerReady
				workerQ = append(workerQ, w)
			case activeWorker <- activeRequest: // dispatch; blocks forever while activeWorker is nil
				requestQ = requestQ[1:]
				workerQ = workerQ[1:]
				//fmt.Println(2222,activeWorker);
			}
		}
	}()
}
// QueuedScheduler dispatches submitted requests to registered workers,
// queueing whichever side is currently waiting.
type QueuedScheduler struct {
	requestChan chan int      // incoming requests from Submit
	workerChan  chan chan int // idle workers registering via WorkerReady
}
// Submit enqueues request r for dispatch to the next available worker.
func (s *QueuedScheduler) Submit(r int) {
	s.requestChan <- r // forward to the dispatch loop started by Run
}
|
package routing
import (
"allbooks/models"
"strings"
)
// CollectionContext combines the routing Context with the model-layer
// collection context (which carries the collection name set by the route).
type CollectionContext struct {
	Context
	models.CollectionContext
}
type CollectionAction func(context *CollectionContext)
// NewCollectionRoute builds a route for a named collection. The final action
// runs behind a middleware chain of filtering, pagination and sorting, and
// the lowercased collection name is stored on the context before the chain
// executes.
func NewCollectionRoute(name, method, pattern string, action CollectionAction) Route {
	// Adapt the typed CollectionAction to the generic Action signature.
	invoke := func(ctx Context) {
		action(ctx.(*CollectionContext))
	}
	lowered := strings.ToLower(name)
	withCollectionName := func(next Action) Action {
		return func(ctx Context) {
			collectionCtx := ToCollectionContext(ctx)
			collectionCtx.SetCollectionName(lowered)
			next(collectionCtx)
		}
	}
	// Chain order (outermost first): filtering -> pagination -> sorting -> action.
	chained := Filtering(Pagination(Sorting(invoke)))
	return BasicRoute{
		name, method, pattern, withCollectionName(chained),
	}
}
// ToCollectionContext returns context unchanged when it already is a
// *CollectionContext; otherwise it wraps the context together with a fresh
// model-layer collection context.
func ToCollectionContext(context Context) *CollectionContext {
	// The ok-form assertion replaces the original type switch, which
	// redundantly asserted the same type a second time in its case body.
	if collectionContext, ok := context.(*CollectionContext); ok {
		return collectionContext
	}
	return &CollectionContext{context, models.NewCollectionContext()}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vulkan
import (
"context"
"fmt"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/stream"
"github.com/google/gapid/core/stream/fmts"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/resolve"
"github.com/google/gapid/gapis/service/path"
"github.com/google/gapid/gapis/vertex"
)
// drawCallMesh builds a mesh for dc at p.
//
// It resolves the global state after the command, locates the last draw on
// the last bound queue, reconstructs the draw's index and vertex data, and
// returns the result as an api.Mesh. Direct draws (vkCmdDraw,
// vkCmdDrawIndexed) are supported; the indirect variants and their COUNT
// extensions are recognized but return "not implemented" errors.
func drawCallMesh(ctx context.Context, dc *VkQueueSubmit, p *path.Mesh, r *path.ResolveConfig) (*api.Mesh, error) {
	cmdPath := path.FindCommand(p)
	if cmdPath == nil {
		log.W(ctx, "Couldn't find command at path '%v'", p)
		return nil, nil
	}
	s, err := resolve.GlobalState(ctx, cmdPath.GlobalStateAfter(), r)
	if err != nil {
		return nil, err
	}
	c := getStateObject(s)
	lastQueue := c.LastBoundQueue()
	if lastQueue.IsNil() {
		return nil, fmt.Errorf("No previous queue submission")
	}
	lastDrawInfo, ok := c.LastDrawInfos().Lookup(lastQueue.VulkanHandle())
	if !ok {
		return nil, fmt.Errorf("There have been no previous draws")
	}
	// Get the draw primitive from the current graphics pipeline
	if lastDrawInfo.GraphicsPipeline().IsNil() {
		return nil, fmt.Errorf("Cannot find last used graphics pipeline")
	}
	drawPrimitive := func() api.DrawPrimitive {
		switch lastDrawInfo.GraphicsPipeline().InputAssemblyState().Topology() {
		case VkPrimitiveTopology_VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
			return api.DrawPrimitive_Points
		case VkPrimitiveTopology_VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
			return api.DrawPrimitive_Lines
		case VkPrimitiveTopology_VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
			return api.DrawPrimitive_LineStrip
		case VkPrimitiveTopology_VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
			return api.DrawPrimitive_Triangles
		case VkPrimitiveTopology_VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
			return api.DrawPrimitive_TriangleStrip
		case VkPrimitiveTopology_VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
			return api.DrawPrimitive_TriangleFan
		}
		// Fallback for topologies with no direct mesh equivalent.
		return api.DrawPrimitive_Points
	}()
	// Index buffer
	ib := &api.IndexBuffer{}
	// Vertex buffer streams
	vb := &vertex.Buffer{}
	stats := &api.Mesh_Stats{}
	noData := p.GetOptions().GetExcludeData()
	// In total there are four kinds of draw calls: vkCmdDraw, vkCmdDrawIndexed,
	// vkCmdDrawIndirect, vkCmdDrawIndexedIndirect. Each is processed in one of
	// the branches.
	if p := lastDrawInfo.CommandParameters().Draw(); !p.IsNil() {
		// Last draw call is vkCmdDraw
		// Generate an index buffer with value: 0, 1, 2, 3 ... vertexCount-1
		var indices []uint32
		if !noData {
			// BUGFIX: use '=' rather than ':='. The original ':=' declared a
			// new slice that shadowed the outer one, so 'indices' below was
			// always nil and the index buffer came out empty.
			indices = make([]uint32, p.VertexCount())
			for i := range indices {
				indices[i] = uint32(i)
			}
		}
		ib = &api.IndexBuffer{Indices: indices}
		// Get the current bound vertex buffers
		vb, err = getVertexBuffers(ctx, s, dc.Thread(), p.VertexCount(), p.FirstVertex(), noData)
		if err != nil {
			return nil, err
		}
		stats.Vertices = p.VertexCount()
		stats.Primitives = drawPrimitive.Count(p.VertexCount())
	} else if p := lastDrawInfo.CommandParameters().DrawIndexed(); !p.IsNil() {
		// Last draw call is vkCmdDrawIndexed
		// Get the current bound index buffer
		if lastDrawInfo.BoundIndexBuffer().BoundBuffer().Buffer().IsNil() {
			return nil, fmt.Errorf("Cannot find last used index buffer")
		}
		var indices []uint32
		if !noData {
			indices, err = getIndicesData(ctx, s, dc.Thread(), lastDrawInfo.BoundIndexBuffer(), p.IndexCount(), p.FirstIndex(), p.VertexOffset())
			if err != nil {
				return nil, err
			}
		}
		// Calculate the vertex count and the first vertex
		maxIndex := uint32(0)
		minIndex := uint32(0xFFFFFFFF)
		uniqueIndices := make(map[uint32]bool)
		for _, i := range indices {
			if maxIndex < i {
				maxIndex = i
			}
			if i < minIndex {
				minIndex = i
			}
			uniqueIndices[i] = true
		}
		vertexCount := maxIndex - minIndex + 1
		// Get the current bound vertex buffers
		vb, err = getVertexBuffers(ctx, s, dc.Thread(), vertexCount, minIndex, noData)
		if err != nil {
			return nil, err
		}
		// Shift indices, as we only extract the vertex data from minIndex to
		// maxIndex, we need to minus the minimum index value make the new indices
		// value valid for the extracted vertices value.
		shiftedIndices := make([]uint32, len(indices))
		for i, index := range indices {
			shiftedIndices[i] = index - minIndex
		}
		ib = &api.IndexBuffer{
			Indices: shiftedIndices,
		}
		stats.Vertices = uint32(len(uniqueIndices))
		stats.Indices = p.IndexCount()
		stats.Primitives = drawPrimitive.Count(p.IndexCount())
	} else if p := lastDrawInfo.CommandParameters().DrawIndirect(); !p.IsNil() {
		return nil, fmt.Errorf("Draw mesh for vkCmdDrawIndirect not implemented")
	} else if p := lastDrawInfo.CommandParameters().DrawIndexedIndirect(); !p.IsNil() {
		return nil, fmt.Errorf("Draw mesh for vkCmdDrawIndexedIndirect not implemented")
	} else if p := lastDrawInfo.CommandParameters().DrawIndirectCountKHR(); !p.IsNil() {
		return nil, fmt.Errorf("Draw mesh for vkCmdDrawIndirectCountKHR not implemented")
	} else if p := lastDrawInfo.CommandParameters().DrawIndexedIndirectCountKHR(); !p.IsNil() {
		return nil, fmt.Errorf("Draw mesh for vkCmdDrawIndexedIndirectCountKHR not implemented")
	} else if p := lastDrawInfo.CommandParameters().DrawIndirectCountAMD(); !p.IsNil() {
		return nil, fmt.Errorf("Draw mesh for vkCmdDrawIndirectCountAMD not implemented")
	} else if p := lastDrawInfo.CommandParameters().DrawIndexedIndirectCountAMD(); !p.IsNil() {
		return nil, fmt.Errorf("Draw mesh for vkCmdDrawIndexedIndirectCountAMD not implemented")
	}
	// p here is the outer *path.Mesh parameter again (the branch-local
	// shadows above have gone out of scope).
	guessSemantics(vb, p.Options.Hints())
	mesh := &api.Mesh{
		DrawPrimitive: drawPrimitive,
		VertexBuffer:  vb,
		IndexBuffer:   ib,
		Stats:         stats,
	}
	if p.Options != nil && p.Options.Faceted {
		return mesh.Faceted(ctx)
	}
	return mesh, nil
}
// getIndicesData reads back the bound index buffer and returns indexCount
// indices starting at firstIndex, decoded little-endian according to the
// buffer's index type (uint16 or uint32). vertexOffset is added to each
// index, with negative results clamped to zero. Unbacked buffers and
// unknown index types yield an empty slice with no error.
func getIndicesData(ctx context.Context, s *api.GlobalState, thread uint64, boundIndexBuffer BoundIndexBufferʳ, indexCount, firstIndex uint32, vertexOffset int32) ([]uint32, error) {
	backingMem := boundIndexBuffer.BoundBuffer().Buffer().Memory()
	if backingMem.IsNil() {
		return []uint32{}, nil
	}
	extractIndices := func(sizeOfIndex uint64) ([]uint32, error) {
		indices := []uint32{}
		size := uint64(indexCount) * sizeOfIndex
		backingMemoryPieces, err := subGetBufferBoundMemoryPiecesInRange(
			ctx, nil, api.CmdNoID, nil, s, nil, thread, nil, nil, boundIndexBuffer.BoundBuffer().Buffer(),
			boundIndexBuffer.BoundBuffer().Offset()+VkDeviceSize(uint64(firstIndex)*sizeOfIndex),
			VkDeviceSize(size))
		if err != nil {
			return []uint32{}, err
		}
		rawIndicesData := make([]byte, 0, size)
		// Concatenate the backing pieces in the order of their buffer offsets.
		for _, bufOffset := range backingMemoryPieces.Keys() {
			piece := backingMemoryPieces.Get(bufOffset)
			data, err := piece.DeviceMemory().Data().Slice(
				uint64(piece.MemoryOffset()),
				uint64(piece.MemoryOffset()+piece.Size())).Read(ctx, nil, s, nil)
			if err != nil {
				return []uint32{}, err
			}
			rawIndicesData = append(rawIndicesData, data...)
		}
		if uint64(len(rawIndicesData)) < size {
			log.E(ctx, "Shadow memory of index buffer is not big enough")
			return []uint32{}, nil
		}
		for i := uint64(0); (i < size) && (i+sizeOfIndex-1 < size); i += sizeOfIndex {
			// Assemble one little-endian index from sizeOfIndex bytes.
			index := int32(0)
			for j := uint64(0); j < sizeOfIndex; j++ {
				// BUGFIX: the guard must be '>=' — 'i+j == len' would index
				// out of range. Also return a real error rather than the
				// original 'return nil, err', where err was provably nil.
				if i+j >= uint64(len(rawIndicesData)) {
					return nil, fmt.Errorf("Incomplete data for index buffer")
				}
				oneByte := rawIndicesData[i+j]
				index += int32(oneByte) << (8 * j)
			}
			index += vertexOffset
			if index < 0 {
				// TODO(qining): The index value is invalid, need to emit error mesage
				// here.
				index = 0
			}
			indices = append(indices, uint32(index))
		}
		return indices, nil
	}
	switch boundIndexBuffer.Type() {
	case VkIndexType_VK_INDEX_TYPE_UINT16:
		return extractIndices(2)
	case VkIndexType_VK_INDEX_TYPE_UINT32:
		return extractIndices(4)
	}
	return []uint32{}, nil
}
// getVertexBuffers gathers, for every vertex attribute of the last bound
// graphics pipeline, a compacted data stream covering vertexCount vertices
// starting at firstVertex, and returns them as a vertex.Buffer. When noData
// is set, streams are emitted with nil data (metadata only).
func getVertexBuffers(ctx context.Context, s *api.GlobalState, thread uint64,
	vertexCount, firstVertex uint32, noData bool) (*vertex.Buffer, error) {
	if !noData && vertexCount == 0 {
		return nil, fmt.Errorf("Number of vertices must be greater than 0")
	}
	c := getStateObject(s)
	lastQueue := c.LastBoundQueue()
	if lastQueue.IsNil() {
		return nil, fmt.Errorf("No previous queue submission")
	}
	lastDrawInfo, ok := c.LastDrawInfos().Lookup(lastQueue.VulkanHandle())
	if !ok {
		return nil, fmt.Errorf("There have been no previous draws")
	}
	vb := &vertex.Buffer{}
	attributes := lastDrawInfo.GraphicsPipeline().VertexInputState().AttributeDescriptions()
	bindings := lastDrawInfo.GraphicsPipeline().VertexInputState().BindingDescriptions()
	var err error
	// For each attribute, get the vertex buffer data
	for _, attributeIndex := range attributes.Keys() {
		attribute := attributes.Get(attributeIndex)
		// Attributes whose binding or bound buffer is missing are skipped,
		// not treated as fatal.
		if !bindings.Contains(attribute.Binding()) {
			// TODO(qining): This is an error, should emit error message here.
			continue
		}
		binding := bindings.Get(attribute.Binding())
		if !lastDrawInfo.BoundVertexBuffers().Contains(binding.Binding()) {
			// TODO(qining): This is an error, should emit error message here.
			continue
		}
		var vertexData []byte
		if !noData {
			boundVertexBuffer := lastDrawInfo.BoundVertexBuffers().Get(binding.Binding())
			vertexData, err = getVerticesData(ctx, s, thread, boundVertexBuffer,
				vertexCount, firstVertex, binding, attribute)
			if err != nil {
				return nil, err
			}
		}
		if noData || vertexData != nil {
			// NOTE: ':=' shadows the outer err on purpose here — an
			// untranslatable format only skips this attribute.
			translatedFormat, err := translateVertexFormat(attribute.Fmt())
			if err != nil {
				// TODO(qining): This is an error, should emit error message here
				continue
			}
			// TODO: We can disassemble the shader to pull out the debug name if the
			// shader has debug info.
			name := fmt.Sprintf("binding=%v, location=%v", binding.Binding(), attribute.Location())
			vb.Streams = append(vb.Streams,
				&vertex.Stream{
					Name:     name,
					Data:     vertexData,
					Format:   translatedFormat,
					Semantic: &vertex.Semantic{},
				})
		}
	}
	return vb, nil
}
// getVerticesData reads back the data of one vertex attribute for
// vertexCount vertices starting at firstVertex, compacting a possibly
// strided layout into a tightly packed byte slice (one element per
// vertex). Instance-rate bindings return (nil, nil) since instancing is
// not supported. When the bound buffer range is too small, a zero-filled
// slice is returned together with an error.
func getVerticesData(ctx context.Context, s *api.GlobalState, thread uint64,
	boundVertexBuffer BoundBuffer, vertexCount, firstVertex uint32,
	binding VkVertexInputBindingDescription,
	attribute VkVertexInputAttributeDescription) ([]byte, error) {
	if vertexCount == 0 {
		return nil, fmt.Errorf("Number of vertices must be greater than 0")
	}
	if binding.InputRate() == VkVertexInputRate_VK_VERTEX_INPUT_RATE_INSTANCE {
		// Instanced draws are not supported, but the first instance's geometry
		// might be still useful. So we ignore any bindings with a instance rate,
		// but do not report an error.
		return nil, nil
	}
	sliceSize := uint64(boundVertexBuffer.Range())
	formatElementAndTexelBlockSize, err :=
		subGetElementAndTexelBlockSize(ctx, nil, api.CmdNoID, nil, s, nil, thread, nil, nil, attribute.Fmt())
	if err != nil {
		return nil, err
	}
	perVertexSize := uint64(formatElementAndTexelBlockSize.ElementSize())
	stride := uint64(binding.Stride())
	compactOutputSize := perVertexSize * uint64(vertexCount)
	out := make([]byte, compactOutputSize)
	// Extent of the region to read: the last vertex only needs
	// perVertexSize bytes, not a full stride.
	fullSize := uint64(vertexCount-1)*stride + perVertexSize
	offset := uint64(attribute.Offset()) + (uint64(firstVertex) * stride)
	if offset >= sliceSize || offset+fullSize > sliceSize {
		// We do not actually have a big enough buffer for this. Return
		// our zero-initialized buffer.
		return out, fmt.Errorf("Vertex data is out of range")
	}
	backingMemoryPieces, err := subGetBufferBoundMemoryPiecesInRange(
		ctx, nil, api.CmdNoID, nil, s, nil, thread, nil, nil, boundVertexBuffer.Buffer(),
		boundVertexBuffer.Offset()+VkDeviceSize(offset),
		VkDeviceSize(fullSize))
	if err != nil {
		return nil, err
	}
	rawData := make([]byte, 0, fullSize)
	// Concatenate the backing pieces in the order of their buffer offsets.
	for _, bo := range backingMemoryPieces.Keys() {
		ds := uint64(backingMemoryPieces.Get(bo).MemoryOffset())
		de := uint64(backingMemoryPieces.Get(bo).Size()) + ds
		data, err := backingMemoryPieces.Get(bo).DeviceMemory().Data().Slice(ds, de).Read(ctx, nil, s, nil)
		if err != nil {
			return nil, err
		}
		rawData = append(rawData, data...)
	}
	// (A redundant post-loop 'if err != nil' was removed: every error path
	// above already returns, so err is always nil here.)
	if stride > perVertexSize {
		// There are gaps between vertices: copy each element individually.
		for i := uint64(0); i < uint64(vertexCount) && i*stride < uint64(len(rawData)); i++ {
			copy(out[i*perVertexSize:(i+1)*perVertexSize], rawData[i*stride:])
		}
	} else {
		// No gap between each vertex: a single bulk copy suffices.
		copy(out, rawData)
	}
	return out, nil
}
// Translate Vulkan vertex buffer format. Vulkan uses RGBA formats for vertex
// data, the mapping from RGBA channels to XYZW channels are done here.
//
// translateVertexFormat returns the stream.Format corresponding to the given
// VkFormat, or an error for packed and otherwise unsupported formats.
func translateVertexFormat(vkFormat VkFormat) (*stream.Format, error) {
	switch vkFormat {
	case VkFormat_VK_FORMAT_R8_UNORM:
		return fmts.X_U8_NORM, nil
	case VkFormat_VK_FORMAT_R8_SNORM:
		return fmts.X_S8_NORM, nil
	case VkFormat_VK_FORMAT_R8_UINT:
		return fmts.X_U8, nil
	case VkFormat_VK_FORMAT_R8_SINT:
		return fmts.X_S8, nil
	case VkFormat_VK_FORMAT_R8G8_UNORM:
		return fmts.XY_U8_NORM, nil
	case VkFormat_VK_FORMAT_R8G8_SNORM:
		return fmts.XY_S8_NORM, nil
	case VkFormat_VK_FORMAT_R8G8_UINT:
		return fmts.XY_U8, nil
	case VkFormat_VK_FORMAT_R8G8_SINT:
		return fmts.XY_S8, nil
	case VkFormat_VK_FORMAT_R8G8B8A8_UNORM:
		return fmts.XYZW_U8_NORM, nil
	case VkFormat_VK_FORMAT_R8G8B8A8_SNORM:
		return fmts.XYZW_S8_NORM, nil
	case VkFormat_VK_FORMAT_R8G8B8A8_UINT:
		return fmts.XYZW_U8, nil
	case VkFormat_VK_FORMAT_R8G8B8A8_SINT:
		return fmts.XYZW_S8, nil
	// NOTE(review): BGRA is mapped to the same XYZW format as RGBA here, so
	// the B/R swizzle is not represented — confirm downstream consumers
	// expect that.
	case VkFormat_VK_FORMAT_B8G8R8A8_UNORM:
		return fmts.XYZW_U8_NORM, nil
	case VkFormat_VK_FORMAT_R16_UNORM:
		return fmts.X_U16_NORM, nil
	case VkFormat_VK_FORMAT_R16_SNORM:
		return fmts.X_S16_NORM, nil
	case VkFormat_VK_FORMAT_R16_UINT:
		return fmts.X_U16, nil
	case VkFormat_VK_FORMAT_R16_SINT:
		return fmts.X_S16, nil
	case VkFormat_VK_FORMAT_R16_SFLOAT:
		return fmts.X_F16, nil
	case VkFormat_VK_FORMAT_R16G16_UNORM:
		return fmts.XY_U16_NORM, nil
	case VkFormat_VK_FORMAT_R16G16_SNORM:
		return fmts.XY_S16_NORM, nil
	case VkFormat_VK_FORMAT_R16G16_UINT:
		return fmts.XY_U16, nil
	case VkFormat_VK_FORMAT_R16G16_SINT:
		return fmts.XY_S16, nil
	case VkFormat_VK_FORMAT_R16G16_SFLOAT:
		return fmts.XY_F16, nil
	case VkFormat_VK_FORMAT_R16G16B16A16_UNORM:
		return fmts.XYZW_U16_NORM, nil
	case VkFormat_VK_FORMAT_R16G16B16A16_SNORM:
		return fmts.XYZW_S16_NORM, nil
	case VkFormat_VK_FORMAT_R16G16B16A16_UINT:
		return fmts.XYZW_U16, nil
	case VkFormat_VK_FORMAT_R16G16B16A16_SINT:
		return fmts.XYZW_S16, nil
	case VkFormat_VK_FORMAT_R16G16B16A16_SFLOAT:
		return fmts.XYZW_F16, nil
	case VkFormat_VK_FORMAT_R32_UINT:
		return fmts.X_U32, nil
	case VkFormat_VK_FORMAT_R32_SINT:
		return fmts.X_S32, nil
	case VkFormat_VK_FORMAT_R32_SFLOAT:
		return fmts.X_F32, nil
	case VkFormat_VK_FORMAT_R32G32_UINT:
		return fmts.XY_U32, nil
	case VkFormat_VK_FORMAT_R32G32_SINT:
		return fmts.XY_S32, nil
	case VkFormat_VK_FORMAT_R32G32_SFLOAT:
		return fmts.XY_F32, nil
	case VkFormat_VK_FORMAT_R32G32B32_UINT:
		return fmts.XYZ_U32, nil
	case VkFormat_VK_FORMAT_R32G32B32_SINT:
		return fmts.XYZ_S32, nil
	case VkFormat_VK_FORMAT_R32G32B32_SFLOAT:
		return fmts.XYZ_F32, nil
	case VkFormat_VK_FORMAT_R32G32B32A32_UINT:
		return fmts.XYZW_U32, nil
	case VkFormat_VK_FORMAT_R32G32B32A32_SINT:
		return fmts.XYZW_S32, nil
	case VkFormat_VK_FORMAT_R32G32B32A32_SFLOAT:
		return fmts.XYZW_F32, nil
	// TODO(qining): Support packed format
	case VkFormat_VK_FORMAT_A8B8G8R8_UNORM_PACK32,
		VkFormat_VK_FORMAT_A8B8G8R8_SNORM_PACK32,
		VkFormat_VK_FORMAT_A8B8G8R8_UINT_PACK32,
		VkFormat_VK_FORMAT_A8B8G8R8_SINT_PACK32,
		VkFormat_VK_FORMAT_A2B10G10R10_UNORM_PACK32:
		return nil, fmt.Errorf("Packed format not supported yet")
	default:
		return nil, fmt.Errorf("Unsupported format as vertex format")
	}
}
// guessSemantics assigns a best-guess semantic type to each vertex stream
// that does not already have one. Caller-supplied hints (keyed by stream
// name) are honored first; remaining streams are classified by their
// component count, each semantic type being handed out at most once.
func guessSemantics(vb *vertex.Buffer, hints map[string]vertex.Semantic_Type) {
	// TODO: We may disassemble the shader to pull out the debug name to help
	// this semantics guessing, if the shader has debug info.
	candidatesByWidth := map[uint32][]vertex.Semantic_Type{
		4: {vertex.Semantic_Position,
			vertex.Semantic_Normal,
			vertex.Semantic_Color},
		3: {vertex.Semantic_Position,
			vertex.Semantic_Normal,
			vertex.Semantic_Color},
		2: {vertex.Semantic_Position,
			vertex.Semantic_Texcoord},
	}
	assigned := map[vertex.Semantic_Type]bool{}
	// First pass: apply explicit hints, each semantic type at most once.
	if len(hints) > 0 {
		for _, stream := range vb.Streams {
			if t, ok := hints[stream.Name]; ok && !assigned[t] {
				stream.Semantic.Type = t
				assigned[t] = true
			}
		}
	}
	// Second pass: guess the rest from the number of format components.
	for _, stream := range vb.Streams {
		if !needsGuess(stream) {
			continue
		}
		width := uint32(len(stream.Format.Components))
		for _, t := range candidatesByWidth[width] {
			if !assigned[t] {
				stream.Semantic.Type = t
				assigned[t] = true
				break
			}
		}
	}
}
// needsGuess reports whether the stream's semantic type is still
// unclassified and therefore requires a heuristic guess.
func needsGuess(s *vertex.Stream) bool {
	unclassified := s.Semantic.Type == vertex.Semantic_Unknown
	return unclassified
}
|
// Copyright 2022 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/gravitational/teleport/api/types"
)
// TestSliceMatchesRegex exercises the three matching modes of
// SliceMatchesRegex — literal strings, anchored regular expressions and
// glob patterns — plus the error path for invalid regex syntax.
func TestSliceMatchesRegex(t *testing.T) {
	cases := []struct {
		input   string
		exprs   []string
		matches bool
		assert  require.ErrorAssertionFunc
	}{
		{
			input:   "test|staging",
			exprs:   []string{"test|staging"}, // treated as a literal string
			matches: true,
			assert:  require.NoError,
		},
		{
			input:   "test",
			exprs:   []string{"^test|staging$"}, // treated as a regular expression due to ^ $
			matches: true,
			assert:  require.NoError,
		},
		{
			input:   "staging",
			exprs:   []string{"^test|staging$"}, // treated as a regular expression due to ^ $
			matches: true,
			assert:  require.NoError,
		},
		{
			input:   "test-foo",
			exprs:   []string{"test-*"}, // treated as a glob pattern due to missing ^ $
			matches: true,
			assert:  require.NoError,
		},
		{
			input:   "foo-test",
			exprs:   []string{"test-*"}, // treated as a glob pattern due to missing ^ $
			matches: false,
			assert:  require.NoError,
		},
		{
			input:   "foo",
			exprs:   []string{"^[$"}, // invalid regex, should error
			matches: false,
			assert:  require.Error,
		},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			got, err := SliceMatchesRegex(tc.input, tc.exprs)
			tc.assert(t, err)
			require.Equal(t, tc.matches, got)
		})
	}
}
// TestKubeResourceMatchesRegex checks KubeResourceMatchesRegex against
// literal, glob-style and anchored-regex namespace/name matchers, as well
// as kind mismatches and invalid regex input.
func TestKubeResourceMatchesRegex(t *testing.T) {
	tests := []struct {
		name      string
		input     types.KubernetesResource
		resources []types.KubernetesResource
		matches   bool
		assert    require.ErrorAssertionFunc
	}{
		{
			name: "input matches single resource",
			input: types.KubernetesResource{
				Kind:      types.KindKubePod,
				Namespace: "default",
				Name:      "podname",
			},
			resources: []types.KubernetesResource{
				{
					Kind:      types.KindKubePod,
					Namespace: "default",
					Name:      "podname",
				},
			},
			assert:  require.NoError,
			matches: true,
		},
		{
			name: "input matches last resource",
			input: types.KubernetesResource{
				Kind:      types.KindKubePod,
				Namespace: "default",
				Name:      "podname",
			},
			resources: []types.KubernetesResource{
				{
					Kind:      types.KindKubePod,
					Namespace: "other_namespace",
					Name:      "podname",
				},
				{
					Kind:      types.KindKubePod,
					Namespace: "default",
					Name:      "other_pod",
				},
				{
					Kind:      types.KindKubePod,
					Namespace: "default",
					Name:      "podname",
				},
			},
			assert:  require.NoError,
			matches: true,
		},
		{
			name: "input matches regex expression",
			input: types.KubernetesResource{
				Kind:      types.KindKubePod,
				Namespace: "default-5",
				Name:      "podname-5",
			},
			resources: []types.KubernetesResource{
				{
					Kind:      types.KindKubePod,
					Namespace: "defa*",
					Name:      "^podname-[0-9]+$",
				},
			},
			assert:  require.NoError,
			matches: true,
		},
		{
			name: "input has no matchers",
			input: types.KubernetesResource{
				Kind:      types.KindKubePod,
				Namespace: "default",
				Name:      "pod-name",
			},
			resources: []types.KubernetesResource{
				{
					Kind:      types.KindKubePod,
					Namespace: "default",
					Name:      "^pod-[0-9]+$",
				},
			},
			assert:  require.NoError,
			matches: false,
		},
		{
			// matches is omitted here: the zero value false is the
			// expectation when the matcher regex fails to compile.
			name: "invalid regex expression",
			input: types.KubernetesResource{
				Kind:      types.KindKubePod,
				Namespace: "default-5",
				Name:      "podname-5",
			},
			resources: []types.KubernetesResource{
				{
					Kind:      types.KindKubePod,
					Namespace: "defa*",
					Name:      "^podname-[0-+$",
				},
			},
			assert: require.Error,
		},
		{
			// matches is omitted here too: a kind mismatch is expected to
			// yield false without error.
			name: "resource with different kind",
			input: types.KubernetesResource{
				Kind:      types.KindKubePod,
				Namespace: "default",
				Name:      "podname",
			},
			resources: []types.KubernetesResource{
				{
					Kind:      "other_type",
					Namespace: "default",
					Name:      "podname",
				},
			},
			assert: require.NoError,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := KubeResourceMatchesRegex(tt.input, tt.resources)
			tt.assert(t, err)
			require.Equal(t, tt.matches, got)
		})
	}
}
|
// +build ignore

// This file is excluded from normal builds by the ignore build tag above.
package humanize

import "os"

// NOTE(review): a package-level var initialized by a side-effecting
// os.Create call (results unused) — presumably a fixture/example kept only
// because the file is build-ignored; confirm before reusing this pattern.
var fl, err = os.Create("/some/path")
|
package model_test
import (
"testing"
"github.com/apex/log"
"github.com/ooni/probe-cli/v3/internal/engine/model"
)
// TestPrinterCallbacksCallbacks builds printer callbacks around the default
// apex logger and invokes OnProgress, checking only that the call completes.
func TestPrinterCallbacksCallbacks(t *testing.T) {
	callbacks := model.NewPrinterCallbacks(log.Log)
	callbacks.OnProgress(0.4, "progress")
}
|
package events
import "time"
// TimeOffset converts the event's Time field — interpreted as a count of
// milliseconds — into a time.Duration.
func (e *Event) TimeOffset() time.Duration {
	millis := time.Duration(e.Time)
	return millis * time.Millisecond
}
|
package dynamic_programming
import (
"math"
"testing"
)
// stoneGame (Stone Game) reports whether the first player can end with
// more stones than the second, assuming optimal play where each turn a
// pile is taken from either end of the row.
func stoneGame(piles []int) bool {
	n := len(piles)
	// margin[l][r] is the best score difference (current player minus
	// opponent) achievable when only piles[l..r] remain.
	margin := make([][]int, n)
	for i := range margin {
		margin[i] = make([]int, n)
		margin[i][i] = piles[i] // single pile: take it all
	}
	// Widen the interval one pile at a time; each cell depends only on
	// strictly shorter intervals.
	for length := 1; length < n; length++ {
		for l := 0; l+length < n; l++ {
			r := l + length
			takeLeft := float64(piles[l] - margin[l+1][r])
			takeRight := float64(piles[r] - margin[l][r-1])
			margin[l][r] = int(math.Max(takeLeft, takeRight))
		}
	}
	return margin[0][n-1] > 0
}
// Test_377 verifies stoneGame on two rows where the first player wins.
// The original version only logged the results via t.Log, so it could
// never fail; it now asserts the expected outcomes.
func Test_377(t *testing.T) {
	if !stoneGame([]int{5, 3, 4, 5}) {
		t.Error("stoneGame([5 3 4 5]) = false, want true")
	}
	if !stoneGame([]int{3, 2, 10, 4}) {
		t.Error("stoneGame([3 2 10 4]) = false, want true")
	}
}
|
package main
import (
"context"
"fmt"
"net/http"
"os"
"runtime"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"pack.ag/amqp"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
var (
	// AMQP credentials, read from the environment in main.
	username string
	password string
	// mapLock guards shared map state.
	// NOTE(review): no map usage is visible in this chunk — confirm it is
	// actually used elsewhere in the file.
	mapLock = &sync.RWMutex{}
	// Command-line flags.
	amqpURL = kingpin.Flag("amqp", "AMQP URL").Default("amqp://127.0.0.1:5672").String()
	ip      = kingpin.Flag("listen", "IP/Port to listen to").Default(":2778").String()
	level   = kingpin.Flag("log-level", "log level").Default("info").String()
	link    = kingpin.Flag("link", "Link").Default("alerts").String()
	// Shared AMQP connection state, (re)established by connectAMQP and
	// torn down/redialed by recoverAMQP.
	client  *amqp.Client
	session *amqp.Session
	sender  *amqp.Sender
	ctx     context.Context
	// upInfo reports connection health: 1 once connected, 0 otherwise.
	upInfo = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "amqp",
			Name:      "up",
			Help:      "Status of the AMQP connection",
		},
	)
)
// main parses flags, validates AMQP credentials from the environment,
// registers Prometheus instrumentation around the AMQP handler, starts the
// AMQP connection in the background and serves HTTP.
func main() {
	kingpin.Parse()
	l, err := log.ParseLevel(*level)
	if err != nil {
		log.Fatal(err)
	}
	log.SetLevel(l)
	username = os.Getenv("AMQP_USER")
	if username == "" {
		log.Fatal("AMQP_USER can't be empty")
	}
	password = os.Getenv("AMQP_PASSWORD")
	if password == "" {
		log.Fatal("AMQP_PASSWORD can't be empty")
	}
	// BUGFIX: the package-level ctx was never initialized here, so
	// recoverAMQP's context.WithTimeout(ctx, ...) would panic on a nil
	// parent. Initialize it (guarded, in case another init path sets it).
	if ctx == nil {
		ctx = context.Background()
	}
	inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "alert",
		Help: "A gauge of requests currently being served by the wrapped handler.",
	})
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "amqp_handler_request_duration_seconds",
			Help:    "A histogram of latencies for requests.",
			Buckets: []float64{.25, .5, 1, 2.5, 5, 10},
		},
		[]string{"method"},
	)
	upInfo.Set(0)
	prometheus.MustRegister(upInfo)
	buildInfo := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "alert2amqp",
			Name:      "build_info",
			Help: fmt.Sprintf(
				"A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
				"alert2amqp",
			),
		},
		[]string{"version", "goversion"},
	)
	buildInfo.WithLabelValues(version, runtime.Version()).Set(1)
	prometheus.MustRegister(buildInfo)
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "amqp_handler_requests_total",
			Help: "A counter for requests to the wrapped handler.",
		},
		[]string{"code", "method"},
	)
	// Instrumentation chain: in-flight gauge -> duration histogram ->
	// request counter -> the AMQP handler itself.
	amqpChain := promhttp.InstrumentHandlerInFlight(inFlightGauge,
		promhttp.InstrumentHandlerDuration(duration,
			promhttp.InstrumentHandlerCounter(counter,
				&amqpHandler{},
			),
		),
	)
	prometheus.MustRegister(inFlightGauge, counter, duration)
	http.Handle("/metrics", promhttp.Handler())
	http.Handle("/", amqpChain)
	// BUGFIX: log message typo corrected ("Listering" -> "Listening").
	log.Infof("Listening on %s", *ip)
	go connectAMQP()
	log.Fatal(http.ListenAndServe(*ip, nil))
}
// recoverAMQP is called when the AMQP connection is not working. It marks
// the connection as down, tears down any existing sender/session/client
// (bounded by a 10-second timeout) and dials a fresh connection.
func recoverAMQP() {
	upInfo.Set(0)
	// BUGFIX: guard against a nil package-level ctx — context.WithTimeout
	// panics when its parent context is nil.
	parent := ctx
	if parent == nil {
		parent = context.Background()
	}
	closeCtx, cancel := context.WithTimeout(parent, 10*time.Second)
	defer cancel()
	if sender != nil {
		sender.Close(closeCtx)
	}
	if session != nil {
		session.Close(closeCtx)
	}
	if client != nil {
		client.Close()
	}
	connectAMQP()
}
// connectAMQP dials the AMQP server with SASL-plain credentials and sets
// up the package-level client, session and sender. On any failure it logs
// the error and returns, leaving upInfo at 0; on full success it sets
// upInfo to 1. Note that the package vars are assigned directly, so a
// failed step leaves earlier vars pointing at the new (partial) connection.
func connectAMQP() {
	var err error
	client, err = amqp.Dial(*amqpURL,
		amqp.ConnSASLPlain(username, password),
	)
	if err != nil {
		log.Error("Dialing AMQP server:", err)
		return
	}
	session, err = client.NewSession()
	if err != nil {
		log.Error("Creating AMQP session:", err)
		return
	}
	sender, err = session.NewSender(
		amqp.LinkTargetAddress(*link),
	)
	if err != nil {
		log.Error("Creating Sender: ", err)
		return
	}
	upInfo.Set(1)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.