text stringlengths 11 4.05M |
|---|
package abbreviation
import "testing"
// testCase describes one abbreviation scenario: input strings a and b and
// the expected result of abbreviation(a, b).
type testCase struct {
name string // subtest name
a string // candidate string (mixed case)
b string // target abbreviation (upper case)
want bool // expected result of abbreviation(a, b)
}

// testCases is the table driven by TestAbbreviation.
var testCases = []testCase{
{"simple", "abcd", "ABCD", true},
{"match", "ABCD", "ABCD", true},
{"0", "daBcd", "ABC", true},
{"1", "abcDE", "ABDE", true},
{"2", "AbcDE", "AFDE", false},
{"can replace", "AbcBcD", "ABCD", true},
{"don't skip capitals", "AbCBcD", "ABCD", false},
{"extra capitals", "abcdeF", "ABCDE", false},
}
// TestAbbreviation runs abbreviation over every entry of testCases as a
// named subtest, logging the inputs and reporting any mismatch.
func TestAbbreviation(t *testing.T) {
	for _, tt := range testCases {
		tt := tt // pin loop variable for the closure (pre-Go 1.22 safety)
		t.Run(tt.name, func(t *testing.T) {
			t.Logf("%q %q", tt.a, tt.b)
			if got := abbreviation(tt.a, tt.b); got != tt.want {
				t.Errorf("%q %q = %t; wanted %t", tt.a, tt.b, got, tt.want)
			}
		})
	}
}
|
package main
import "testing"
// TestIdentityInit verifies that Identity.Init produces a 64-character
// thumbprint (identity.Id) and an RSA private key.
func TestIdentityInit(t *testing.T) {
	var identity Identity
	identity.Init()
	if got := len(identity.Id); got != 64 {
		t.Errorf("Error creating identity thumbprint (identity.id), character length is %d instead of 64.", got)
	}
	jwk, err := identity.GetPrivateKey()
	if err != nil || jwk == nil {
		t.Fatalf("Could not get private key. ERROR: %s", err)
	}
	if got := jwk.KeyType(); got != "RSA" {
		t.Fatalf("key type must be %q, instead got %q", "RSA", got)
	}
}
|
package emotiva
import (
"fmt"
)
// Status requests the current value of the given notification names from
// the device, decoding the reply into target (when non-nil) and returning
// the raw response body.
//
// NOTE(review): the original body was byte-for-byte identical to
// Subscribe, wrapping the commands in <emotivaSubscription>. In the
// Emotiva Network Remote Control protocol a one-shot status query uses
// the <emotivaUpdate> element; this looked like a copy-paste slip.
func (ec *EmotivaController) Status(commands []string, target interface{}) (string, error) {
	var command string
	for _, c := range commands {
		command += fmt.Sprintf("<%s/>", c)
	}
	body, err := ec.rw(fmt.Sprintf("<emotivaUpdate>%s</emotivaUpdate>", command), ec.controlTX, ec.controlRX, target)
	if err != nil {
		return "", err
	}
	return body, nil
}
// Subscribe registers for change notifications on the given command names
// by sending one <emotivaSubscription> request containing an empty XML
// element per name. The raw response body is returned; when target is
// non-nil, ec.rw also decodes the reply into it.
func (ec *EmotivaController) Subscribe(commands []string, target interface{}) (string, error) {
	var elems string
	for _, name := range commands {
		elems += fmt.Sprintf("<%s/>", name)
	}
	request := fmt.Sprintf("<emotivaSubscription>%s</emotivaSubscription>", elems)
	body, err := ec.rw(request, ec.controlTX, ec.controlRX, target)
	if err != nil {
		return "", err
	}
	return body, nil
}
// Control sends a single <emotivaControl> command to the device and
// returns the raw response body; when target is non-nil, ec.rw also
// decodes the reply into it.
func (ec *EmotivaController) Control(command string, target interface{}) (string, error) {
	request := fmt.Sprintf("<emotivaControl>%s</emotivaControl>", command)
	body, err := ec.rw(request, ec.controlTX, ec.controlRX, target)
	if err != nil {
		return "", err
	}
	return body, nil
}
|
//+build wireinject
package network
import (
"github.com/google/wire"
"github.com/raba-jp/primus/pkg/backend"
"github.com/raba-jp/primus/pkg/operations/network/handlers"
"github.com/raba-jp/primus/pkg/operations/network/starlarkfn"
"github.com/raba-jp/primus/pkg/starlark"
)
// HTTPRequest is a google/wire injector declaration: under the
// `wireinject` build tag the wire code generator replaces this body with
// one that constructs starlarkfn.HTTPRequest from backend.NewFs,
// backend.NewHTTPClient and handlers.NewHTTPRequest.
// The trailing `return nil` is never part of the generated code; it only
// satisfies the compiler for this stub.
func HTTPRequest() starlark.Fn {
wire.Build(
backend.NewFs,
backend.NewHTTPClient,
handlers.NewHTTPRequest,
starlarkfn.HTTPRequest,
)
return nil
}
|
package common
// BlockStoreSourceType identifies how a block store is backed.
type BlockStoreSourceType string

const (
// FileStore : backed by a plain file.
FileStore BlockStoreSourceType = "file"
// BlockDevice : backed by a raw block device.
BlockDevice BlockStoreSourceType = "block"
)

// BlockStoreType identifies what the stored bytes represent.
type BlockStoreType string

const (
// KernelImage : the store holds a bootable kernel.
KernelImage BlockStoreType = "kernel"
// DiskImage : the store holds a disk image.
DiskImage BlockStoreType = "image"
)

// BlockStoreSource describes one block store: how it is backed, what it
// contains, where it lives, and which node/cluster owns it.
type BlockStoreSource struct {
SourceType BlockStoreSourceType
Type BlockStoreType
path string //the path to the back end type..
storage StorageDriver // driver used to access the backing storage
nodeUUID string // UUID of the owning node
clusterUUID string // UUID of the owning cluster
isImage bool // true when this source holds an image
}
// GetPath returns the path to this store on this device.
// NOTE(review): the original was a stub that always returned ""; the
// `path` field is documented as "the path to the back end type", so it is
// surfaced here. Confirm no extra composition (mount prefix etc.) is
// required.
func (bss *BlockStoreSource) GetPath() string {
	return bss.path
}
// GetURI returns the URI of this store on this device.
// TODO(review): stub — always returns ""; the intended URI scheme is not
// visible from this file.
func (bss *BlockStoreSource) GetURI() string {
//return the path to this store on this device...
return ""
}
// GetClusterURI returns the cluster-wide URI of this store.
// TODO(review): stub — always returns ""; the intended URI scheme is not
// visible from this file.
func (bss *BlockStoreSource) GetClusterURI() string {
//return the path to this store on this device...
return ""
}
// IsShared reports whether this target is shared for the specified
// cluster.
// TODO(review): stub — always returns (false, nil); the sharing model is
// not visible from this file.
func (bss *BlockStoreSource) IsShared(clusterUUID string) (bool, error) {
//check if this target is shared for the cluster specified
return false, nil
}
// IsKernel reports whether this source holds a kernel image.
// NOTE(review): the original stub always returned false and carried a
// comment copy-pasted from IsShared; derived here from the Type field.
func (bss *BlockStoreSource) IsKernel() bool {
	return bss.Type == KernelImage
}
// IsImage reports whether this source holds an image.
// NOTE(review): the original stub always returned false and carried a
// comment copy-pasted from IsShared; the struct's isImage field exists
// precisely for this — confirm it is kept up to date by the owner.
func (bss *BlockStoreSource) IsImage() bool {
	return bss.isImage
}
// IsDisk reports whether this source holds a disk image.
// NOTE(review): the original stub always returned false and carried a
// comment copy-pasted from IsShared; derived here from the Type field.
func (bss *BlockStoreSource) IsDisk() bool {
	return bss.Type == DiskImage
}
// IsBlockDevice reports whether this target is natively backed by a block
// device (as opposed to a file).
// NOTE(review): the original stub always returned (false, nil); derived
// here from the SourceType field.
func (bss *BlockStoreSource) IsBlockDevice() (bool, error) {
	return bss.SourceType == BlockDevice, nil
}
|
package main
import (
"encoding/json"
"fmt"
)
// Person demonstrates encoding/json's export rule: only the exported
// fields (First, Last, Age) appear in marshalled output.
type Person struct {
First string
Last string
Age int
notExported string // unexported — silently skipped by json.Marshal
}
// main demonstrates that json.Marshal only serialises exported fields:
// Person.notExported is dropped from the output.
func main() {
	p := Person{First: "Tyler", Last: "Mizuyabu", Age: 20, notExported: "This string won't be marshalled"}
	fmt.Println(p)
	//Will only marshal fields that can be exported
	bs, err := json.Marshal(p)
	if err != nil {
		// The original discarded this error with `_`.
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(bs) // raw []byte (prints as a slice of codepoints)
	fmt.Println()
	fmt.Println(string(bs)) // human-readable JSON
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpccalls
import (
"crypto/tls"
"io"
"net"
"net/rpc"
"net/rpc/jsonrpc"
)
// Client - to hold RPC connections streams
//
// Wraps one TLS connection to a bitmarkd node together with the JSON-RPC
// client built on top of it. Close releases both.
type Client struct {
conn net.Conn // underlying TLS connection
client *rpc.Client // JSON-RPC codec over conn
testnet bool // true when talking to a testnet node
verbose bool
handle io.Writer // if verbose is set output items here
}
// NewClient - create a RPC connection to a bitmarkd
//
// Dials the given "host:port" address over TLS and wraps the connection
// in a JSON-RPC client.
// NOTE(review): certificate verification is disabled
// (InsecureSkipVerify), so the peer is NOT authenticated — acceptable
// only for nodes using self-signed certificates; confirm this is
// intended.
func NewClient(testnet bool, connect string, verbose bool, handle io.Writer) (*Client, error) {
	conf := &tls.Config{
		InsecureSkipVerify: true,
	}
	conn, err := tls.Dial("tcp", connect, conf)
	if err != nil {
		return nil, err
	}
	client := &Client{
		conn:    conn,
		client:  jsonrpc.NewClient(conn),
		testnet: testnet,
		verbose: verbose,
		handle:  handle,
	}
	return client, nil
}
// Close - shutdown the bitmarkd connection
//
// Closes the JSON-RPC client first, then the underlying TLS connection.
// Errors from either Close are deliberately ignored: best-effort teardown.
func (c *Client) Close() {
c.client.Close()
c.conn.Close()
}
|
package app
import (
"fmt"
"log"
"net"
"reflect"
"github.com/math2001/boatsinker/server/utils"
"github.com/mitchellh/mapstructure"
)
// validBoats makes sure the fleet is legal: the right number of boats,
// sizes matching boatsizes, origins on the board, rotations in {0,1},
// every cell inside the map, and no two boats overlapping.
func validBoats(boats []Boat) error {
	if len(boats) != len(boatsizes) {
		return fmt.Errorf("Invalid number of boats. Should have %d, got %d", len(boatsizes),
			len(boats))
	}
	// ensure we have the right amount of boats of the right size
	var sizes = make(map[int]int)
	var occupied []Point
	for _, boat := range boats {
		if boat.Size <= 1 {
			return fmt.Errorf("Invalid boat size: should be >1, got %d", boat.Size)
		}
		// Validate the rotation BEFORE it is used in the bounds check
		// below (the original checked it afterwards) — also fixes the
		// "Invlaid" typo in the message.
		if boat.Rotation != 0 && boat.Rotation != 1 {
			return fmt.Errorf("Invalid boat rotation: should be 0 or 1, got %d",
				boat.Rotation)
		}
		if boat.Pos.X < 0 || boat.Pos.X >= width || boat.Pos.Y < 0 || boat.Pos.Y >= height {
			return fmt.Errorf("Invalid boat origin point: should be 0 < x or y < 10, got %s",
				boat.Pos)
		}
		// A boat occupies cells Pos .. Pos+Size-1, so it fits while
		// Pos+Size <= width/height. The original used ">=", which
		// wrongly rejected boats touching the last row/column.
		if (boat.Rotation == 0 && boat.Pos.X+boat.Size > width) || (boat.Rotation == 1 && boat.Pos.Y+boat.Size > height) {
			return fmt.Errorf("Invalid boat position: some of it is outside the map")
		}
		sizes[boat.Size]++
		var pt Point
		for i := 0; i < boat.Size; i++ {
			if boat.Rotation == 0 {
				pt = Point{boat.Pos.X + i, boat.Pos.Y}
			} else {
				pt = Point{boat.Pos.X, boat.Pos.Y + i}
			}
			for _, t := range occupied {
				if t == pt {
					return fmt.Errorf("Invalid boats position: they collide on the case %s", pt)
				}
			}
			occupied = append(occupied, pt)
		}
	}
	if !reflect.DeepEqual(sizes, boatsizes) {
		// The original swapped the arguments: boatsizes is what we
		// *should* have, sizes is what we *got*.
		return fmt.Errorf("Invalid boat sizes: should have %v, got %v", boatsizes, sizes)
	}
	return nil
}
// boardsetup is the decoded payload of a "board setup" client message:
// the message kind plus the fleet the player placed.
type boardsetup struct {
Kind string
Boats []Boat
}
// handleBoardSetup decodes a board-setup message and, when the fleet is
// valid, stores it as the sending player's board.
func handleBoardSetup(players map[*net.Conn]*Player, msg utils.Message) error {
	var b boardsetup
	if err := mapstructure.Decode(msg.Data, &b); err != nil {
		// A malformed client message must not kill the whole server:
		// the original called log.Fatalf here, which exits the process.
		log.Printf("Couldn't convert msg to boardsetup: %s", err)
		return fmt.Errorf("Couldn't convert msg to boardsetup: %s", err)
	}
	if err := validBoats(b.Boats); err != nil {
		return err
	}
	// Guard against an unknown sender; indexing a missing map entry
	// would have returned a nil *Player and panicked on assignment.
	player, ok := players[msg.From]
	if !ok {
		return fmt.Errorf("Unknown player for connection %v", msg.From)
	}
	player.Board = Board{
		Boats: b.Boats,
	}
	return nil
}
|
package main
import (
"fmt"
"libofm"
)
// main sends a canned test message through libofm and prints the result
// of the send attempt (nil on success).
func main() {
	body := "Dear my golang"
	msg := libofm.NewEmail("seafooler@hust.edu.cn", "test golang email", body)
	fmt.Println(libofm.SendEmail(msg))
}
|
package system
import (
"errors"
"github.com/caddyserver/caddy"
)
// ParseCorefile parses a CoreDNS Corefile's 'contracore' config block.
// Recognised fields (see `fields`) assign the package-level ContraDBURL /
// ContraLogURL variables; anything else panics, matching the original
// fail-fast behavior during plugin setup.
//
// The original used `goto next` with the label placed immediately before
// the loop's closing brace — Go requires a statement after a label, so
// that does not compile ("missing statement after label"). The label is
// replaced by a `matched` flag with identical control flow.
func ParseCorefile(c *caddy.Controller) {
	c.Next()
	if c.Val() != "contracore" {
		panic(errors.New("unexpected plugin name '" + c.Val() + "'"))
	}
	c.Next()
	if c.Val() != "{" {
		panic(errors.New("expected opening brace"))
	}
	for c.Next() {
		if c.Val() == "}" {
			break
		}
		matched := false
		for _, field := range fields {
			if field == c.Val() {
				switch c.Val() {
				case "ContraDBURL":
					c.Next()
					ContraDBURL = c.Val()
				case "ContraLogURL":
					c.Next()
					ContraLogURL = c.Val()
				default:
					// fields and the switch must stay in sync.
					panic(errors.New("unhandled field '" + c.Val() + "'"))
				}
				matched = true
				break
			}
		}
		if !matched {
			panic(errors.New("unexpected token '" + c.Val() + "'"))
		}
	}
}
// An array of valid 'contracore' config fields.
// Each entry must have a matching case in ParseCorefile's switch.
var fields = [...]string{"ContraDBURL", "ContraLogURL"}
|
package main
import (
"fmt"
)
// is_prime reports whether n is a prime number.
//
// Fixes over the original:
//   - n < 2 (0, 1, negatives) now returns false; the old loop never ran
//     for those inputs and returned true.
//   - trial division stops once i*i > n instead of scanning every value
//     below n, turning O(n) into O(sqrt n).
func is_prime(n int64) bool {
	if n < 2 {
		return false
	}
	for i := int64(2); i*i <= n; i++ {
		if n%i == 0 {
			return false
		}
	}
	return true
}
func main() {
var count int64 = 0
var max int64 = 100000
var i int64
for i = 2; i <= max; i++ {
if is_prime(i) {
count++
}
}
fmt.Printf("1-%d: %d prime numbers\n", max, count)
} |
package main
import (
"encoding/csv"
"log"
"os"
"strings"
"github.com/gocolly/colly"
"github.com/wagnerfonseca/scraper-fundamentus/model"
)
// main scrapes fundamentus.com.br: the root collector walks the symbol
// links on detalhes.php, a cloned collector scrapes each symbol's detail
// table into m, and every completed symbol is written as one CSV row to
// fundamentus.csv.
func main() {
fileName := "fundamentus.csv"
file, err := os.Create(fileName)
if err != nil {
log.Fatalf("Cannot create file %q: %s\n", fileName, err)
return
}
defer file.Close()
writer := csv.NewWriter(file)
defer writer.Flush()
writer.Write(header())
// Instantiate default collector
c := colly.NewCollector()
// Create another collector to scrape symbols details
detailCollector := c.Clone()
// m maps symbol name -> (metric label -> value), filled by detailCollector.
m := make(map[string]map[string]string)
// symbol is set by detailCollector's handler. colly visits are
// synchronous by default, so the value is ready when Visit returns.
// NOTE(review): this would break if the collectors were made async.
var symbol string = ""
// Before making a request print "Visiting ..."
c.OnRequest(func(r *colly.Request) {
log.Println("visiting", r.URL.String())
})
// On every a HTML element which has name attribute call callback
c.OnHTML(`a[href]`, func(e *colly.HTMLElement) {
// Activate detailCollector if the link contains details
symbolURL := e.Request.AbsoluteURL(e.Attr("href"))
if strings.Index(symbolURL, "detalhes.php?papel=") != -1 {
log.Println("==>", symbolURL)
detailCollector.Visit(symbolURL)
s := strings.Replace(symbolURL, "https://www.fundamentus.com.br/detalhes.php?papel=", "", -1)
// Local *model.Symbol; intentionally distinct from the outer
// string `symbol` used by the detail handler above.
symbol := &model.Symbol{}
_, ok := m[s]
if ok {
for k, v := range m[s] {
model.Build(symbol, k, v)
}
writer.Write(buildRow(symbol))
}
}
})
// Extract details of the symbol
detailCollector.OnHTML("table tr", func(e *colly.HTMLElement) {
// The "Papel" row names the symbol; remember it for subsequent rows.
if strings.Contains(e.ChildText("td:first-child"), "Papel") {
symbol = strings.Replace(e.ChildText("td:nth-child(2)"), "?", "", -1)
}
// Each table row carries up to three label/value column pairs.
if e.ChildText("td:first-child") != "" {
add(m, symbol, strings.Replace(e.ChildText("td:first-child"), "?", "", -1), e.ChildText("td:nth-child(2)"))
}
if e.ChildText("td:nth-child(3)") != "" {
add(m, symbol, strings.Replace(e.ChildText("td:nth-child(3)"), "?", "", -1), e.ChildText("td:nth-child(4)"))
}
if e.ChildText("td:nth-child(5)") != "" {
add(m, symbol, strings.Replace(e.ChildText("td:nth-child(5)"), "?", "", -1), e.ChildText("td:nth-child(6)"))
}
})
c.Visit("https://www.fundamentus.com.br/detalhes.php")
log.Printf("Scraping finished, check file %q for results\n", fileName)
// Display collector's statistics
log.Println(c)
}
// add stores value under m[symbol][key], creating the per-symbol inner
// map on first use.
func add(m map[string]map[string]string, symbol, key, value string) {
	if _, found := m[symbol]; !found {
		m[symbol] = make(map[string]string)
	}
	m[symbol][key] = value
}
// buildRow build row for csv file
//
// Flattens one *model.Symbol into a CSV record. The field order here must
// stay in lock-step with the column order returned by header().
func buildRow(symbol *model.Symbol) []string {
return []string{
symbol.Papel,
symbol.Cotacao,
symbol.DataUtlCot,
symbol.Empresa,
symbol.Setor,
symbol.Subsetor,
symbol.VolMed,
symbol.ValorMercado,
symbol.NroAcoes,
symbol.PL,
symbol.PVP,
symbol.VPA,
symbol.PEBIT,
symbol.Ativo,
symbol.PAtivCircLiq,
symbol.PAtivos,
symbol.PatrimLiq,
symbol.ReceitaLiquida,
symbol.LucroLiquido,
symbol.EBIT,
symbol.EVEBITDA,
symbol.EVEBIT,
symbol.MargLiquida,
symbol.MargBruta,
symbol.MargEBIT,
symbol.PCapGiro,
symbol.DivYield,
symbol.DividaLiquida,
symbol.DividaBruta,
symbol.ROIC,
symbol.ROE,
symbol.LiquidezCorrente,
symbol.DivBrPatrim,
symbol.GiroAtivos,
}
}
// header build the header to csv file
//
// Column titles for the CSV; the order must stay in lock-step with the
// field order produced by buildRow.
func header() []string {
return []string{
"Papel",
"Cotacao",
"Data Utl Cot.",
"Empresa",
"Setor",
"Subsetor",
"Vol. Med.",
"Valor Mercado",
"Nro. Acoes",
"P/L",
"P/VP",
"VPA",
"P/EBIT",
"Ativo",
"P/Ativ Circ Liq",
"P/Ativos",
"Patrim Liquido",
"Receita Liquida",
"Lucro Liquido",
"EBIT",
"EVEBITDA",
"EVEBIT",
"Marg. Liquida",
"Marg. Bruta",
"Marg. EBIT",
"P/CapGiro",
"Div. Yield",
"Divida Liquida",
"Divida Bruta",
"ROIC",
"ROE",
"Liquidez Corrente",
"Div. Br Patrim",
"Giro Ativos",
}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"fmt"
"time"
"chromiumos/tast/remote/firmware/fixture"
"chromiumos/tast/ssh"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the ECWatchdog test with the tast framework: a remote,
// servo-based firmware test restricted to devices with a Chrome EC,
// running under the NormalMode fixture.
func init() {
testing.AddTest(&testing.Test{
Func: ECWatchdog,
Desc: "Servo based EC watchdog test",
Contacts: []string{"js@semihalf.com", "chromeos-firmware@google.com"},
Attr: []string{"group:firmware", "firmware_unstable"},
Fixture: fixture.NormalMode,
HardwareDeps: hwdep.D(hwdep.ChromeEC()),
})
}
// ECWatchdog forces an EC watchdog reset by spin-waiting on the EC
// console past the watchdog timeout, then verifies the DUT actually
// rebooted by comparing boot IDs before and after.
func ECWatchdog(ctx context.Context, s *testing.State) {
	const (
		// Delay of spin-wait in ms. Nuvoton boards set the hardware watchdog to
		// 3187.5ms and also sets a timer to 2200ms. Set the timeout long enough to
		// exceed the hardware watchdog timer because the timer isn't 100% reliable.
		// If there are other platforms that use a longer watchdog timeout, this
		// may need to be adjusted.
		watchdogDelay = 3700 * time.Millisecond
		// Delay of EC power on.
		ecBootDelay = 1000 * time.Millisecond
	)
	var (
		oldBootID string
		newBootID string
		err       error
	)
	h := s.FixtValue().(*fixture.Value).Helper
	if err := h.RequireServo(ctx); err != nil {
		s.Fatal("Failed to connect to servod")
	}
	if oldBootID, err = h.Reporter.BootID(ctx); err != nil {
		s.Fatal("Failed to fetch current boot ID: ", err)
	}
	s.Log("Trigger a watchdog reset and power on system again")
	err = h.DUT.Conn().CommandContext(ctx, "sync").Run(ssh.DumpLogOnError)
	if err != nil {
		s.Fatal("Failed to sync IO on DUT before calling watchdog: ", err)
	}
	s.Log("Trigger watchdog event")
	// The EC console's "waitms" argument is in milliseconds, but
	// watchdogDelay is a time.Duration counted in nanoseconds: formatting
	// it with %d directly would have sent "waitms 3700000000". Convert
	// explicitly.
	err = h.Servo.RunECCommand(ctx, fmt.Sprintf("waitms %d", watchdogDelay.Milliseconds()))
	if err != nil {
		s.Fatal("Failed to send watchdog timer command to EC: ", err)
	}
	s.Log("Sleep during watchdog reset")
	if err = testing.Sleep(ctx, watchdogDelay+ecBootDelay); err != nil {
		s.Fatal("Failed to sleep during waiting for EC to get up: ", err)
	}
	s.Log("Wait for DUT to reconnect")
	if err = h.DUT.WaitConnect(ctx); err != nil {
		s.Fatal("Failed to reconnect to DUT: ", err)
	}
	if newBootID, err = h.Reporter.BootID(ctx); err != nil {
		s.Fatal("Failed to fetch current boot ID: ", err)
	}
	if newBootID == oldBootID {
		s.Fatal("Failed to reboot trigger watchdog reset, old boot ID is the same as new boot ID")
	}
}
|
package service
import (
"errors"
"fmt"
"net/http"
"golang-seed/apps/auth/pkg/authconst"
"golang-seed/apps/auth/pkg/models"
"golang-seed/apps/auth/pkg/repo"
"golang-seed/pkg/database"
"golang-seed/pkg/httperror"
"golang-seed/pkg/pagination"
"golang-seed/pkg/sorting"
)
// PermissionsService exposes CRUD operations over permission records.
// It is stateless, so the zero value is usable.
type PermissionsService struct {
}

// NewPermissionsService returns a ready-to-use PermissionsService.
func NewPermissionsService() *PermissionsService {
return &PermissionsService{}
}
// GetByID loads the permission whose code equals id.
// Returns a 404 httperror when no row matches and a 500 httperror on any
// other database failure.
func (s PermissionsService) GetByID(id string) (*models.Permission, error) {
	permission := &models.Permission{Code: id}
	if err := repo.Repo.Permissions().Get(permission); err != nil {
		if !errors.Is(err, database.ErrRecordNotFound) {
			return nil, httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
		}
		return nil, httperror.ErrorCauseT(
			err,
			http.StatusNotFound,
			authconst.GeneralErrorRegisterNotFoundParams,
			authconst.PermissionsPermissions,
			fmt.Sprintf("code : %s", id))
	}
	return permission, nil
}
// Get loads the permission matching the populated fields of permission,
// writing the result back into the same struct.
// Returns a 404 httperror when no row matches and a 500 httperror on any
// other database failure.
func (s PermissionsService) Get(permission *models.Permission) error {
	err := repo.Repo.Permissions().WhereModel(permission).Get(permission)
	if err != nil {
		if errors.Is(err, database.ErrRecordNotFound) {
			return httperror.ErrorCauseT(
				err,
				http.StatusNotFound,
				authconst.GeneralErrorRegisterNotFoundParams,
				authconst.PermissionsPermissions,
				permission.String())
		}
		// The original constructed this error and dropped it, silently
		// returning nil on database failures.
		return httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	return nil
}
// GetAll returns every permission matching params, ordered by sort.
// Returns 400 on a bad filter/sort, 404 when nothing matches, and 500 on
// other database failures.
func (s PermissionsService) GetAll(params map[string]interface{}, sort sorting.Sort) ([]models.Permission, error) {
	var permissions []models.Permission
	collection, err := repo.Repo.Permissions().WhereMap(params)
	if err != nil {
		// The original built this 400 error and discarded it, then kept
		// using the broken collection.
		return nil, httperror.ErrorCauseT(err, http.StatusBadRequest, err.Error())
	}
	collection, err = collection.Order(sort)
	if err != nil {
		return nil, httperror.ErrorCauseT(err, http.StatusBadRequest, err.Error())
	}
	err = collection.Find(&permissions)
	if err != nil {
		if errors.Is(err, database.ErrRecordNotFound) {
			return nil, httperror.ErrorCauseT(
				err,
				http.StatusNotFound,
				authconst.GeneralErrorRegisterNotFound,
				authconst.PermissionsPermissions)
		}
		return nil, httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	return permissions, nil
}
// GetAllPaged returns one page of permissions matching params, ordered by
// sort, together with the total (un-paged) count.
func (s PermissionsService) GetAllPaged(params map[string]interface{}, sort sorting.Sort, pageable pagination.Pageable) (*pagination.Page, error) {
	var permissions []models.Permission
	collection, err := repo.Repo.Permissions().WhereMap(params)
	if err != nil {
		// The original built this 400 error and discarded it.
		return nil, httperror.ErrorCauseT(err, http.StatusBadRequest, err.Error())
	}
	// Keep the unsorted collection: Count below must run on the un-paged
	// query. (Renamed from the original's typo `collectiono`.)
	ordered, err := collection.Order(sort)
	if err != nil {
		return nil, httperror.ErrorCauseT(err, http.StatusBadRequest, err.Error())
	}
	err = ordered.Pageable(pageable).Find(&permissions)
	if err != nil {
		return nil, httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	var count int64
	err = collection.Count(&count)
	if err != nil {
		return nil, httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	return pagination.NewPage(pageable, int(count), permissions), nil
}
// Create inserts a new permission, refusing with 409 when a permission
// with the same code already exists.
func (s PermissionsService) Create(model *models.Permission) error {
	exists, err := repo.Repo.Permissions().Exists(model)
	if err != nil {
		// Originally dropped; a DB failure here must abort the create.
		return httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	if exists {
		return httperror.ErrorT(
			http.StatusConflict,
			authconst.GeneralErrorRegisterAlreadyExists,
			authconst.PermissionsPermission,
			fmt.Sprintf("code : %s", model.Code))
	}
	err = repo.Repo.Permissions().Create(model)
	if err != nil {
		// Originally dropped, silently reporting success on failure.
		return httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	return nil
}
// Update overwrites an existing permission identified by model.Code,
// preserving its original CreatedAt. Returns 404 when it does not exist.
func (s PermissionsService) Update(model *models.Permission) error {
	permission := &models.Permission{Code: model.Code}
	exists, err := repo.Repo.Permissions().Exists(permission)
	if err != nil {
		// Originally dropped, silently treating a DB failure as success.
		return httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	if !exists {
		return httperror.ErrorT(
			http.StatusNotFound,
			authconst.GeneralErrorRegisterNotFoundParams,
			// Was authconst.ClientsClients — a copy-paste from the
			// clients service; this is the permissions service.
			authconst.PermissionsPermissions,
			fmt.Sprintf("code : %s", model.Code))
	}
	model.CreatedAt = permission.CreatedAt
	err = repo.Repo.Permissions().WhereModel(&models.Permission{Code: model.Code}).Update(model)
	if err != nil {
		// Originally dropped, silently reporting success on failure.
		return httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	return nil
}
// Delete removes the permission with the given code. Returns 404 when it
// does not exist and 500 on other database failures.
func (s PermissionsService) Delete(id string) error {
	permission := &models.Permission{
		Code: id,
	}
	err := repo.Repo.Permissions().Delete(permission)
	if err != nil {
		if errors.Is(err, database.ErrRecordNotFound) {
			return httperror.ErrorCauseT(
				err,
				http.StatusNotFound,
				authconst.GeneralErrorRegisterNotFoundParams,
				authconst.PermissionsPermissions,
				fmt.Sprintf("code : %s", id))
		}
		// Originally dropped, silently returning nil on failure.
		return httperror.ErrorCauseT(err, http.StatusInternalServerError, authconst.GeneralErrorAccessingDatabase)
	}
	return nil
}
|
package parser_test
import (
"github.com/bytesparadise/libasciidoc/pkg/types"
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Ginkgo spec: checks how inline quoted-text markup (bold, italic,
// monospace) is parsed in final documents, including the cases where
// single punctuation inside a word must NOT become markup.
var _ = Describe("inline elements", func() {
Context("in final documents", func() {
// Single-asterisk bold spanning the whole line.
It("bold text without parenthesis", func() {
source := "*some bold content*"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.QuotedText{
Kind: types.SingleQuoteBold,
Elements: []interface{}{
&types.StringElement{Content: "some bold content"},
},
},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// Surrounding parentheses stay plain text around the bold span.
It("bold text within parenthesis", func() {
source := "(*some bold content*)"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{Content: "("},
&types.QuotedText{
Kind: types.SingleQuoteBold,
Elements: []interface{}{
&types.StringElement{Content: "some bold content"},
},
},
&types.StringElement{Content: ")"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// A single '*' inside a word is not markup.
It("non-bold text within words", func() {
source := "some*bold*content"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{Content: "some*bold*content"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// A single '_' inside a word is not markup.
It("non-italic text within words", func() {
source := "some_italic_content"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{Content: "some_italic_content"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// A single '`' inside a word is not markup.
It("non-monospace text within words", func() {
source := "some`monospace`content"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{Content: "some`monospace`content"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// Constrained (single-asterisk) bold must not bind when directly
// followed by more text.
It("invalid bold portion of text", func() {
source := "*foo*bar"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{Content: "*foo*bar"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// Unconstrained (double-asterisk) bold does bind mid-word.
It("valid bold portion of text", func() {
source := "**foo**bar"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.QuotedText{
Kind: types.DoubleQuoteBold,
Elements: []interface{}{
&types.StringElement{Content: "foo"},
},
},
&types.StringElement{Content: "bar"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
// Non-ASCII latin characters pass through untouched.
It("latin characters", func() {
source := "à bientôt"
expected := &types.Document{
Elements: []interface{}{
&types.Paragraph{
Elements: []interface{}{
&types.StringElement{Content: "à bientôt"},
},
},
},
}
Expect(ParseDocument(source)).To(MatchDocument(expected))
})
})
})
|
/**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"fmt"
"sync"
"time"
"github.com/IBM/appconfiguration-go-sdk/lib/internal/constants"
messages "github.com/IBM/appconfiguration-go-sdk/lib/internal/messages"
"github.com/IBM/appconfiguration-go-sdk/lib/internal/utils/log"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/robfig/cron"
)
// Usages : Usages struct
//
// One aggregated evaluation record: which feature OR property (exactly
// one of the two IDs is set), for which entity/segment, how many times,
// and when it was last evaluated.
type Usages struct {
FeatureID string `json:"feature_id,omitempty"`
PropertyID string `json:"property_id,omitempty"`
EntityID string `json:"entity_id"`
SegmentID interface{} `json:"segment_id"` // string, or nil for the default segment
EvaluationTime string `json:"evaluation_time"`
Count int64 `json:"count"`
}
// CollectionUsages : CollectionUsages struct
//
// Request payload for the metering endpoint: all usages recorded for one
// collection within one environment.
type CollectionUsages struct {
CollectionID string `json:"collection_id"`
EnvironmentID string `json:"environment_id"`
Usages []Usages `json:"usages"`
}

// featureMetric is the in-memory leaf record: the evaluation count and
// the timestamp of the most recent evaluation.
type featureMetric struct {
count int64
evaluationTime string
}
// Metering : Metering struct
//
// Accumulates evaluation counts in memory, keyed (in order) by
// guid -> environmentID -> collectionID -> feature/property ID ->
// entityID -> segmentID. mu guards both nested maps.
type Metering struct {
CollectionID string
EnvironmentID string
guid string
mu sync.Mutex
meteringFeatureData map[string]map[string]map[string]map[string]map[string]map[string]featureMetric //guid->EnvironmentID->CollectionID->featureId->entityId->segmentId
meteringPropertyData map[string]map[string]map[string]map[string]map[string]map[string]featureMetric //guid->EnvironmentID->CollectionID->propertyId->entityId->segmentId
}

// SendInterval is the cron interval at which accumulated usage data is
// flushed to the server.
const SendInterval = "10m"

// meteringInstance is the lazily-created package singleton; see
// GetMeteringInstance.
var meteringInstance *Metering
// GetMeteringInstance : Get Metering Instance
//
// Lazily creates the package-level singleton and starts a background cron
// job that flushes usage data every SendInterval.
// NOTE(review): this check-then-create is not goroutine-safe; two
// concurrent first calls could each see nil and create two instances
// (and two cron senders). Confirm all first-use callers run on a single
// goroutine, or guard with sync.Once.
func GetMeteringInstance() *Metering {
log.Debug(messages.RetrieveMeteringInstance)
if meteringInstance == nil {
meteringInstance = &Metering{}
guidFeatureMap := make(map[string]map[string]map[string]map[string]map[string]map[string]featureMetric)
guidPropertyMap := make(map[string]map[string]map[string]map[string]map[string]map[string]featureMetric)
meteringInstance.meteringFeatureData = guidFeatureMap
meteringInstance.meteringPropertyData = guidPropertyMap
// start sending metering data in the background
log.Debug(messages.StartSendingMeteringData)
c := cron.New()
c.AddFunc("@every "+SendInterval, meteringInstance.sendMetering)
c.Start()
}
return meteringInstance
}
// Init : Init
//
// Stores the service-instance guid plus the environment and collection
// that subsequent RecordEvaluation calls will be attributed to.
func (mt *Metering) Init(guid string, environmentID string, collectionID string) {
mt.guid = guid
mt.EnvironmentID = environmentID
mt.CollectionID = collectionID
}
// addMetering records one evaluation under
// guid -> environmentID -> collectionID -> feature/property -> entityID
// -> segmentID, creating any missing intermediate maps and otherwise
// bumping the existing counter and refreshing its evaluation time.
// When featureID is non-empty the feature tree is used; otherwise the
// property tree, keyed by propertyID.
//
// Behavior-preserving rewrite of the original's six-level nested
// if/else ladder, with two corrections: the mutex unlock is deferred (the
// original left the lock held forever if a panic was swallowed by
// GracefullyHandleError), and the receiver mt is used instead of the
// package singleton meteringInstance (same object in practice, but the
// receiver is the correct reference).
func (mt *Metering) addMetering(guid string, environmentID string, collectionID string, entityID string, segmentID string, featureID string, propertyID string) {
	log.Debug(messages.AddMetering)
	defer GracefullyHandleError()
	mt.mu.Lock()
	defer mt.mu.Unlock()
	t := time.Now().UTC()
	formattedTime := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02dZ",
		t.Year(), t.Month(), t.Day(),
		t.Hour(), t.Minute(), t.Second())
	var meteringData map[string]map[string]map[string]map[string]map[string]map[string]featureMetric
	var modifyKey string
	if featureID != "" {
		meteringData = mt.meteringFeatureData
		modifyKey = featureID
	} else {
		meteringData = mt.meteringPropertyData
		modifyKey = propertyID
	}
	// Walk down the six levels, creating each missing map along the way.
	envMap, ok := meteringData[guid]
	if !ok {
		envMap = make(map[string]map[string]map[string]map[string]map[string]featureMetric)
		meteringData[guid] = envMap
	}
	collMap, ok := envMap[environmentID]
	if !ok {
		collMap = make(map[string]map[string]map[string]map[string]featureMetric)
		envMap[environmentID] = collMap
	}
	keyMap, ok := collMap[collectionID]
	if !ok {
		keyMap = make(map[string]map[string]map[string]featureMetric)
		collMap[collectionID] = keyMap
	}
	entityMap, ok := keyMap[modifyKey]
	if !ok {
		entityMap = make(map[string]map[string]featureMetric)
		keyMap[modifyKey] = entityMap
	}
	segmentMap, ok := entityMap[entityID]
	if !ok {
		segmentMap = make(map[string]featureMetric)
		entityMap[entityID] = segmentMap
	}
	// Leaf: bump the existing metric or start a fresh one at count 1.
	if metric, ok := segmentMap[segmentID]; ok {
		metric.count++
		metric.evaluationTime = formattedTime
		segmentMap[segmentID] = metric
	} else {
		segmentMap[segmentID] = featureMetric{count: 1, evaluationTime: formattedTime}
	}
}
// RecordEvaluation : Record Evaluation
//
// Records one feature/property evaluation against the guid, environment
// and collection previously set via Init.
func (mt *Metering) RecordEvaluation(featureID string, propertyID string, entityID string, segmentID string) {
log.Debug(messages.RecordEval)
mt.addMetering(mt.guid, mt.EnvironmentID, mt.CollectionID, entityID, segmentID, featureID, propertyID)
}
// buildRequestBody flattens one flushed metering tree into
// CollectionUsages payloads, appended into guidMap keyed by
// service-instance guid. key selects which Usages field receives the ID:
// "feature_id" for the feature tree, anything else for the property tree.
func (mt *Metering) buildRequestBody(sendMeteringData map[string]map[string]map[string]map[string]map[string]map[string]featureMetric, guidMap map[string][]CollectionUsages, key string) {
for guid, environmentMap := range sendMeteringData {
var collectionUsageArray []CollectionUsages
if _, ok := guidMap[guid]; !ok {
guidMap[guid] = collectionUsageArray
}
for environmentID, collectionMap := range environmentMap {
for collectionID, featureMap := range collectionMap {
var usagesArray []Usages
for featureID, entityMap := range featureMap {
for entityID, segmentMap := range entityMap {
for segmentID, val := range segmentMap {
var usages Usages
if key == "feature_id" {
usages.FeatureID = featureID
} else {
usages.PropertyID = featureID
}
// The sentinel default segment is reported as JSON null.
if segmentID == constants.DefaultSegmentID {
usages.SegmentID = nil
} else {
usages.SegmentID = segmentID
}
usages.EntityID = entityID
usages.EvaluationTime = val.evaluationTime
usages.Count = val.count
usagesArray = append(usagesArray, usages)
}
}
}
var collectionUsageElem CollectionUsages
collectionUsageElem.CollectionID = collectionID
collectionUsageElem.EnvironmentID = environmentID
collectionUsageElem.Usages = usagesArray
collectionUsageArray = append(collectionUsageArray, collectionUsageElem)
}
}
guidMap[guid] = append(guidMap[guid], collectionUsageArray...)
}
}
// sendMetering is the periodic (cron-driven) flush: it swaps the
// accumulated feature/property data out under the lock, converts it into
// per-guid CollectionUsages payloads, and sends each payload — splitting
// any payload whose usage count exceeds constants.DefaultUsageLimit.
func (mt *Metering) sendMetering() {
	log.Debug(messages.TenMinExpiry)
	defer GracefullyHandleError()
	log.Debug(mt.meteringFeatureData)
	log.Debug(mt.meteringPropertyData)
	mt.mu.Lock()
	if len(mt.meteringFeatureData) <= 0 && len(mt.meteringPropertyData) <= 0 {
		mt.mu.Unlock()
		return
	}
	// Swap the accumulated trees out under the lock so evaluations keep
	// recording into fresh maps while we serialise and send. (The
	// original allocated throwaway maps with make() and immediately
	// overwrote the references — same behavior, wasted allocations.)
	sendFeatureData := mt.meteringFeatureData
	mt.meteringFeatureData = make(map[string]map[string]map[string]map[string]map[string]map[string]featureMetric)
	sendPropertyData := mt.meteringPropertyData
	mt.meteringPropertyData = make(map[string]map[string]map[string]map[string]map[string]map[string]featureMetric)
	mt.mu.Unlock()
	guidMap := make(map[string][]CollectionUsages)
	if len(sendFeatureData) > 0 {
		mt.buildRequestBody(sendFeatureData, guidMap, "feature_id")
	}
	if len(sendPropertyData) > 0 {
		mt.buildRequestBody(sendPropertyData, guidMap, "property_id")
	}
	for guid, usagesList := range guidMap {
		for _, collectionUsage := range usagesList {
			count := len(collectionUsage.Usages)
			if count > constants.DefaultUsageLimit {
				mt.sendSplitMetering(guid, collectionUsage, count)
			} else {
				mt.sendToServer(guid, collectionUsage)
			}
		}
	}
}
// sendSplitMetering splits an oversized CollectionUsages into chunks of
// at most constants.DefaultUsageLimit usages and sends each chunk as its
// own request.
func (mt *Metering) sendSplitMetering(guid string, collectionUsages CollectionUsages, count int) {
	subUsages := collectionUsages.Usages
	// Loop while unsent usages remain. The original condition was
	// `lim <= count`, which sent one extra, EMPTY chunk whenever count
	// was an exact multiple of DefaultUsageLimit.
	for lim := 0; lim < count; lim += constants.DefaultUsageLimit {
		endIndex := lim + constants.DefaultUsageLimit
		if endIndex > count {
			endIndex = count
		}
		var chunk CollectionUsages
		chunk.CollectionID = collectionUsages.CollectionID
		chunk.EnvironmentID = collectionUsages.EnvironmentID
		chunk.Usages = append(chunk.Usages, subUsages[lim:endIndex]...)
		mt.sendToServer(guid, chunk)
	}
}
// sendToServer posts a single CollectionUsages payload to the metering usage
// endpoint of the instance identified by mt.guid.
// NOTE(review): the guid parameter is unused — the URL is built from mt.guid;
// confirm whether the per-call guid from guidMap should be used instead.
func (mt *Metering) sendToServer(guid string, collectionUsages CollectionUsages) {
	log.Debug(messages.SendMeteringServer)
	log.Debug(collectionUsages)
	builder := core.NewRequestBuilder(core.POST)
	pathParamsMap := map[string]string{
		"guid": mt.guid,
	}
	// Errors while building the request are silently dropped, matching the
	// best-effort behavior of the original implementation.
	if _, err := builder.ResolveRequestURL(urlBuilderInstance.GetBaseServiceURL(), `/apprapp/events/v1/instances/{guid}/usage`, pathParamsMap); err != nil {
		return
	}
	builder.AddHeader("Accept", "application/json")
	builder.AddHeader("Content-Type", "application/json")
	builder.AddHeader("User-Agent", constants.UserAgent)
	if _, err := builder.SetBodyContentJSON(collectionUsages); err != nil {
		return
	}
	response := GetAPIManagerInstance().Request(builder)
	if response != nil && response.StatusCode >= 200 && response.StatusCode <= 299 {
		log.Debug(messages.SendMeteringSuccess)
		return
	}
	// Bug fix: the previous code logged the error from SetBodyContentJSON,
	// which is always nil on this path; log the response instead so failures
	// carry actual information.
	log.Error(messages.SendMeteringServerErr, response)
}
|
package websockethub
import (
"context"
"errors"
"net/http"
"sync/atomic"
"nhooyr.io/websocket"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/hive.go/runtime/event"
)
var (
ErrWebsocketServerUnavailable = errors.New("websocket server unavailable")
ErrClientDisconnected = errors.New("client was disconnected")
)
// ClientConnectionEvent is the payload of the ClientConnected and
// ClientDisconnected events; it identifies the affected client.
type ClientConnectionEvent struct {
	// ID is the identifier of the client that connected or disconnected.
	ID ClientID
}
// Events contains all the events that are triggered by the websocket hub.
// Both events carry a *ClientConnectionEvent identifying the client.
type Events struct {
	// A ClientConnected event is triggered, when a new client has connected to the websocket hub.
	ClientConnected *event.Event1[*ClientConnectionEvent]
	// A ClientDisconnected event is triggered, when a client has disconnected from the websocket hub.
	ClientDisconnected *event.Event1[*ClientConnectionEvent]
}
// newEvents constructs an Events instance with freshly initialized
// connect/disconnect event triggers.
func newEvents() *Events {
	events := &Events{}
	events.ClientConnected = event.New1[*ClientConnectionEvent]()
	events.ClientDisconnected = event.New1[*ClientConnectionEvent]()
	return events
}
// Hub maintains the set of active clients and broadcasts messages to the clients.
// The clients map is only accessed from the Run goroutine; external callers
// interact with the hub through the register/unregister/broadcast channels.
type Hub struct {
	// used Logger instance.
	logger *logger.Logger
	// the accept options of the websocket per client.
	acceptOptions *websocket.AcceptOptions
	// registered clients.
	clients map[*Client]struct{}
	// maximum size of queued messages that should be sent to the peer.
	clientSendChannelSize int
	// inbound messages from the clients.
	broadcast chan *message
	// register requests from the clients.
	register chan *Client
	// unregister requests from clients.
	unregister chan *Client
	// context of the websocket hub; nil until Run is called.
	ctx context.Context
	// indicates that the websocket hub was shut down
	// (true from construction until Run is called).
	shutdownFlag atomic.Bool
	// indicates the max amount of bytes that will be read from a client, i.e. the max message size
	clientReadLimit int64
	// lastClientID holds the ClientID of the last connected client
	lastClientID atomic.Uint32
	// events of the websocket hub
	events *Events
}
// message is a message that is sent to the broadcast channel.
type message struct {
	// data is the payload forwarded to every matching client's send channel.
	data interface{}
	// dontDrop indicates the message must not be discarded when queues are full.
	dontDrop bool
}
// NewHub creates a new websocket hub.
// The returned hub is marked as shut down until Run is called, so calls such
// as BroadcastMsg or ServeWebsocket return ErrWebsocketServerUnavailable
// before the hub has been started.
func NewHub(logger *logger.Logger, acceptOptions *websocket.AcceptOptions, broadcastQueueSize int, clientSendChannelSize int, clientReadLimit int64) *Hub {
	h := &Hub{
		logger:                logger,
		acceptOptions:         acceptOptions,
		clientSendChannelSize: clientSendChannelSize,
		clients:               make(map[*Client]struct{}),
		broadcast:             make(chan *message, broadcastQueueSize),
		register:              make(chan *Client, 1),
		unregister:            make(chan *Client, 1),
		ctx:                   nil, // set in Run
		clientReadLimit:       clientReadLimit,
		events:                newEvents(),
	}
	// not running yet; Run clears this flag
	h.shutdownFlag.Store(true)
	return h
}
// Events returns all the events that are triggered by the websocket hub
// (client connected / disconnected).
func (h *Hub) Events() *Events {
	return h.events
}
// BroadcastMsg sends a message to all clients.
// If dontDrop is true, the call blocks until the message is queued for
// broadcast or either context is canceled; otherwise the message is silently
// discarded (nil error) when the broadcast queue is full.
// NOTE(review): h.ctx is nil before Run is called; the shutdownFlag check
// below appears to guard against reaching h.ctx.Done() in that state, but
// there is a window if the hub is constructed and broadcast concurrently
// with Run — confirm callers only broadcast after Run has started.
func (h *Hub) BroadcastMsg(ctx context.Context, data interface{}, dontDrop ...bool) error {
	if h.shutdownFlag.Load() {
		// hub was already shut down or was not started yet
		return ErrWebsocketServerUnavailable
	}
	notDrop := false
	if len(dontDrop) > 0 {
		notDrop = dontDrop[0]
	}
	msg := &message{data: data, dontDrop: notDrop}
	if notDrop {
		// we need to nest the broadcast into the default case because
		// the select cases are executed in random order if multiple
		// conditions are true at the time of entry in the select case.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-h.ctx.Done():
			return ErrWebsocketServerUnavailable
		default:
			// block until the message is queued or a context is canceled
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-h.ctx.Done():
				return ErrWebsocketServerUnavailable
			case h.broadcast <- msg:
				return nil
			}
		}
	}
	// we need to nest the broadcast into the default case because
	// the select cases are executed in random order if multiple
	// conditions are true at the time of entry in the select case.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-h.ctx.Done():
		return ErrWebsocketServerUnavailable
	default:
		select {
		case h.broadcast <- msg:
			return nil
		default:
			// queue full: droppable message is discarded without error
			return nil
		}
	}
}
// removeClient unregisters the given client, signals its pumps to exit, waits
// for them to finish, drains its send channel, and finally fires the
// onDisconnect callback and the ClientDisconnected event.
// It is only called from the Run goroutine (h.clients is accessed unlocked).
func (h *Hub) removeClient(client *Client) {
	delete(h.clients, client)
	close(client.ExitSignal)
	// wait until writePump and readPump finished
	client.shutdownWaitGroup.Wait()
	// drain the send channel
drainLoop:
	for {
		select {
		case <-client.sendChan:
		default:
			break drainLoop
		}
	}
	if client.onDisconnect != nil {
		client.onDisconnect(client)
	}
	h.events.ClientDisconnected.Trigger(&ClientConnectionEvent{ID: client.id})
	// We do not call "close(client.sendChan)" because we have multiple senders.
	//
	// As written at https://go101.org/article/channel-closing.html
	// A channel will be eventually garbage collected if no goroutines reference it any more,
	// whether it is closed or not.
	// So the gracefulness of closing a channel here is not to close the channel.
}
// Clients returns the number of currently registered websocket clients.
// NOTE(review): h.clients is mutated on the Run goroutine without locking;
// confirm callers tolerate a racy read here.
func (h *Hub) Clients() int {
	return len(h.clients)
}
// Run starts the hub.
// It serves register/unregister requests and broadcast messages until ctx is
// canceled, at which point all clients are removed and the hub is marked as
// shut down. All mutation of h.clients happens on this goroutine.
func (h *Hub) Run(ctx context.Context) {
	// set the hub context so it can be used by the clients
	h.ctx = ctx
	// set the hub as running
	h.shutdownFlag.Store(false)
	shutdownAndRemoveAllClients := func() {
		h.shutdownFlag.Store(true)
		for client := range h.clients {
			h.removeClient(client)
		}
	}
	for {
		// we need to nest the non-error cases into the default case because
		// the select cases are executed in random order if multiple
		// conditions are true at the time of entry in the select case.
		select {
		case <-ctx.Done():
			shutdownAndRemoveAllClients()
			return
		default:
			select {
			case <-ctx.Done():
				shutdownAndRemoveAllClients()
				return
			case client := <-h.register:
				// register client
				h.clients[client] = struct{}{}
				// writePump, readPump and keepAlive each signal this on exit
				client.shutdownWaitGroup.Add(3)
				//nolint:contextcheck // client context is already based on the hub ctx
				go client.writePump()
				// first start the read pump to read pong answers from keepAlive
				client.startWaitGroup.Add(1)
				go client.readPump()
				client.startWaitGroup.Wait()
				// wait until keepAlive started, before calling onConnect
				client.startWaitGroup.Add(1)
				//nolint:contextcheck // client context is already based on the hub ctx
				go client.keepAlive()
				client.startWaitGroup.Wait()
				if client.onConnect != nil {
					client.onConnect(client)
				}
				h.events.ClientConnected.Trigger(&ClientConnectionEvent{ID: client.id})
			case client := <-h.unregister:
				if _, ok := h.clients[client]; ok {
					h.removeClient(client)
					h.logger.Infof("Removed websocket client")
				}
			case message := <-h.broadcast:
				if message.dontDrop {
					// must-deliver message: block per client until it is
					// queued or the client/hub goes away.
					for client := range h.clients {
						if client.FilterCallback != nil {
							if !client.FilterCallback(client, message.data) {
								// do not broadcast the message to this client
								continue
							}
						}
						// we need to nest the sendChan into the default case because
						// the select cases are executed in random order if multiple
						// conditions are true at the time of entry in the select case.
						select {
						case <-ctx.Done():
						case <-client.ExitSignal:
						case <-client.sendChanClosed:
						default:
							select {
							case <-ctx.Done():
							case <-client.ExitSignal:
							case <-client.sendChanClosed:
							case client.sendChan <- message.data:
							}
						}
					}
					continue
				}
				// droppable message: skip clients whose send channel is full
				for client := range h.clients {
					if client.FilterCallback != nil {
						if !client.FilterCallback(client, message.data) {
							// do not broadcast the message to this client
							continue
						}
					}
					// we need to nest the sendChan into the default case because
					// the select cases are executed in random order if multiple
					// conditions are true at the time of entry in the select case.
					select {
					case <-ctx.Done():
					case <-client.ExitSignal:
					case <-client.sendChanClosed:
					default:
						select {
						case client.sendChan <- message.data:
						default:
						}
					}
				}
			}
		}
	}
}
// ServeWebsocket handles websocket requests from the peer.
// onCreate gets called when the client is created.
// onConnect gets called when the client was registered.
// It returns ErrWebsocketServerUnavailable if the hub is not running, or the
// error from accepting the websocket connection / registering the client.
func (h *Hub) ServeWebsocket(
	w http.ResponseWriter,
	r *http.Request,
	onCreate func(client *Client),
	onConnect func(client *Client),
	onDisconnect func(client *Client)) error {
	if h.shutdownFlag.Load() {
		// hub was already shut down or was not started yet
		return ErrWebsocketServerUnavailable
	}
	// recover from panics in the accept/create path so a single bad request
	// cannot take down the server (note: r shadows the *http.Request here)
	defer func() {
		if r := recover(); r != nil {
			h.logger.Errorf("recovered from ServeWebsocket func: %s", r)
		}
	}()
	conn, err := websocket.Accept(w, r, h.acceptOptions)
	if err != nil {
		h.logger.Warn(err.Error())
		return err
	}
	client := NewClient(h, conn, onConnect, onDisconnect)
	if onCreate != nil {
		onCreate(client)
	}
	return h.Register(client)
}
// Stopped returns whether the hub was shut down or has not been started yet.
func (h *Hub) Stopped() bool {
	return h.shutdownFlag.Load()
}
// Register queues the client for registration with the hub's Run loop.
// It blocks until the client is accepted or the hub context is canceled.
// NOTE(review): h.ctx is nil until Run is called; callers (ServeWebsocket)
// check Stopped() first — confirm Register is never called before Run.
func (h *Hub) Register(client *Client) error {
	// we need to nest the register into the default case because
	// the select cases are executed in random order if multiple
	// conditions are true at the time of entry in the select case.
	select {
	case <-h.ctx.Done():
		return ErrWebsocketServerUnavailable
	default:
		select {
		case <-h.ctx.Done():
			return ErrWebsocketServerUnavailable
		case h.register <- client:
			return nil
		}
	}
}
// Unregister queues the client for removal from the hub's Run loop.
// It blocks until the request is accepted or the hub context is canceled.
func (h *Hub) Unregister(client *Client) error {
	// we need to nest the unregister into the default case because
	// the select cases are executed in random order if multiple
	// conditions are true at the time of entry in the select case.
	select {
	case <-h.ctx.Done():
		return ErrWebsocketServerUnavailable
	default:
		select {
		case <-h.ctx.Done():
			return ErrWebsocketServerUnavailable
		case h.unregister <- client:
			return nil
		}
	}
}
|
package testsupport_test
import (
"github.com/bytesparadise/libasciidoc/pkg/types"
"github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("parse document fragment groups", func() {
	// expected is the single document fragment that parsing the input
	// "hello, world!" should produce: one paragraph holding one raw line.
	expected := []types.DocumentFragment{
		{
			Position: types.Position{
				Start: 0,
				End:   13,
			},
			Elements: []interface{}{
				&types.Paragraph{
					Elements: []interface{}{
						&types.RawLine{
							Content: "hello, world!",
						},
					},
				},
			},
		},
	}
	It("should match", func() {
		// given
		actual := "hello, world!"
		// when
		result, err := testsupport.ParseDocumentFragments(actual)
		// then
		Expect(err).ToNot(HaveOccurred())
		Expect(result).To(Equal(expected))
	})
	It("should not match", func() {
		// given: different content, so the parsed fragments must differ
		actual := "foo"
		// when
		result, err := testsupport.ParseDocumentFragments(actual)
		// then
		Expect(err).ToNot(HaveOccurred())
		Expect(result).NotTo(Equal(expected))
	})
})
|
package Copy_List_with_Random_Pointer
// Node is a singly linked list node that additionally carries a Random
// pointer to an arbitrary node of the same list (or nil).
type Node struct {
	Val    int
	Next   *Node
	Random *Node
}

// copyRandomList deep-copies a linked list whose nodes carry an extra Random
// pointer. The returned list shares no nodes with the original.
// Runs in O(n) time using O(n) extra space for the correspondence map.
func copyRandomList(head *Node) *Node {
	if head == nil {
		return nil
	}
	// First pass: allocate a clone of every node, remembering the
	// original -> clone correspondence.
	clones := make(map[*Node]*Node)
	for node := head; node != nil; node = node.Next {
		clones[node] = &Node{Val: node.Val}
	}
	// Second pass: wire up Next and Random through the map. A nil original
	// pointer yields the map's zero value, which is nil as desired.
	for node := head; node != nil; node = node.Next {
		clones[node].Next = clones[node.Next]
		clones[node].Random = clones[node.Random]
	}
	return clones[head]
}
|
package main
import (
"fmt"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
"net/http"
"strings"
)
// main registers the redirection handler for all paths and starts the
// App Engine request loop.
func main() {
	http.HandleFunc("/", handleRedirection)
	appengine.Main()
}
// Handle redirection to somewhere
func handleRedirection(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
links := GetLinks()
name := strings.TrimLeft(r.URL.Path, "/")
url, ok := links[name]
if !ok {
http.Error(w, fmt.Sprintf("Not found / %s", name), http.StatusNotFound)
log.Errorf(ctx, "Not found / %s", name)
return
}
log.Infof(ctx, "Redirect to %s", url)
http.Redirect(w, r, url, http.StatusMovedPermanently)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"regexp"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/errors"
"chromiumos/tast/remote/firmware/fixture"
"chromiumos/tast/rpc"
"chromiumos/tast/services/cros/security"
"chromiumos/tast/testing"
)
// init registers the DUTBehaviourOnACInsertionInSleep test with the tast
// test framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DUTBehaviourOnACInsertionInSleep,
		Desc:         "Verifies that system comes back after from sleep after AC insertion",
		Contacts:     []string{"pathan.jilani@intel.com", "intel-chrome-system-automation-team@intel.com", "cros-fw-engprod@google.com"},
		ServiceDeps:  []string{"tast.cros.security.BootLockboxService"},
		SoftwareDeps: []string{"chrome", "reboot"},
		LacrosStatus: testing.LacrosVariantUnneeded,
		Vars:         []string{"servo"},
		Attr:         []string{"group:firmware", "firmware_unstable"},
		Fixture:      fixture.NormalMode,
	})
}
// DUTBehaviourOnACInsertionInSleep removes AC power, suspends the DUT, then
// reattaches AC power and verifies the DUT resumes. SLP S0 residency and the
// Package C10 counter are sampled before and after to confirm the system
// actually reached the low-power state during suspend.
func DUTBehaviourOnACInsertionInSleep(ctx context.Context, s *testing.State) {
	const (
		// cmdTimeout is a short duration used for sending commands.
		cmdTimeout = 3 * time.Second
	)
	dut := s.DUT()
	h := s.FixtValue().(*fixture.Value).Helper
	if err := h.RequireConfig(ctx); err != nil {
		s.Fatal("Failed to create config: ", err)
	}
	// C10PkgPattern extracts the Package C10 residency counter value.
	var C10PkgPattern = regexp.MustCompile(`C10 : ([A-Za-z0-9]+)`)
	const (
		SlpS0Cmd     = "cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec"
		PkgCstateCmd = "cat /sys/kernel/debug/pmc_core/package_cstate_show"
	)
	cl, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(ctx)
	client := security.NewBootLockboxServiceClient(cl.Conn)
	if _, err := client.NewChromeLogin(ctx, &empty.Empty{}); err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	getChargerPollOptions := testing.PollOptions{
		Timeout:  10 * time.Second,
		Interval: 250 * time.Millisecond,
	}
	// Restore AC power at test end regardless of outcome.
	defer func() {
		s.Log("Stopping power supply")
		if err := h.SetDUTPower(ctx, true); err != nil {
			s.Fatal("Failed to connect charger: ", err)
		}
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			if attached, err := h.Servo.GetChargerAttached(ctx); err != nil {
				return err
			} else if !attached {
				return errors.New("charger is not attached")
			}
			return nil
		}, &getChargerPollOptions); err != nil {
			s.Fatal("Check for charger failed: ", err)
		}
	}()
	// Phase 1: remove AC power and confirm the charger is really detached.
	s.Log("Stopping power supply")
	if err := h.SetDUTPower(ctx, false); err != nil {
		s.Fatal("Failed to remove charger: ", err)
	}
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if attached, err := h.Servo.GetChargerAttached(ctx); err != nil {
			return err
		} else if attached {
			return errors.New("charger is still attached - use Servo V4 Type-C or supply RPM vars")
		}
		return nil
	}, &getChargerPollOptions); err != nil {
		s.Fatal("Check for charger failed: ", err)
	}
	// cmdOutput runs a shell command on the DUT and returns its stdout.
	cmdOutput := func(cmd string) string {
		out, err := dut.Conn().CommandContext(ctx, "bash", "-c", cmd).Output()
		if err != nil {
			s.Fatal("Failed to execute slp_s0_residency_usec command: ", err)
		}
		return string(out)
	}
	// Phase 2: record pre-suspend SLP S0 and Package C10 counters.
	slpOpSetPre := cmdOutput(SlpS0Cmd)
	pkgOpSetOutput := cmdOutput(PkgCstateCmd)
	matchSetPre := (C10PkgPattern).FindStringSubmatch(pkgOpSetOutput)
	if matchSetPre == nil {
		s.Fatal("Failed to match pre PkgCstate value: ", pkgOpSetOutput)
	}
	pkgOpSetPre := matchSetPre[1]
	// Phase 3: suspend the DUT; the command is expected to block, so a
	// deadline-exceeded error is tolerated.
	powerOffCtx, cancel := context.WithTimeout(ctx, cmdTimeout)
	defer cancel()
	if err := h.DUT.Conn().CommandContext(powerOffCtx, "powerd_dbus_suspend").Run(); err != nil && !errors.Is(err, context.DeadlineExceeded) {
		s.Fatal("Failed to power off DUT: ", err)
	}
	sdCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	if err := dut.WaitUnreachable(sdCtx); err != nil {
		s.Fatal("Failed to wait for unreachable: ", err)
	}
	// Phase 4: reattach AC power and verify the DUT wakes and reconnects.
	s.Log("Attaching power supply")
	if err := h.SetDUTPower(ctx, true); err != nil {
		s.Fatal("Failed to attach charger: ", err)
	}
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if attached, err := h.Servo.GetChargerAttached(ctx); err != nil {
			return err
		} else if !attached {
			return errors.New("charger is not attached")
		}
		return nil
	}, &getChargerPollOptions); err != nil {
		s.Fatal("Failed to attach charger: ", err)
	}
	waitCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()
	if err := dut.WaitConnect(waitCtx); err != nil {
		s.Fatal("Failed to wait connect DUT at suspend state after insertion AC charger: ", err)
	}
	// Phase 5: counters must have advanced (and be non-zero), proving the
	// DUT actually entered the low-power state during suspend.
	slpOpSetPost := cmdOutput(SlpS0Cmd)
	if slpOpSetPre == slpOpSetPost {
		s.Fatalf("Failed SLP counter value must be different than the value %q noted most recently %q", slpOpSetPre, slpOpSetPost)
	}
	if slpOpSetPost == "0" {
		s.Fatal("Failed SLP counter value must be non-zero, noted is: ", slpOpSetPost)
	}
	pkgOpSetPostOutput := cmdOutput(PkgCstateCmd)
	matchSetPost := (C10PkgPattern).FindStringSubmatch(pkgOpSetPostOutput)
	if matchSetPost == nil {
		s.Fatal("Failed to match post PkgCstate value: ", pkgOpSetPostOutput)
	}
	pkgOpSetPost := matchSetPost[1]
	if pkgOpSetPre == pkgOpSetPost {
		s.Fatalf("Failed Package C10 value %q must be different than value noted earlier %q", pkgOpSetPre, pkgOpSetPost)
	}
	if pkgOpSetPost == "0x0" || pkgOpSetPost == "0" {
		s.Fatal("Failed Package C10 should be non-zero")
	}
}
|
package vat
import (
	"fmt"
	"strconv"
	"testing"
)
// tests is the shared table of VAT number candidates and whether their
// format is considered valid; it drives TestValidateNumber and
// TestValidateNumberFormat.
var tests = []struct {
	number string
	valid  bool
}{
	{"", false},
	{"A", false},
	{"AB123A01", false},
	{"ATU12345678", true},
	{"ATU15673009", true},
	{"ATU1234567", false},
	{"BE0123456789", true},
	{"BE1234567891", true},
	{"BE0999999999", true},
	{"BE9999999999", true},
	{"BE012345678", false},
	{"BE123456789", false},
	{"BG123456789", true},
	{"BG1234567890", true},
	{"BG1234567", false},
	{"CHE-156.730.098 MWST", true},
	{"CHE-156.730.098", true},
	{"CHE156730098MWST", true},
	{"CHE156730098", true},
	{"CY12345678X", true},
	{"CY15673009L", true},
	{"CY1234567X", false},
	{"CZ12345678", true},
	{"CZ1234567", false},
	{"DE123456789", true},
	{"DE12345678", false},
	{"DK12345678", true},
	{"DK1234567", false},
	{"EE123456789", true},
	{"EE12345678", false},
	{"EL123456789", true},
	{"EL12345678", false},
	{"ESX12345678", true},
	{"ESX1234567", false},
	{"FI1234567", false},
	{"FI12345678", true},
	{"FR12345678901", true},
	{"FR1234567890", false},
	{"GB999999973", true},
	{"GB156730098481", true},
	{"GBGD549", true},
	{"GBHA549", true},
	{"GB99999997", false},
	{"HU12345678", true},
	{"HU1234567", false},
	{"HR12345678901", true},
	{"HR1234567890", false},
	{"IE1234567X", true},
	{"IE123456X", false},
	{"IT12345678901", true},
	{"IT1234567890", false},
	{"LT123456789", true},
	{"LT12345678", false},
	{"LU26375245", true},
	{"LU12345678", true},
	{"LU1234567", false},
	{"LV12345678901", true},
	{"LV1234567890", false},
	{"MT12345678", true},
	{"MT1234567", false},
	{"NL123456789B01", true},
	{"NL123456789B12", true},
	{"NL12345678B12", false},
	{"PL1234567890", true},
	{"PL123456789", false},
	{"PT123456789", true},
	{"PT12345678", false},
	{"RO123456789", true},
	{"RO1", false}, // Romania has a really weird VAT format...
	{"SE123456789012", true},
	{"SE12345678901", false},
	{"SI12345678", true},
	{"SI1234567", false},
	{"SK1234567890", true},
	{"SK123456789", false},
}
// BenchmarkValidateFormat measures ValidateNumberFormat on synthetic
// NL-prefixed inputs.
func BenchmarkValidateFormat(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// Bug fix: string(i) converts the int to a Unicode code point
		// (go vet flags this); strconv.Itoa yields the decimal string.
		ValidateNumberFormat("NL" + strconv.Itoa(i))
	}
}
// TestValidateNumber checks the shared table of VAT numbers against
// ValidateNumberFormat.
// NOTE(review): despite its name this test calls ValidateNumberFormat,
// mirroring TestValidateNumberFormat; switching it to ValidateNumber would
// require network access — confirm intent.
func TestValidateNumber(t *testing.T) {
	for _, test := range tests {
		valid, err := ValidateNumberFormat(test.number)
		if err != nil {
			// Fail through the testing framework instead of panicking.
			t.Fatalf("unexpected error for %v: %v", test.number, err)
		}
		if test.valid != valid {
			t.Errorf("Expected %v for %v, got %v", test.valid, test.number, valid)
		}
	}
}
// ExampleValidateNumber demonstrates full validation of a VAT number
// (format plus existence; the latter presumably queries an external
// service — verify before running offline).
func ExampleValidateNumber() {
	vatNumber := "IE6388047V"
	valid, _ := ValidateNumber(vatNumber)
	fmt.Printf("Is %s valid: %t", vatNumber, valid)
	// Output: Is IE6388047V valid: true
}
// TestValidateNumberFormat checks the shared table of VAT numbers against
// ValidateNumberFormat.
func TestValidateNumberFormat(t *testing.T) {
	for _, test := range tests {
		valid, err := ValidateNumberFormat(test.number)
		if err != nil {
			// Fail through the testing framework instead of panicking.
			t.Fatalf("unexpected error for %v: %v", test.number, err)
		}
		if test.valid != valid {
			t.Errorf("Expected %v for %v, got %v", test.valid, test.number, valid)
		}
	}
}
// TestValidateNumberExistence checks existence validation for one known-valid
// and one known-invalid VAT number. NOTE(review): this presumably performs a
// live lookup; errors are ignored, so the test may pass vacuously offline.
func TestValidateNumberExistence(t *testing.T) {
	valid, _ := ValidateNumberExistence("IE6388047V")
	if !valid {
		t.Error("IE6388047V is a valid VAT number.")
	}
	valid, _ = ValidateNumberExistence("NL123456789B01")
	if valid {
		t.Error("NL123456789B01 is not a valid VAT number.")
	}
}
|
package rtmapi
import (
"sync"
"testing"
)
// TestNewOutgoingEventID verifies that a freshly constructed event ID
// counter starts at zero.
func TestNewOutgoingEventID(t *testing.T) {
	counter := NewOutgoingEventID()
	if counter.id == 0 {
		return
	}
	t.Errorf("id value is not starting from 0. id was %d", counter.id)
}
// TestOutgoingEventID_Next verifies that the very first Next() call on a
// zero-initialized counter yields 1.
func TestOutgoingEventID_Next(t *testing.T) {
	counter := OutgoingEventID{id: 0, mutex: &sync.Mutex{}}
	if got := counter.Next(); got != 1 {
		t.Errorf("id 1 must be given on first Next() call. id was %d.", got)
	}
}
|
package eth
import (
"context"
"sync"
"github.com/ethereum/go-ethereum/common"
)
// DecodeContractAddresses decode the contract address out of the given txs
// indexed by the hashes
// DecodeContractAddresses decode the contract address out of the given txs
// indexed by the hashes.
// Receipts are fetched concurrently, one goroutine per hash.
// NOTE(review): receipt-fetch errors are silently ignored, leaving the
// corresponding entry as the zero address — confirm callers expect this
// best-effort behavior rather than an error.
func DecodeContractAddresses(tx []common.Hash) ([]common.Address, error) {
	c, err := Dial()
	if nil != err {
		return nil, err
	}
	defer c.Close()
	contracts := make([]common.Address, len(tx))
	var wg sync.WaitGroup
	wg.Add(len(tx))
	for i, hash := range tx {
		go func(i int, hash common.Hash) {
			defer wg.Done()
			receipt, err := c.TransactionReceipt(context.TODO(), hash)
			if nil != err {
				// best-effort: leave contracts[i] as the zero address
				return
			}
			// each goroutine writes a distinct index, so no locking is needed
			contracts[i] = receipt.ContractAddress
		}(i, hash)
	}
	wg.Wait()
	return contracts, nil
}
|
package sqlbuilder_test
import (
"sqlbuilder"
"testing"
"github.com/stretchr/testify/assert"
)
// TestParsePostgresqlURL verifies that a postgres connection URL is parsed
// into its host, port, database, and credential components.
func TestParsePostgresqlURL(t *testing.T) {
	dbconn := sqlbuilder.ParsePostgresqlURL("postgres://postgres:password@localhost:5432/testdb1?sslmode=disable")
	// Bug fix: testify's assert.Equal takes (t, expected, actual); the
	// original had the arguments reversed, which produces misleading
	// failure messages.
	assert.NotNil(t, dbconn)
	assert.Equal(t, "testdb1", dbconn.DbName)
	assert.Equal(t, "localhost", dbconn.Host)
	assert.Equal(t, 5432, dbconn.Port)
	assert.Equal(t, "postgres", dbconn.Username)
	assert.Equal(t, "password", dbconn.Password)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package windowarrangementcuj contains helper util and test code for
// WindowArrangementCUJ.
package windowarrangementcuj
import (
"context"
"net/http"
"net/http/httptest"
"regexp"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/pointer"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/coords"
"chromiumos/tast/testing"
)
// TestParam holds parameters of window arrangement cuj test variations.
type TestParam struct {
	// BrowserType selects ash-chrome or lacros-chrome.
	BrowserType browser.Type
	// Tablet indicates whether the variation runs in tablet mode.
	Tablet bool
}
// Connections holds things that facilitate interaction with the DUT.
type Connections struct {
	// Chrome interacts with the currently-running Chrome instance via
	// the Chrome DevTools protocol:
	// https://chromedevtools.github.io/devtools-protocol/
	Chrome *chrome.Chrome
	// Source is used to create new chrome.Conn connections.
	Source ash.ConnSource
	// TestConn is a connection to ash chrome.
	TestConn *chrome.TestConn
	// Cleanup resets everything to a clean state. It only needs to be
	// called if SetupChrome succeeds.
	Cleanup func(ctx context.Context) error
	// CloseBlankTab closes the blank tab that is created when lacros
	// is started. It is a no-op for ash-chrome.
	CloseBlankTab func(ctx context.Context) error
	// BrowserTestConn is a connection to ash chrome or lacros chrome,
	// depending on the browser in use.
	BrowserTestConn *chrome.TestConn
	// BrowserType is the browser type.
	BrowserType browser.Type
	// PipVideoTestURL is the URL of the PIP video test page.
	PipVideoTestURL string
}
// SetupChrome creates ash-chrome or lacros-chrome based on test parameters.
// On success the caller owns the returned Connections and must call its
// Cleanup; on error (or panic before the final ok = true), accumulated
// cleanup actions are executed here in reverse order.
func SetupChrome(ctx, closeCtx context.Context, s *testing.State) (*Connections, error) {
	testParam := s.Param().(TestParam)
	// Cleanup actions are appended as resources are acquired and executed
	// last-acquired-first by Connections.Cleanup.
	var cleanupActionsInReverseOrder []action.Action
	connection := &Connections{
		Cleanup: func(ctx context.Context) error {
			var firstErr error
			for i := len(cleanupActionsInReverseOrder) - 1; i >= 0; i-- {
				if err := cleanupActionsInReverseOrder[i](ctx); firstErr == nil {
					firstErr = err
				}
			}
			return firstErr
		},
		CloseBlankTab: func(ctx context.Context) error { return nil },
	}
	var l *lacros.Lacros
	// ok stays false until setup fully succeeds, so the deferred cleanup
	// below only fires on the error paths.
	ok := false
	defer func() {
		if !ok {
			if err := connection.Cleanup(closeCtx); err != nil {
				s.Error("Failed to clean up after detecting error condition: ", err)
			}
		}
	}()
	connection.BrowserType = testParam.BrowserType
	if testParam.BrowserType == browser.TypeAsh {
		connection.Chrome = s.FixtValue().(chrome.HasChrome).Chrome()
		connection.Source = connection.Chrome
		var err error
		connection.BrowserTestConn, err = connection.Chrome.TestAPIConn(ctx)
		if err != nil {
			return nil, errors.Wrap(err, "failed to get TestAPIConn")
		}
	} else {
		var err error
		connection.Chrome, l, connection.Source, err = lacros.Setup(ctx, s.FixtValue(), browser.TypeLacros)
		if err != nil {
			return nil, errors.Wrap(err, "failed to setup lacros")
		}
		cleanupActionsInReverseOrder = append(cleanupActionsInReverseOrder, func(ctx context.Context) error {
			lacros.CloseLacros(ctx, l)
			return nil
		})
		if connection.BrowserTestConn, err = l.TestAPIConn(ctx); err != nil {
			return nil, errors.Wrap(err, "failed to get lacros TestAPIConn")
		}
	}
	// Serve the test's data files (pip.html and friends) over HTTP.
	srv := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	cleanupActionsInReverseOrder = append(cleanupActionsInReverseOrder, func(ctx context.Context) error {
		srv.Close()
		return nil
	})
	connection.PipVideoTestURL = srv.URL + "/pip.html"
	var err error
	connection.TestConn, err = connection.Chrome.TestAPIConn(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to connect to test api")
	}
	if testParam.BrowserType == browser.TypeLacros {
		connection.CloseBlankTab = func(ctx context.Context) error {
			return l.Browser().CloseWithURL(ctx, chrome.NewTabURL)
		}
	}
	ok = true
	return connection, nil
}
// cleanUp is used to execute a given cleanup action and report
// the resulting error if it is not nil. The intended usage is:
//
// func Example(ctx, closeCtx context.Context) (retErr error) {
// ...
// defer cleanUp(closeCtx, action.Named("description of cleanup action", cleanup), &retErr)
// ...
// }
// cleanUp runs the given cleanup action. A resulting error becomes *retErr
// if no earlier error was recorded; otherwise it is only logged so the first
// failure remains the reported one.
func cleanUp(ctx context.Context, cleanup action.Action, retErr *error) {
	err := cleanup(ctx)
	if err == nil {
		return
	}
	if *retErr != nil {
		testing.ContextLog(ctx, "Cleanup failed: ", err)
		testing.ContextLog(ctx, "Note: This cleanup failure is not the first error. The first error will be reported after all cleanup actions have been attempted")
		return
	}
	*retErr = err
}
// combineTabs is used to merge two browser windows, each consisting
// of a single tab, into one browser window with two tabs.
// combineTabs ensures clamshell mode, then drags the non-PIP tab onto the PIP
// tab to merge the two single-tab browser windows into one window with two
// tabs, and verifies exactly one (non-PIP) window remains.
func combineTabs(ctx context.Context, tconn *chrome.TestConn, ui *uiauto.Context, pc pointer.Context, duration time.Duration) (retErr error) {
	cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, false)
	if err != nil {
		return errors.Wrap(err, "failed to ensure clamshell mode")
	}
	defer func() {
		if err := cleanup(ctx); retErr == nil && err != nil {
			retErr = errors.Wrap(err, "failed to clean up after ensuring clamshell mode")
		}
	}()
	// Identify the two tabs by whether their title contains "/pip.html - "
	// (the PIP page playing content) or ends with "/pip.html".
	tab := nodewith.Role(role.Tab).HasClass("Tab")
	tabPIP := tab.NameContaining("/pip.html - ")
	tabNoPIP := tab.NameRegex(regexp.MustCompile("/pip.html$"))
	firstTabRect, err := ui.Location(ctx, tabNoPIP)
	if err != nil {
		return errors.Wrap(err, "failed to get the location of the first tab")
	}
	secondTabRect, err := ui.Location(ctx, tabPIP)
	if err != nil {
		return errors.Wrap(err, "failed to get the location of the second tab")
	}
	// Drag downward first to tear the tab off, then onto the other tab.
	if err := pc.Drag(
		firstTabRect.CenterPoint(),
		pc.DragTo(firstTabRect.BottomCenter(), duration),
		pc.DragTo(secondTabRect.CenterPoint(), duration),
	)(ctx); err != nil {
		return errors.Wrap(err, "failed to drag one browser tab to the other")
	}
	ws, err := getAllNonPipWindows(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to obtain the window list")
	}
	if len(ws) != 1 {
		return errors.Errorf("unexpected number of windows after trying to merge: got %d; expected 1", len(ws))
	}
	return nil
}
// removeExtraDesk removes the active desk and then ensures that a window that
// was on this removed desk is active. This window activation is important for
// the drag in combineTabs, because the tab to be dragged needs to be on top.
// removeExtraDesk removes the active desk and then re-activates a window that
// was on the removed desk, so the tab dragged in combineTabs is on top.
func removeExtraDesk(ctx context.Context, tconn *chrome.TestConn) error {
	// Remember a window from the desk that is about to be removed.
	w, err := ash.FindWindow(ctx, tconn, func(w *ash.Window) bool { return w.OnActiveDesk })
	if err != nil {
		return errors.Wrap(err, "failed to find window on active desk")
	}
	if err := ash.RemoveActiveDesk(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to remove desk")
	}
	if err := w.ActivateWindow(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to ensure suitable window activation")
	}
	return nil
}
// dragAndRestore performs a drag beginning at the first given point, proceeding
// through the others in order, and ending back at the first given point. Before
// ending the drag, dragAndRestore waits until every window has the same bounds
// as before the drag (as expected because the drag is a closed loop).
// dragAndRestore performs a drag beginning at the first given point, proceeding
// through the others in order, and ending back at the first given point. Before
// ending the drag, dragAndRestore waits until every window has the same bounds
// as before the drag (as expected because the drag is a closed loop).
func dragAndRestore(ctx context.Context, tconn *chrome.TestConn, pc pointer.Context, duration time.Duration, p ...coords.Point) error {
	if len(p) < 2 {
		return errors.Errorf("expected at least two drag points, got %v", p)
	}
	// Snapshot window bounds before dragging; they must be restored by the
	// time the drag completes its loop.
	wsInitial, err := getAllNonPipWindows(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to get windows")
	}
	verifyBounds := func(ctx context.Context) error {
		for _, wInitial := range wsInitial {
			wNow, err := ash.GetWindow(ctx, tconn, wInitial.ID)
			if err != nil {
				return errors.Wrapf(err, "failed to look up %q window by ID %d (the app probably crashed)", wInitial.Title, wInitial.ID)
			}
			if !wNow.BoundsInRoot.Equals(wInitial.BoundsInRoot) {
				return errors.Errorf("%q window bounds not restored; changed from %v to %v", wNow.Title, wInitial.BoundsInRoot, wNow.BoundsInRoot)
			}
		}
		return nil
	}
	verifyBoundsTimeout := &testing.PollOptions{Timeout: time.Minute}
	// Build the drag: visit each point, return to the start, and hold the
	// drag until the bounds check passes (the final step runs before the
	// drag is released).
	var dragSteps []uiauto.Action
	for i := 1; i < len(p); i++ {
		dragSteps = append(dragSteps, pc.DragTo(p[i], duration))
	}
	dragSteps = append(dragSteps, pc.DragTo(p[0], duration), func(ctx context.Context) error {
		if err := testing.Poll(ctx, verifyBounds, verifyBoundsTimeout); err != nil {
			return errors.Wrap(err, "failed to wait for expected window bounds before ending drag")
		}
		return nil
	})
	if err := pc.Drag(p[0], dragSteps...)(ctx); err != nil {
		return errors.Wrap(err, "failed to drag")
	}
	return nil
}
// getAllNonPipWindows calls ash.GetAllWindows and filters out PIP windows
// because they are not supposed to be returned by ash.GetAllWindows in the
// first place (see b/252552657#comment7).
// TODO(b/252552657): When the bug is fixed, remove this and update callers to
// use ash.GetAllWindows directly.
// getAllNonPipWindows returns every window reported by ash.GetAllWindows
// except those in PIP state.
func getAllNonPipWindows(ctx context.Context, tconn *chrome.TestConn) ([]*ash.Window, error) {
	all, err := ash.GetAllWindows(ctx, tconn)
	if err != nil {
		return nil, err
	}
	var nonPip []*ash.Window
	for _, win := range all {
		if win.State == ash.WindowStatePIP {
			continue
		}
		nonPip = append(nonPip, win)
	}
	return nonPip, nil
}
|
package prices
import (
"time"
"github.com/jeb2239/diframeworks/lib/chrono"
)
// PriceRecord is a single quoted price for a named instrument, stamped with
// the time it was produced.
type PriceRecord struct {
	Price     int
	Name      string
	TimeStamp time.Time
}
// IStore provides read access to price records.
type IStore interface {
	GetPrices() []PriceRecord
}
// Store is an IStore implementation that timestamps records using an
// injected time provider (which makes the store testable).
type Store struct {
	timeProvider chrono.ITimeProvider
}
// NewPricesStore creates a price store that uses tp for timestamps.
func NewPricesStore(tp chrono.ITimeProvider) IStore {
	return &Store{
		timeProvider: tp,
	}
}
// GetPrices returns a fixed pair of price records (IBM and GOOG), each
// stamped with the provider's current time.
func (s *Store) GetPrices() []PriceRecord {
	return []PriceRecord{
		{
			Price:     22,
			Name:      "IBM",
			TimeStamp: s.timeProvider.GetTime(),
		},
		{
			Price:     33,
			Name:      "GOOG",
			TimeStamp: s.timeProvider.GetTime(),
		},
	}
}
|
package github
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_resolveEndpoint checks that relative endpoints are path-cleaned and
// that absolute URLs or fragments are rejected with "invalid endpoint".
func Test_resolveEndpoint(t *testing.T) {
	// Happy path: the messy relative path is normalized.
	got, err := resolveEndpoint("foo//bar/.///././../baz/../qux?q=a&x=y")
	assert.NoError(t, err)
	assert.Equal(t, "/foo/bar/qux?q=a&x=y", got)
	// Invalid endpoints must produce an error mentioning "invalid endpoint".
	invalid := []string{
		"https://",
		"https://api.example.com/foo",
		"foo#fragment",
	}
	for _, in := range invalid {
		got, err = resolveEndpoint(in)
		assert.Empty(t, got)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "invalid endpoint")
	}
}
|
package scalar2
import (
"time"
"github.com/MagalixCorp/magalix-agent/v2/kuber"
"github.com/MagalixTechnologies/log-go"
)
// InitScalars wires the OOM-kills processor to the scanner listener and
// starts both in their own goroutines.
func InitScalars(
	logger *log.Logger,
	kube *kuber.Kube,
	observer_ *kuber.Observer,
	dryRun bool,
) {
	listener := NewScannerListener(logger, observer_)
	oomProcessor := NewOOMKillsProcessor(logger, kube, observer_, time.Second, dryRun)
	listener.AddPodListener(oomProcessor)
	go oomProcessor.Start()
	go listener.Start()
}
|
// Package match is used to test matching a bundles to a target on the command line.
//
// It's not used by fleet, but it is available in the fleet CLI as "test" sub
// command. The tests in fleet-examples use it.
package match
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"github.com/rancher/fleet/internal/bundlereader"
"github.com/rancher/fleet/internal/cmd/controller/options"
"github.com/rancher/fleet/internal/cmd/controller/target/matcher"
"github.com/rancher/fleet/internal/helmdeployer"
"github.com/rancher/fleet/internal/manifest"
fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
"github.com/rancher/wrangler/pkg/yaml"
)
// Options configures a Match run.
type Options struct {
	// Output receives the rendered manifests; nil suppresses rendering.
	Output io.Writer
	// BaseDir and BundleSpec locate the bundle when BundleFile is empty.
	BaseDir string
	BundleSpec string
	// BundleFile, when set, is a YAML file holding the bundle directly.
	BundleFile string
	// Cluster identity and labels used for matching when Target is empty.
	ClusterName string
	ClusterGroup string
	ClusterLabels map[string]string
	ClusterGroupLabels map[string]string
	// Target, when set, matches a named target instead of cluster labels.
	Target string
}
// Match loads a bundle (from a file or from BaseDir/BundleSpec), matches it
// against the configured target or cluster labels, and prints the result.
func Match(ctx context.Context, opts *Options) error {
	if opts == nil {
		opts = &Options{}
	}
	var bundle *fleet.Bundle
	if opts.BundleFile == "" {
		b, _, err := bundlereader.Open(ctx, "test", opts.BaseDir, opts.BundleSpec, nil)
		if err != nil {
			return err
		}
		bundle = b
	} else {
		data, err := os.ReadFile(opts.BundleFile)
		if err != nil {
			return err
		}
		bundle = &fleet.Bundle{}
		if err := yaml.Unmarshal(data, bundle); err != nil {
			return err
		}
	}
	bm, err := matcher.New(bundle)
	if err != nil {
		return err
	}
	if opts.Target != "" {
		return printMatch(bundle, bm.MatchForTarget(opts.Target), opts.Output)
	}
	groups := map[string]map[string]string{
		opts.ClusterGroup: opts.ClusterGroupLabels,
	}
	return printMatch(bundle, bm.Match(opts.ClusterName, groups, opts.ClusterLabels), opts.Output)
}
// printMatch reports the matched target on stderr and, when output is
// non-nil, renders the bundle's templated manifests to it.
func printMatch(bundle *fleet.Bundle, target *fleet.BundleTarget, output io.Writer) error {
	if target == nil {
		return errors.New("no match found")
	}
	fmt.Fprintf(os.Stderr, "# Matched: %s\n", target.Name)
	if output == nil {
		return nil
	}
	opts := options.Merge(bundle.Spec.BundleDeploymentOptions, target.BundleDeploymentOptions)
	m, err := manifest.New(bundle.Spec.Resources)
	if err != nil {
		return err
	}
	objs, err := helmdeployer.Template(bundle.Name, m, opts)
	if err != nil {
		return err
	}
	data, err := yaml.Export(objs...)
	if err != nil {
		return err
	}
	_, err = io.Copy(output, bytes.NewReader(data))
	return err
}
|
// https://leetcode.com/problems/flood-fill/
package leetcode_go
// floodFill recolors the 4-connected region of image containing (sr, sc)
// with newColor and returns the (mutated) image.
func floodFill(image [][]int, sr int, sc int, newColor int) [][]int {
	seen := make([][]bool, len(image))
	for i := range seen {
		seen[i] = make([]bool, len(image[0]))
	}
	helperP733(image, sr, sc, newColor, seen)
	return image
}

// helperP733 recursively paints (sr, sc) and every unvisited neighbor that
// still carries the region's original color. The vis matrix guarantees
// termination even when newColor equals the original color.
func helperP733(image [][]int, sr int, sc int, newColor int, vis [][]bool) {
	old := image[sr][sc]
	image[sr][sc] = newColor
	for _, d := range [4][2]int{{0, 1}, {0, -1}, {1, 0}, {-1, 0}} {
		r, c := sr+d[0], sc+d[1]
		if r < 0 || r >= len(image) || c < 0 || c >= len(image[0]) {
			continue
		}
		if vis[r][c] || image[r][c] != old {
			continue
		}
		vis[r][c] = true
		helperP733(image, r, c, newColor, vis)
	}
}
|
// pseudo.go implements pseudo3.23.
// NOTES:
// 1. Input is from stdin - c_src#readDimacsFileCreateList.
// This looks a little cludgy. main()/Testxxx() should pass in a file
// handle that may be os.Stdin.
// 2. In RecoverFlow() use gap value based on pseudoCtx.Lowestlabel value.
// 3. All timing/profiling is out in main()/Testxxx - so don't include in this package.
// 4. main() in C source code is really just a test ... implement in pseudo_test.go.
package pseudo
import (
"bufio"
"encoding/json"
"fmt"
"io"
"os"
)
// global variables
// Package-level graph state populated by ReadDimacsFile; mirrors the
// globals of the C reference implementation (pseudo3.23).
var lowestStrongLabel uint  // lowest label currently holding a strong node
var highestStrongLabel uint // highest label currently holding a strong node
var adjacencyList []*node   // nodes, indexed by number-1
var strongRoots []*root     // per-label buckets of strong roots
var arcList []*arc          // all arcs of the problem
var labelCount []uint       // number of nodes per label
var numNodes, numArcs, source, sink uint
// local context
// context holds the run-time configuration flags for the solver.
// NOTE(review): several functions below reference a package variable
// `pseudoCtx` of this type, but no such variable is declared in this file —
// confirm it exists elsewhere or add `var pseudoCtx context`.
// NOTE(review): the name shadows the standard library's context package if
// it is ever imported here.
type context struct {
	DisplayCut bool
	DisplayFlow bool
	LowestLabel bool
	FifoBucket bool
	// Stats bool // always collect stats, reporting requires call to StatsJSON
}
// statistics collects run-time counters for the solver; they are reported
// by StatsJSON.
// BUG fix: the type was misspelled `statstics`, which did not compile
// against `var stats statistics` below.
type statistics struct {
	NumPushes uint `json:"numPushes"`
	NumMergers uint `json:"numMergers"`
	NumRelabels uint `json:"numRelabels"`
	NumGaps uint `json:"numGaps"`
	NumArcScans uint `json:"numArcScans"`
}
// stats holds the counters for the current run; always collected.
var stats statistics
// StatsJSON returns the runtime stats as a JSON object if they're
// requested as part of Config - "stats":true.
func StatsJSON() string {
	b, _ := json.Marshal(stats) // a flat struct of uints cannot fail to marshal
	return string(b)
}
// necessary initialization
// init gives labelCount a usable (empty, non-nil) slice before any input is
// read. NOTE(review): ReadDimacsFile re-makes labelCount with the real
// size, so this may be redundant — confirm before removing.
func init() {
	labelCount = make([]uint, 0)
}
// the arc object
// arc is a directed edge carrying current flow and capacity. direction is
// 1 or 0 and is flipped during merges (see (*node).merge); per the stub
// comments below, pushUpward/pushDownward are planned methods on arc.
type arc struct {
	from *node
	to *node
	flow uint
	capacity uint
	direction uint
}
// (*arc) pushUpward
// (*arc) pushDownward
// Initialize a new arc value.
// in-lined
// func newArc() *arc {
// return &arc{direction: 1}
// }
// the node object
// node is a graph vertex plus its position in the strong-root forest.
type node struct {
	visited uint
	numAdjacent uint
	number uint // 1-based node number from the DIMACS input
	label uint
	excess int // positive for strong nodes
	parent *node
	childList *node // head of children list; siblings linked via next
	nextScan *node  // scan cursor used by liftAll/processRoot
	numberOutOfTree uint // count of filled entries in outOfTree
	outOfTree []*arc // was **Arc in C, looking at CreateOutOfTree, we're dealing with a pool of Arc's
	nextarc uint
	arcToParent *arc
	next *node // sibling link / bucket link
}
// Newnode returns an initialized node value.
// in-lined
// func newNode(n uint) *node {
// var u uint
// labelCount = append(labelCount, u)
// return &node{number: n}
// }
// (*node) liftAll
// liftAll walks n's whole subtree (using nextScan as the traversal cursor)
// and relabels every node in it to numNodes, decrementing the old label's
// count as it goes.
func (n *node) liftAll() {
	var temp *node
	var current = n
	current.nextScan = current.childList
	labelCount[current.label]--
	current.label = numNodes
	// Depth-first: descend while a child remains to scan, otherwise climb
	// back toward n via parent pointers.
	for ; current != nil; current = current.parent {
		for current.nextScan != nil {
			temp = current.nextScan
			current.nextScan = current.nextScan.next
			current = temp
			current.nextScan = current.childList
			labelCount[current.label]--
			current.label = numNodes
		}
	}
}
// (*node) createOutOfTree allocates arc's for adjacent nodes.
// The slice is sized by numAdjacent, which ReadDimacsFile fills in first.
func (n *node) createOutOfTree() {
	n.outOfTree = make([]*arc, n.numAdjacent) // OK if '0' are allocated
}
// (*node) addOutOfTreenode
// addOutOfTreeNode appends out to the node's out-of-tree arc pool.
// Assumes createOutOfTree has reserved enough slots (numAdjacent).
func (n *node) addOutOfTreeNode(out *arc) {
	n.outOfTree[n.numberOutOfTree] = out
	n.numberOutOfTree++
}
// (*node) processRoot. 'n' is 'strongRoot' in C source
// processRoot tries to merge n's strong tree into a weak node and push its
// excess; if no weak node is found anywhere in the subtree, n is re-bucketed.
// NOTE(review): this function does not compile as written — it mixes a
// method call n.findWeakNode(weakNode) with a free call
// findWeakNode(strongNode, &weakNode), and references an undeclared
// `strongRoot` variable (L-strongRoot.pushExcess / strongRoot.label);
// node also has no addToStrongBucket method (root does). Reconcile with the
// C source (processRoot(strongRoot)) before use.
func (n *node) processRoot() {
	var temp, weakNode *node
	var out *arc
	strongNode := n
	n.nextScan = n.childList
	if out = n.findWeakNode(weakNode); out != nil {
		weakNode.merge(n, out)
		n.pushExcess()
		return
	}
	n.checkChildren()
	// Scan the subtree for a weak node, mirroring liftAll's cursor walk.
	for strongNode != nil {
		for strongNode.nextScan != nil {
			temp = strongNode.nextScan
			strongNode.nextScan = strongNode.nextScan.next
			strongNode = temp
			strongNode.nextScan = strongNode.childList
			if out = findWeakNode(strongNode, &weakNode); out != nil {
				weakNode.merge(strongNode, out)
				strongRoot.pushExcess()
				return
			}
			strongNode.checkChildren()
		}
		if strongNode = strongNode.parent; strongNode != nil {
			strongNode.checkChildren()
		}
	}
	n.addToStrongBucket(strongRoots[strongRoot.label])
	if !pseudoCtx.LowestLabel {
		highestStrongLabel++
	}
}
// (*node) merge. 'n' is 'parent' in C source.
// merge grafts child's tree under n: walking up from child to its old root,
// each parent link is reversed and the connecting arc's direction flipped,
// so child's former ancestors become its descendants.
func (n *node) merge(child *node, newArc *arc) {
	var oldArc *arc
	current := child
	newParent := n
	var oldParent *node
	stats.NumMergers++ // unlike C source always calc stats
	// BUG fix: the loop condition read `current.n`, but node has no field
	// `n`; the C source iterates while current has a parent.
	for current.parent != nil {
		oldArc = current.arcToParent
		current.arcToParent = newArc
		oldParent = current.parent
		oldParent.breakRelationship(current)
		newParent.addRelationship(current)
		newParent = current
		current = oldParent
		newArc = oldArc
		newArc.direction = 1 - newArc.direction // reversed edge, reversed sense
	}
	current.arcToParent = newArc
	newParent.addRelationship(current)
}
// (*node) pushExcess. 'n' is 'strongRoot' in C source.
// pushExcess pushes n's excess up its root path until it is exhausted or
// the root is reached; if excess remains on a node whose parent had none,
// that node becomes a strong root and is re-bucketed at its label.
func (n *node) pushExcess() {
	var current, parent *node
	var arcToParent *arc
	prevEx := 1
	for current = n; current.excess > 0 && current.parent != nil; current = parent {
		parent = current.parent
		prevEx = parent.excess
		arcToParent = current.arcToParent
		if arcToParent.direction > 0 {
			arcToParent.pushUpward(current, parent, arcToParent.capacity-arcToParent.flow)
		} else {
			arcToParent.pushDownward(current, parent, arcToParent.flow)
		}
	}
	if current.excess > 0 && prevEx <= 0 {
		if pseudoCtx.LowestLabel {
			lowestStrongLabel = current.label
		}
		// BUG fix: `ns` was undefined and node has no addToStrongBucket
		// method; the bucket for this label lives in strongRoots, whose
		// *root type defines addToStrongBucket.
		strongRoots[current.label].addToStrongBucket(current)
	}
}
// (*node) breakRelationship
// (*node) addRelationship
// (*node) findWeakNode(weakNode *node)
// (*node) checkChildren
// the root object
// root is a bucket of strong-root nodes, kept as a start/end linked list
// (nodes chain through their next field). FIFO vs LIFO insertion is chosen
// by addToStrongBucket.
type root struct {
	start *node
	end *node
}
// newRoot is a wrapper on new(root) to mimic source.
// in-lined
// func newRoot() *root {
// return new(root)
// }
// free reinitializes a root value so it can be reused.
func (r *root) free() {
	*r = root{}
}
// addToStrongBucket may be better as a *node method ... need to see usage elsewhere.
// addToStrongBucket inserts n into the bucket: appended at the tail when
// pseudoCtx.FifoBucket is set, otherwise pushed at the head (LIFO).
func (r *root) addToStrongBucket(n *node) {
	if !pseudoCtx.FifoBucket {
		n.next = r.start
		r.start = n
		return
	}
	n.next = nil
	if r.start == nil {
		r.start = n
	} else {
		r.end.next = n
	}
	r.end = n
}
// ================ public functions =====================
// ReadDimacsFile implements readDimacsFile of C source code.
// It parses a DIMACS max-flow problem from fh into the package-level graph
// structures (adjacencyList, arcList, strongRoots, labelCount, source, sink).
func ReadDimacsFile(fh *os.File) error {
	var i, capacity, numLines, from, to, first, last uint
	var word string
	buf := bufio.NewReader(fh)
	var atEOF bool
	for !atEOF {
		line, err := buf.ReadBytes('\n')
		// BUG fix: the original tested `err != io.EOF` and returned on every
		// read — including err == nil — so only one line was ever processed.
		if err != nil && err != io.EOF {
			return err
		}
		if err == io.EOF {
			if len(line) == 0 {
				break // nothing more to process
			}
			// At EOF with data but no '\n' termination. While not necessary
			// for os.Stdin, it can happen in a file.
			atEOF = true
		} else {
			// Strip off EOL.
			line = line[:len(line)-1]
		}
		numLines++
		if len(line) == 0 {
			continue // blank line (not in spec); indexing line[0] would panic
		}
		switch line[0] {
		case 'p':
			// Problem line: "p <word> <numNodes> <numArcs>".
			// BUG fix: the original passed a non-pointer []byte for %s and
			// scanned the tag byte with %v, which reads a number.
			if _, err := fmt.Sscanf(string(line[1:]), "%s %d %d", &word, &numNodes, &numArcs); err != nil {
				return err
			}
			adjacencyList = make([]*node, numNodes)
			strongRoots = make([]*root, numNodes)
			labelCount = make([]uint, numNodes)
			arcList = make([]*arc, numArcs)
			for i = 0; i < numNodes; i++ {
				// in-lined: strongRoots[i] = newRoot()
				strongRoots[i] = new(root)
				// in-lined: adjacencyList[i] = &newNode(i + 1)
				adjacencyList[i] = &node{number: i + 1}
			}
			for i = 0; i < numArcs; i++ {
				// in-lined: arcList[i] = newArc()
				arcList[i] = &arc{direction: 1}
			}
			first = 0
			last = numArcs - 1
		case 'a':
			// Arc line: "a <from> <to> <capacity>".
			// BUG fix: fmt.Scanf read from stdin; Sscanf parses the line.
			if _, err := fmt.Sscanf(string(line[1:]), "%d %d %d", &from, &to, &capacity); err != nil {
				return err
			}
			// Alternate filling from both ends, as in the C source.
			if (from+to)%2 != 0 {
				arcList[first].from = adjacencyList[from-1]
				arcList[first].to = adjacencyList[to-1]
				arcList[first].capacity = capacity
				first++
			} else {
				arcList[last].from = adjacencyList[from-1]
				arcList[last].to = adjacencyList[to-1]
				arcList[last].capacity = capacity
				last--
			}
			adjacencyList[from-1].numAdjacent++
			adjacencyList[to-1].numAdjacent++
		case 'n':
			// Node line: "n <id> s|t" designates source or sink.
			var tag string
			if _, err := fmt.Sscanf(string(line[1:]), "%d %s", &i, &tag); err != nil {
				return err
			}
			switch tag {
			case "s":
				source = i
			case "t":
				sink = i
			default:
				return fmt.Errorf("unrecognized character %s on line %d", tag, numLines)
			}
		case 'c':
			continue // "comment" lines
		default:
			return fmt.Errorf("unknown data: %s", string(line))
		}
	}
	for i = 0; i < numNodes; i++ {
		adjacencyList[i].createOutOfTree()
	}
	for i = 0; i < numArcs; i++ {
		to = arcList[i].to.number
		from = arcList[i].from.number
		capacity = arcList[i].capacity
		if !(source == to || sink == from || from == to) {
			if source == from && to == sink {
				arcList[i].flow = capacity
			} else if from == source || to != sink {
				adjacencyList[from-1].addOutOfTreeNode(arcList[i])
			} else if to == sink {
				adjacencyList[to-1].addOutOfTreeNode(arcList[i])
			} else {
				adjacencyList[from-1].addOutOfTreeNode(arcList[i])
			}
		}
	}
	return nil
}
// SimpleInitialization implements simpleInitialization of C source code.
// It saturates every source-adjacent and sink-adjacent arc, then places all
// nodes with positive excess into the label-1 strong bucket.
func SimpleInitialization() {
	var i, size uint
	var tempArc *arc
	// BUG fix: the field is numberOutOfTree (numOutOfTree does not exist),
	// and the loop index must be the uint `i` above — `i := 0` created an
	// int that cannot be compared with the uint size.
	size = adjacencyList[source-1].numberOutOfTree
	for i = 0; i < size; i++ {
		tempArc = adjacencyList[source-1].outOfTree[i]
		tempArc.flow = tempArc.capacity
		// excess is int while capacity is uint; convert explicitly.
		tempArc.to.excess += int(tempArc.capacity)
	}
	size = adjacencyList[sink-1].numberOutOfTree
	for i = 0; i < size; i++ {
		tempArc = adjacencyList[sink-1].outOfTree[i]
		tempArc.flow = tempArc.capacity
		tempArc.from.excess -= int(tempArc.capacity)
	}
	adjacencyList[source-1].excess = 0
	adjacencyList[sink-1].excess = 0
	for i = 0; i < numNodes; i++ {
		if adjacencyList[i].excess > 0 {
			adjacencyList[i].label = 1
			labelCount[1]++
			// BUG fix: addToStrongBucket is defined on *root taking a *node;
			// the original called it the other way round on a **root.
			strongRoots[1].addToStrongBucket(adjacencyList[i])
		}
	}
	adjacencyList[source-1].label = numNodes
	adjacencyList[sink-1].label = 0
	labelCount[0] = (numNodes - 2) - labelCount[1]
}
// FlowPhaseOne implements pseudoFlowPhaseOne of C source code.
func FlowPhaseOne() {
var strongRoot *node
if pseudoCtx.LowestLable {
strongRoot = getLowestStrongRoot
for ; strongRoot != nil; strongRoot = getLowestStrongRoot() {
strongRoot.processRoot()
}
} else {
strongRoot = getHighestStrongRoot()
for ; strongRoot != nil; strongRoot = getHighestStrongRoot() {
strongRoot.processRoot()
}
}
}
// RecoverFlow implements recoverFlow of C source code.
// It internalizes setting the 'gap' value (per file-header note 2, based on
// pseudoCtx.LowestLabel).
// TODO: not yet implemented.
func RecoverFlow() {
}
// Result returns scan of arc/node results in Dimac syntax.
//
// Example for input file "maxflow.net":
//	c maxflow.net
//	c
//	c Dimacs-format maximum flow result file
//	c generated by lp_solve
//	c
//	c Solution
//	s 15
//	c
//	c SRC DST FLOW
//	f 1 2 5
//	f 1 3 10
//	...
// TODO: not yet implemented — always returns an empty slice and the
// `file` argument is currently unused.
func Result(file string) []string {
	var result []string
	return result
}
|
package main
import (
"fmt"
"os/exec"
"strconv"
"strings"
)
// dependency describes a dependency
// An empty Cmd or Version disables version checking for the tool.
type dependency struct {
	Name string // name of dependency (must be on PATH)
	Version string // minimum version, a.b.c (optionally prefixed v/V)
	Cmd string // shell cmd whose output is the installed version
}
// loadDependencies returns the tool dependencies and their minimum version
// requirements.
//
// TODO load dependencies from configuration file
func loadDependencies() ([]*dependency, error) {
	protoc := &dependency{
		Name:    "protoc",
		Version: "v3.6.0",
		Cmd:     "protoc --version | awk '{print $2}'",
	}
	protocGenGo := &dependency{
		Name: "protoc-gen-go",
	}
	return []*dependency{protoc, protocGenGo}, nil
}
// checkDependencies verifies that each dependency is installed and, where a
// version command is configured, that it meets the minimum version.
func checkDependencies(deps []*dependency) error {
	for _, dep := range deps {
		// Installed at all?
		if _, err := exec.LookPath(dep.Name); err != nil {
			return fmt.Errorf("%s not found, %v", dep.Name, err)
		}
		// Without both a command and a required version there is nothing to check.
		if len(dep.Cmd) == 0 || len(dep.Version) == 0 {
			continue
		}
		// Ask the tool itself for its version.
		out, err := exec.Command("sh", "-c", dep.Cmd).CombinedOutput()
		if err != nil {
			return fmt.Errorf("%s load version, %v, \n\t%s\n", dep.Name, err, string(out))
		}
		if err := checkVersion(string(out), dep.Version); err != nil {
			return fmt.Errorf("%s mismatch, %v", dep.Name, err)
		}
	}
	return nil
}
// checkVersion reports whether version satisfies the minimum required
// version. Both strings may carry surrounding whitespace (e.g. raw command
// output with a trailing newline) and an optional leading 'v'/'V'.
func checkVersion(version, required string) error {
	// Command output typically ends in '\n', which broke Atoi on the last
	// component; strip it before parsing.
	version = strings.TrimSpace(version)
	required = strings.TrimSpace(required)
	// BUG fix: `a && b || c` indexed [0] of a possibly-empty string and
	// panicked on empty input; parenthesize the prefix test.
	if len(version) != 0 && (version[0] == 'v' || version[0] == 'V') {
		version = version[1:]
	}
	if len(required) != 0 && (required[0] == 'v' || required[0] == 'V') {
		required = required[1:]
	}
	m1, n1, r1 := versions(version)
	m2, n2, r2 := versions(required)
	// BUG fix: compare lexicographically by component; the old
	// component-wise AND wrongly rejected e.g. 1.0.5 against required 0.9.9.
	ok := m1 > m2 ||
		(m1 == m2 && n1 > n2) ||
		(m1 == m2 && n1 == n2 && r1 >= r2)
	if !ok {
		return fmt.Errorf("require version: %s", required)
	}
	return nil
}
// versions extract the major, minor and revision (patching) version.
// Missing or non-numeric components are left at 0.
func versions(ver string) (major, minor, revision int) {
	var err error
	vv := strings.Split(ver, ".")
	if len(vv) >= 1 {
		if major, err = strconv.Atoi(vv[0]); err != nil {
			return
		}
	}
	if len(vv) >= 2 {
		if minor, err = strconv.Atoi(vv[1]); err != nil {
			return
		}
	}
	if len(vv) >= 3 {
		if revision, err = strconv.Atoi(vv[2]); err != nil {
			return
		}
	}
	return
}
|
/*
* Copyright (C) 2018 eeonevision
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package client
import (
"encoding/json"
"errors"
"github.com/tendermint/tendermint/rpc/core/types"
"github.com/eeonevision/anychaindb/crypto"
"github.com/eeonevision/anychaindb/state"
"github.com/eeonevision/anychaindb/transaction"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
)
// baseClient struct contains config
// parameters for performing requests.
type baseClient struct {
	key *crypto.Key // signing key for transactions
	endpoint string
	mode string // broadcast mode: "sync", "async" or "commit"
	accountID string
	tm client.Client // tendermint RPC client
}
// newHTTPClient initializes new base client instance.
// Unrecognized modes fall back to "sync".
func newHTTPClient(endpoint, mode string, key *crypto.Key, accountID string) *baseClient {
	// Idiom fix: Go switch cases don't fall through, so the explicit breaks
	// and self-assignments were dead code.
	switch mode {
	case "sync", "async", "commit":
		// accepted as-is
	default:
		mode = "sync"
	}
	tm := client.NewHTTP(endpoint, "/websocket")
	return &baseClient{key, endpoint, mode, accountID, tm}
}
// addAccount broadcasts an AccountAdd transaction carrying acc.
// NOTE(review): unlike addPayload, the transaction is not signed here —
// confirm that account creation is intentionally unsigned.
func (c *baseClient) addAccount(acc *state.Account) error {
	txBytes, err := acc.MarshalMsg(nil)
	if err != nil {
		return err
	}
	tx := transaction.New(transaction.AccountAdd, c.accountID, txBytes)
	// BUG fix: the serialization error was silently discarded.
	bs, err := tx.ToBytes()
	if err != nil {
		return err
	}
	return c.broadcastTx(bs)
}
// getAccount fetches the account with the given id via an ABCI query.
func (c *baseClient) getAccount(id string) (*state.Account, error) {
	resp, err := c.tm.ABCIQuery("accounts", []byte(id))
	// BUG fix: check the transport error before inspecting resp; the old
	// order reported "empty ABCI response" and hid the underlying error.
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("empty ABCI response")
	}
	if resp.Response.IsErr() {
		return nil, errors.New(resp.Response.GetLog())
	}
	acc := &state.Account{}
	if err := json.Unmarshal(resp.Response.GetValue(), acc); err != nil {
		return nil, err
	}
	return acc, nil
}
// searchAccounts runs searchQuery against the accounts index and returns
// all matches.
func (c *baseClient) searchAccounts(searchQuery []byte) ([]state.Account, error) {
	resp, err := c.tm.ABCIQuery("accounts/search", searchQuery)
	// BUG fix: transport error must be checked before resp, or it is
	// masked by the generic "empty ABCI response" message.
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("empty ABCI response")
	}
	if resp.Response.IsErr() {
		return nil, errors.New(resp.Response.GetLog())
	}
	acc := []state.Account{}
	if err := json.Unmarshal(resp.Response.GetValue(), &acc); err != nil {
		return nil, err
	}
	return acc, nil
}
// addPayload signs and broadcasts a PayloadAdd transaction carrying cv.
func (c *baseClient) addPayload(cv *state.Payload) error {
	txBytes, err := cv.MarshalMsg(nil)
	if err != nil {
		return err
	}
	tx := transaction.New(transaction.PayloadAdd, c.accountID, txBytes)
	if err := tx.Sign(c.key); err != nil {
		return err
	}
	// BUG fix: the serialization error was silently discarded.
	bs, err := tx.ToBytes()
	if err != nil {
		return err
	}
	return c.broadcastTx(bs)
}
// getPayload fetches the payload with the given id via an ABCI query.
func (c *baseClient) getPayload(id string) (*state.Payload, error) {
	resp, err := c.tm.ABCIQuery("payloads", []byte(id))
	// BUG fix: transport error must be checked before resp, or it is
	// masked by the generic "empty ABCI response" message.
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("empty ABCI response")
	}
	if resp.Response.IsErr() {
		return nil, errors.New(resp.Response.GetLog())
	}
	res := &state.Payload{}
	if err := json.Unmarshal(resp.Response.GetValue(), res); err != nil {
		return nil, err
	}
	return res, nil
}
// searchPayloads runs searchQuery against the payloads index and returns
// all matches.
func (c *baseClient) searchPayloads(searchQuery []byte) ([]state.Payload, error) {
	resp, err := c.tm.ABCIQuery("payloads/search", searchQuery)
	// BUG fix: transport error must be checked before resp, or it is
	// masked by the generic "empty ABCI response" message.
	if err != nil {
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("empty ABCI response")
	}
	if resp.Response.IsErr() {
		return nil, errors.New(resp.Response.GetLog())
	}
	res := []state.Payload{}
	if err := json.Unmarshal(resp.Response.GetValue(), &res); err != nil {
		return nil, err
	}
	return res, nil
}
// broadcastTx submits the raw transaction bytes using the client's
// configured mode and converts any application-level failure to an error.
func (c *baseClient) broadcastTx(bs []byte) error {
	tx := types.Tx(bs)
	var res interface{}
	var err error
	switch c.mode {
	case "async":
		res, err = c.tm.BroadcastTxAsync(tx)
	case "sync":
		res, err = c.tm.BroadcastTxSync(tx)
	case "commit":
		res, err = c.tm.BroadcastTxCommit(tx)
	}
	// Transport errors first.
	if err != nil {
		return err
	}
	// Special empty case.
	if res == nil {
		return errors.New("empty response")
	}
	// Application-level result, depending on the broadcast mode.
	switch r := res.(type) {
	case *core_types.ResultBroadcastTx: // async/sync
		if r.Code != 0 {
			return errors.New(r.Log)
		}
	case *core_types.ResultBroadcastTxCommit: // commit
		if r.CheckTx.Code != 0 || r.DeliverTx.Code != 0 {
			return errors.New("check tx error: " + r.CheckTx.Log + "; deliver tx error: " + r.DeliverTx.Log)
		}
	}
	return nil
}
|
package rpcconn
import _ "reflect"
|
package entity
import "time"
// RecordNotFound indicates a query matched no record.
// NOTE(review): `type X error` declares a distinct named interface type,
// not a sentinel value — callers cannot use errors.Is against it; confirm
// the intended usage.
type RecordNotFound error
// MoreThanOneRecordFound indicates a query unexpectedly matched several records.
type MoreThanOneRecordFound error
// Property enumerates the addressable fields of an Entry.
type Property int
const (
	Username Property = iota
	ID
	BookID
	StartTime
	EndTime
	StartLocation
	EndLocation
	DateCreated
	DateModified
	Version
)
// Entry is a reading-log entry. All fields are pointers so that an absent
// value can be distinguished from a zero value when (un)marshaling JSON.
type Entry struct {
	Username *string `json:"username"`
	ID *string `json:"id"`
	BookID *string `json:"book_id"`
	StartTime *time.Time `json:"start_time"`
	EndTime *time.Time `json:"end_time"`
	StartLocation *int64 `json:"start_location"`
	EndLocation *int64 `json:"end_location"`
	DateCreated *time.Time `json:"date_created"`
	DateModified *time.Time `json:"date_modified"`
	Version *int64 `json:"version"`
}
|
package main
import "fmt"
// main demonstrates a handoff over an unbuffered channel: one goroutine
// sends a value, the main goroutine receives and prints it.
func main() {
	ch := make(chan int)
	defer close(ch)
	go recChan(ch)
	sendChan(ch)
	fmt.Println("Done")
}
// recChan sends the value 33 on c.
// NOTE(review): the name suggests "receive" but this function SENDS; the
// send-only direction chan<- matches the body — consider renaming.
func recChan(c chan<- int) {
	c <- 33
}
// sendChan receives one value from c and prints it.
// NOTE(review): the name suggests "send" but this function RECEIVES; the
// receive-only direction <-chan matches the body — consider renaming.
func sendChan(c <-chan int) {
	fmt.Printf("%v\n", <-c)
}
|
package main
import "fmt"
// person groups a name and an age; used by main to demonstrate structs.
type person struct {
	first string
	last string
	age int
}
// main builds two person values and prints them whole and field-by-field.
func main() {
	bond := person{first: "James", last: "Bond", age: 22}
	moneypenny := person{first: "Miss", last: "Moneypenny", age: 21}
	fmt.Println(bond)
	fmt.Println(moneypenny)
	fmt.Println(bond.age, bond.last)
	fmt.Println(moneypenny.age, moneypenny.last)
}
//Struct
//A struct is a composite (aggregate) data type. Structs allow us to compose together values of different types.
//code: https://play.golang.org/p/hNI_rSK-C6
//video:081
|
package main
import (
"github.com/lethal-bacon0/WebnovelYoinker/pkg/terminal"
)
// main launches the interactive terminal UI of the yoinker.
func main() {
	terminal.StartTerminal()
}
|
package main
import (
"math"
"github.com/davecgh/go-spew/spew"
)
// Point is a 2D point with float64 coordinates.
type Point struct {
	x float64
	y float64
}

// distance returns the Euclidean distance between p and q.
// Pointers are taken so the structs are not copied.
func distance(p, q *Point) float64 {
	dx, dy := p.x-q.x, p.y-q.y
	return math.Sqrt(dx*dx + dy*dy)
}
// main demonstrates three ways to obtain a *Point and prints pairwise
// distances. (Comments translated from Japanese.)
func main() {
	// Define struct pointers.
	var p *Point = &Point{} // `var p *Point` alone would of course be nil.
	var q *Point = &Point{10, 10} // taking a literal's address yields an initialized struct's address.
	r := new(Point) // but typically new() returns the address of a zero-initialized struct.
	r.x, r.y = 100, 100 // after new, assign the values explicitly.
	spew.Dump(p, q, r)
	spew.Dump(p.x, p.y, q.x, q.y, r.x, r.y)
	spew.Println(distance(p, q))
	spew.Println(distance(p, r))
	spew.Println(distance(q, r))
}
|
package main
import (
"container/list"
"container/ring"
"fmt"
)
// main demonstrates container/list and container/ring from the standard
// library.
func main() {
	// Doubly linked list: append and prepend, then walk front to back.
	l := list.New()
	l.PushBack("123")
	l.PushFront("000")
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value)
	}
	// Ring of 7 slots: writing 10 values wraps, so only the last 7 survive;
	// reading 15 values walks around the ring more than twice.
	r := ring.New(7)
	for i := 0; i < 10; i++ {
		r.Value = i
		r = r.Next()
	}
	for i := 0; i < 15; i++ {
		fmt.Println(r.Value)
		r = r.Next()
	}
}
|
package main
import "fmt"
import "./src/uc"
// main upper-cases a sample string using the local uc package.
// NOTE(review): the relative import "./src/uc" only works in legacy GOPATH
// builds; module-based builds require a module path.
func main() {
	str1 := "USING package uc!"
	fmt.Println(uc.UpperCase(str1))
}
|
package main
/*
题目 #
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1]
题目大意 #
在数组中找到 2 个数之和等于给定值的数字,结果返回 2 个数字在数组中的下标。
*/
import (
"fmt"
)
// main runs the brute-force two-sum variant on a sample input.
func main() {
	nums := []int{2, 7, 5, 11, 15, 23, 9, 11}
	target := 16
	fmt.Println("test1: ", test1(nums, target))
}
// Approach 1: brute-force scan of every pair whose sum equals target.
// test1 returns a map keyed "i-j" of all index pairs (i < j) with
// nums[i]+nums[j] == target; there may be multiple results.
func test1(nums []int, target int) interface{} {
	res := map[string][]int{}
	for i, a := range nums {
		for j := i + 1; j < len(nums); j++ {
			if a+nums[j] == target {
				res[fmt.Sprintf("%d-%d", i, j)] = []int{i, j}
			}
		}
	}
	return res
}
|
package cmd
import (
"bufio"
"encoding/json"
"os"
"time"
"github.com/freecracy/todo/task"
"github.com/google/uuid"
)
// AppendData appends a new pending task for project p as one JSON line to
// the pending file in workDir.
func AppendData(p string) error {
	os.Chdir(workDir) // best-effort, matching getId; TODO: handle the error
	f, err := os.OpenFile(pendFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	// BUG fix: Close was deferred before the error check; only defer once
	// we know the file was actually opened.
	if err != nil {
		return err
	}
	defer f.Close()
	u1, _ := uuid.NewUUID() // NOTE(review): error ignored as before — confirm acceptable
	l := task.Task{}
	id, err := getId()
	if err != nil {
		return err
	}
	l.Id = id
	l.Uuid = u1.String()
	l.Project = p
	l.Status = "pending"
	l.Entry = time.Now().Format(time.RFC3339)
	t, err := json.Marshal(l)
	if err != nil {
		return err
	}
	if _, err := f.WriteString(string(t) + "\n"); err != nil {
		return err
	}
	os.Stdout.WriteString("add new task success \n")
	return nil
}
// getId scans the pending file and returns the first gap in the 1-based
// task id sequence, or max+1 when the sequence is contiguous.
func getId() (int, error) {
	os.Chdir(workDir) // best-effort, matching AppendData; TODO: handle the error
	f, err := os.Open(pendFile)
	if err != nil {
		return 0, err
	}
	// BUG fix: the file was never closed (resource leak).
	defer f.Close()
	i := 0
	scanner := bufio.NewScanner(f)
	var t task.Task
	for scanner.Scan() {
		i++
		if err := json.Unmarshal([]byte(scanner.Text()), &t); err != nil {
			return 0, err
		}
		if t.Id != i {
			return i, nil
		}
	}
	// BUG fix: a read error previously looked like a clean end of file.
	if err := scanner.Err(); err != nil {
		return 0, err
	}
	return i + 1, nil
}
|
package main
import "fmt"
// crary encodes s (lowercase a-z) by shifting the first letter by num and
// then replacing every letter with the running sum of offsets, mod 26.
func crary(s string, num int) string {
	out := []byte(s)
	total := num
	for i, ch := range out {
		total += int(ch - 'a')
		out[i] = byte(total%26) + 'a'
	}
	return string(out)
}
// decrary inverts crary: it undoes the running-sum encoding and shifts the
// first letter back by num.
func decrary(s string, num int) string {
	str := []byte(s)
	var nums = make([]int, len(str))
	for i := 0; i < len(str); i++ {
		nums[i] = int(str[i] - 'a')
	}
	// Undo the prefix sums from back to front.
	l := len(nums) - 1
	for i := l; i >= 1; i-- {
		if nums[i]-nums[i-1] < 0 {
			nums[i] = nums[i] + 26
		}
		nums[i] = nums[i] - nums[i-1]
	}
	nums[0] = nums[0] - num
	for i := 0; i < len(str); i++ {
		// BUG fix: nums[0]-num can be negative and Go's % keeps the sign,
		// producing bytes outside 'a'..'z'; normalize into [0, 26) first.
		str[i] = byte(((nums[i]%26)+26)%26) + 'a'
	}
	return string(str)
}
// main decodes a sample string that was encoded with a shift of 3.
func main() {
	encoded := "degji"
	fmt.Println(decrary(encoded, 3))
}
|
package main
import (
"fmt"
"syscall/js"
)
// c was intended as a completion signal (see the commented-out send in
// printMessage); it is currently created but never used.
var c chan bool
// init allocates the signal channel before main runs.
func init() {
	c = make(chan bool)
}
func add(this js.Value, i []js.Value) interface{} {
js.Global().Set("output", js.ValueOf(i[0].Int()+i[1].Int()))
println(js.ValueOf(i[0].Int() + i[1].Int()).String())
return js.ValueOf(i[0].Int() - i[1].Int())
}
func subtract(this js.Value, i []js.Value) interface{} {
js.Global().Set("output", js.ValueOf(i[0].Int()-i[1].Int()))
println(js.ValueOf("subtract!!").String())
println(js.ValueOf(i[0].Int() - i[1].Int()).String())
return js.ValueOf(i[0].Int() - i[1].Int())
}
// printMessage appends a <p> element containing inputs[0] to the document
// body and returns 1.
// NOTE(review): the message is inserted via innerHTML without escaping —
// confirm the inputs are trusted before exposing this to arbitrary JS.
func printMessage(this js.Value, inputs []js.Value) interface{} {
	message := inputs[0].String()
	document := js.Global().Get("document")
	p := document.Call("createElement", "p")
	p.Set("innerHTML", message)
	document.Get("body").Call("appendChild", p)
	// c <- true
	return 1
}
// registerCallbacks exposes the Go callbacks to JavaScript as globals
// add, subtract and printMessage.
func registerCallbacks() {
	js.Global().Set("add", js.FuncOf(add))
	js.Global().Set("subtract", js.FuncOf(subtract))
	js.Global().Set("printMessage", js.FuncOf(printMessage))
}
func main() {
fmt.Println("Hello, WebAssembly!")
console_log := js.Global().Get("console").Get("log")
console_log.Invoke("Hello wasm! invoke by go")
js.Global().Call("eval", `
console.log("hello, wasm! in console");
`)
registerCallbacks()
js.Global().Set("println",
js.FuncOf(func(this js.Value, args []js.Value) interface{} {
println("hello callback")
println(args[0].String()) // Debug 语句 可以在浏览器调用的时候看到
return nil
}),
)
// printlnFn := js.Global().Get("println")
// printlnFn.Invoke()
// js.Global().Call("println", js.ValueOf("args!!!!"))
select {}
println("We are out of here")
}
|
package models
import (
db "github.com/SlaF/goinbar/lib"
)
// Event is a titled record persisted to the `events` table by Say.
type Event struct {
	Title string
	Body string
}
// Say inserts the event into the events table (name, description) and
// returns its Title.
// NOTE(review): despite the name, this performs a database WRITE and
// panics (via checkErr) on any DB error — confirm both are intended.
func (e Event) Say() string {
	stmt, err := db.DBCon.Prepare("INSERT events SET name=?, description=?")
	checkErr(err)
	_, err = stmt.Exec(e.Title, e.Body)
	checkErr(err)
	return e.Title
}
// checkErr panics on a non-nil error; nil errors are ignored.
func checkErr(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package services
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"seeder/models"
"time"
)
// DeployerService is an HTTP client for the deployer API; ApiResponse
// caches the most recently decoded response.
type DeployerService struct {
	HomePageUrl string // base URL, expected to end with '/'
	AccessToken string // sent in the Token header
	ApiResponse *models.ApiResponse
	ServerDeployments []*models.ServerDeployment
	HttpClient *http.Client
}
// NewDeployerService builds a DeployerService for the given API base URL
// and token, using an HTTP client with a one-minute timeout.
func NewDeployerService(homePageUrl string, accessToken string) *DeployerService {
	return &DeployerService{
		HomePageUrl: homePageUrl,
		AccessToken: accessToken,
		HttpClient:  &http.Client{Timeout: time.Minute},
	}
}
// GetHomePageUrl returns the configured base API URL.
// NOTE(review): Go convention omits the Get prefix on getters; the names
// are kept to preserve the public interface.
func (service *DeployerService) GetHomePageUrl() string {
	return service.HomePageUrl
}
// GetAccessToken returns the token sent in the Token header.
func (service *DeployerService) GetAccessToken() string {
	return service.AccessToken
}
// GetApiResponse returns the most recently decoded API response.
func (service *DeployerService) GetApiResponse() *models.ApiResponse {
	return service.ApiResponse
}
// GetServerDeployments returns the cached server deployments.
func (service *DeployerService) GetServerDeployments() []*models.ServerDeployment {
	return service.ServerDeployments
}
// GetHttpClient returns the underlying HTTP client (one-minute timeout).
func (service *DeployerService) GetHttpClient() *http.Client {
	return service.HttpClient
}
// HttpClientGetDeployments fetches GET /deployments and decodes the body
// into service.ApiResponse. On any failure the error is printed and nil is
// returned.
func (service *DeployerService) HttpClientGetDeployments() *models.ApiResponse {
	req, err := http.NewRequest("GET", service.HomePageUrl+"deployments", nil)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Token", service.AccessToken)
	resp, err := service.HttpClient.Do(req)
	// BUG fix: on a transport error resp is nil; the old code fell through
	// and panicked on resp.Body.
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	if err := json.Unmarshal(bodyBytes, &service.ApiResponse); err != nil {
		fmt.Print(err.Error())
		return nil
	}
	return service.ApiResponse
}
// HttpClientGetDeploymentId fetches GET /deployments/{id} and decodes the
// body into service.ApiResponse. On any failure the error is printed and
// nil is returned.
func (service *DeployerService) HttpClientGetDeploymentId(id string) *models.ApiResponse {
	req, err := http.NewRequest("GET", service.HomePageUrl+"deployments/"+id, nil)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Token", service.AccessToken)
	resp, err := service.HttpClient.Do(req)
	// BUG fix: on a transport error resp is nil; the old code fell through
	// and panicked on resp.Body.
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	if err := json.Unmarshal(bodyBytes, &service.ApiResponse); err != nil {
		fmt.Print(err.Error())
		return nil
	}
	return service.ApiResponse
}
// HttpClientGetRemainingSlots returns MAX_DEPLOYMENTS (from /envinit) minus
// the current number of deployments.
// NOTE(review): the type assertions and map lookup panic if either request
// failed or the JSON shape differs from a map/array — confirm the API
// contract and consider returning an error instead.
func (service *DeployerService) HttpClientGetRemainingSlots() int {
	maxDeployments := service.HttpClientGetEnvInit().GetDescription().(map[string]interface{})["MAX_DEPLOYMENTS"].(float64)
	return int(maxDeployments) - len(service.HttpClientGetDeployments().GetDescription().([]interface{}))
}
// HttpClientGetEnvInit fetches GET /envinit and decodes the body into
// service.ApiResponse. On any failure the error is printed and nil is
// returned.
func (service *DeployerService) HttpClientGetEnvInit() *models.ApiResponse {
	req, err := http.NewRequest("GET", service.HomePageUrl+"envinit", nil)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Token", service.AccessToken)
	resp, err := service.HttpClient.Do(req)
	// BUG fix: on a transport error resp is nil; the old code fell through
	// and panicked on resp.Body.
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	// BUG fix: the unmarshal error was previously ignored.
	if err := json.Unmarshal(bodyBytes, &service.ApiResponse); err != nil {
		fmt.Print(err.Error())
		return nil
	}
	return service.ApiResponse
}
// PostDeployment uploads the given deployment file content to the
// deployer API, identifying the deployment via the Deployment-Id header.
//
// Bug fixes: failed requests no longer fall through to a nil resp
// dereference, and the json.Unmarshal error is no longer silently ignored.
func (service *DeployerService) PostDeployment(deployment *models.ServerDeployment, deploymentFileContent []byte) *models.ApiResponse {
	req, err := http.NewRequest("POST", service.HomePageUrl+"deployments", bytes.NewBuffer(deploymentFileContent))
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Token", service.AccessToken)
	req.Header.Add("Deployment-Id", deployment.Id)
	resp, err := service.HttpClient.Do(req)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	apiResponse := &service.ApiResponse
	if err = json.Unmarshal(bodyBytes, apiResponse); err != nil {
		fmt.Print(err.Error())
		return nil
	}
	return service.ApiResponse
}
// DeleteDeployments asks the deployer API to delete every deployment.
//
// Bug fixes: failed requests no longer fall through to a nil resp
// dereference, and the json.Unmarshal error is no longer silently ignored.
func (service *DeployerService) DeleteDeployments() *models.ApiResponse {
	req, err := http.NewRequest("DELETE", service.HomePageUrl+"deployments", nil)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Token", service.AccessToken)
	resp, err := service.HttpClient.Do(req)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	apiResponse := &service.ApiResponse
	if err = json.Unmarshal(bodyBytes, apiResponse); err != nil {
		fmt.Print(err.Error())
		return nil
	}
	return service.ApiResponse
}
// DeleteDeploymentId deletes the given deployment via the deployer API.
//
// Bug fixes: failed requests no longer fall through to a nil resp
// dereference, and the json.Unmarshal error is no longer silently ignored.
func (service *DeployerService) DeleteDeploymentId(deployment *models.ServerDeployment) *models.ApiResponse {
	req, err := http.NewRequest("DELETE", service.HomePageUrl+"deployments/"+deployment.Id, nil)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Token", service.AccessToken)
	resp, err := service.HttpClient.Do(req)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Print(err.Error())
		return nil
	}
	apiResponse := &service.ApiResponse
	if err = json.Unmarshal(bodyBytes, apiResponse); err != nil {
		fmt.Print(err.Error())
		return nil
	}
	return service.ApiResponse
}
|
package main
import (
"fmt"
"math/big"
"strconv"
)
func main() {
fmt.Printf("%d\n", sum(fac(100).String()))
}
// fac returns n! as an arbitrary-precision integer. For n <= 0 the empty
// product 1 is returned, matching the behavior of the original loop.
func fac(n int) *big.Int {
	// MulRange(1, n) multiplies all integers in [1, n] in the stdlib,
	// replacing the manual descending multiplication loop; an empty
	// range (n < 1) yields 1.
	return new(big.Int).MulRange(1, int64(n))
}
// sum adds up the decimal digits of number, which is expected to contain
// only the characters '0'-'9'.
func sum(number string) int {
	total := 0
	for i := range number {
		// Atoi on a single digit cannot fail; the error is discarded just
		// as in the original (a non-digit byte contributes 0).
		digit, _ := strconv.Atoi(string(number[i]))
		total += digit
	}
	return total
}
|
package main
import (
"bytes"
"crypto/x509"
"encoding/pem"
"io/ioutil"
"os"
"github.com/ONSdigital/go-ns/log"
"github.com/ONSdigital/s3crypto"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func main() {
f, err := ioutil.ReadFile("testdata/private.pem")
if err != nil {
panic(err)
}
block, _ := pem.Decode(f)
if block == nil || block.Type != "RSA PRIVATE KEY" {
panic(err)
}
privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
panic(err)
}
region := "eu-west-1"
sess, err := session.NewSession(&aws.Config{Region: ®ion})
if err != nil {
panic(err)
}
size := 5 * 1024 * 1024
svc := s3crypto.New(sess, &s3crypto.Config{PrivateKey: privateKey, MultipartChunkSize: size})
bucket := "dp-frontend-florence-file-uploads"
key := "cpicoicoptest.csv"
b, err := ioutil.ReadFile("testdata/" + key)
if err != nil {
log.Error(err, nil)
return
}
acl := "public-read"
input := &s3.CreateMultipartUploadInput{
Bucket: &bucket,
Key: &key,
}
input.ACL = &acl
result, err := svc.CreateMultipartUpload(input)
if err != nil {
log.ErrorC("error creating mpu", err, nil)
return
}
log.Debug("created multi part upload", nil)
chunks := split(b, size)
var completedParts []*s3.CompletedPart
for i, chunk := range chunks {
partN := int64(i + 1)
partInput := &s3.UploadPartInput{
Body: bytes.NewReader(chunk),
Bucket: &bucket,
Key: &key,
PartNumber: &partN,
UploadId: result.UploadId,
}
res, err := svc.UploadPart(partInput)
if err != nil {
log.Error(err, nil)
return
}
log.Info("part completed", log.Data{"part": partN})
completedParts = append(completedParts, &s3.CompletedPart{
PartNumber: &partN,
ETag: res.ETag,
})
}
completeInput := &s3.CompleteMultipartUploadInput{
Bucket: &bucket,
Key: &key,
UploadId: result.UploadId,
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: completedParts,
},
}
cr, err := svc.CompleteMultipartUpload(completeInput)
if err != nil {
log.Error(err, nil)
return
}
log.Info("upload completed", log.Data{"result": cr})
log.Info("now getting file...", nil)
getInput := &s3.GetObjectInput{
Bucket: &bucket,
Key: &key,
}
out, err := svc.GetObject(getInput)
if err != nil {
log.Error(err, nil)
return
}
newf, err := os.Create("newcpi.csv")
if err != nil {
log.Error(err, nil)
return
}
defer newf.Close()
newB, err := ioutil.ReadAll(out.Body)
if err != nil {
log.Error(err, nil)
return
}
if _, err := newf.Write(newB); err != nil {
log.Error(err, nil)
return
}
}
// split partitions buf into consecutive chunks of at most lim bytes. The
// chunks alias buf's backing array; no data is copied.
func split(buf []byte, lim int) [][]byte {
	chunks := make([][]byte, 0, len(buf)/lim+1)
	for len(buf) >= lim {
		chunks = append(chunks, buf[:lim])
		buf = buf[lim:]
	}
	// Append the tail shorter than lim, if any. (The original appended the
	// redundant buf[:len(buf)], which is identical to buf.)
	if len(buf) > 0 {
		chunks = append(chunks, buf)
	}
	return chunks
}
|
package mysql
import (
"github.com/jinzhu/gorm"
"go-sql/app/repositories"
)
// Storage implements repositories.StorageInterface on top of a pair of
// gorm connections, separating reads from writes (e.g. replica/primary).
type Storage struct {
	// readConn serves read queries.
	readConn *gorm.DB
	// writeConn serves mutating queries.
	writeConn *gorm.DB
}
func NewStorage(readConn, writeConn *gorm.DB) repositories.StorageInterface {
return &Storage{
readConn:readConn,
writeConn:writeConn,
}
} |
package datagrid
import (
"flood_go/graphicsx"
"flood_go/text"
"flood_go/misc"
cfg "flood_go/config"
)
// Import external packages
import (
"github.com/veandco/go-sdl2/sdl"
)
// NormalizeSmellColor maps a raw smell value onto a displayable 0-255
// intensity: zero stays zero, small values are floored at 20, mid-range
// values pass through unchanged, and large values are compressed.
func NormalizeSmellColor(smell int) int {
	switch {
	case smell == 0:
		return 0
	case smell < 20:
		return 20
	case smell < 100:
		return smell
	case smell < 10000:
		return smell/100 + 99
	default:
		return 255
	}
}
// NormalizeAmountColor clamps amount into the 0-255 colour range. With
// max_out set, any positive amount saturates straight to 255.
func NormalizeAmountColor(amount int, max_out bool) int {
	if max_out && amount > 0 {
		return 255
	}
	if amount > 255 {
		return 255
	}
	return amount
}
/* un-implemented atm, in favour of pixelbased drawing */
// Draw renders every cell of the grid via SDL rect fills: each cell whose
// value at channel print_val is positive is drawn as a filled square of
// cell_size pixels, with alpha derived from the value, optionally overlaid
// with the value's pre-rendered text texture.
func (this *DataGrid) Draw(graphics *graphicsx.Graphics, numbers_text *[256]*text.TextObject, cell_size int32, print_val int, print_text bool){
	// set alpha
	var alpha int
	// Draw & Rearm Loop — x/y track the pixel origin of the current cell
	var x = int32(0)
	var y = int32(0)
	for row := range this.Cells {
		for col := range this.Cells[row] {
			// get cell
			current_cell := this.Cells[row][col]
			// get draw value (channel selected by print_val)
			draw_value := current_cell[uint8(print_val)]
			if draw_value > 0 {
				// set Cell color
				// alpha is the draw value clamped to at least 20 / at most 255
				alpha = misc.Max255Int(20, draw_value)
				// set color
				(*graphics).SetSDLDrawColor(this.BaseColor, uint8(alpha))
				// create rect
				rect := sdl.Rect{x, y, cell_size, cell_size}
				// draw cell
				(*graphics).Renderer.FillRect(&rect)
				// draw the value's number texture on top of the cell
				if print_text {
					(*graphics).Renderer.Copy((*numbers_text)[draw_value].Image.Texture, nil, &sdl.Rect{
						x,
						y,
						(*numbers_text)[draw_value].Image.Width, (*numbers_text)[draw_value].Image.Height,
					})
				}
			}
			x += cell_size
		}
		// next row: reset x, advance y one cell
		x = 0
		y += cell_size
	}
}
// Draw_PixelBased renders rows [row_start, row_end) of the grid directly
// into this.Pixels as filled squares, then signals on done so callers can
// split the rows across goroutines. numbers_text/print_text are accepted
// for signature parity with Draw but text is not rendered here.
func (this *DataGrid) Draw_PixelBased(done chan bool, row_start, row_end int, numbers_text *[256]*text.TextObject, print_val int, print_text bool){
	// Shorthand
	cell_size := int32(cfg.CELL_SIZE)
	// references
	var Cells = &(this.Cells)
	// Convert SDL color to pixel value; alpha (color[3]) is overwritten per cell
	var color = [4]byte{this.BaseColor.R, this.BaseColor.G, this.BaseColor.B, this.BaseColor.A}
	// Draw & Rearm Loop — y starts at the pixel row of row_start
	var x = int32(0)
	var y = int32(0) + (int32(row_start)*cell_size)
	var draw_value = 0
	for row := row_start; row < row_end; row++ {
		for col := 0; col < cfg.COLS; col++ {
			// get cell
			//current_cell := (*Cells)[row][col]
			// get draw value (channel selected by print_val)
			draw_value = (*Cells)[row][col][uint8(print_val)]
			//index := row * col + col
			//draw_value := int((*this.Amount)[3])
			if draw_value > 0 {
				/*
				if print_val == 0 {
					draw_value = NormalizeAmountColor(draw_value, false)
				}
				*/
				// channel 1 (smell) values are compressed into 0-255 first
				if print_val == 1 {
					draw_value = NormalizeSmellColor(draw_value)
				}
				color[3] = uint8(draw_value)
				graphicsx.SetSquare(int(x), int(y), int(cell_size), color, this.Pixels)
			}
			x += cell_size
		}
		x = 0
		y += cell_size
	}
	done <- true
}
// Draw_PixelBasedDotted writes one byte per cell (a single "dot") into
// this.Pixels instead of filling whole squares, for rows
// [row_start, row_end). The byte lands at offset 3 of each cell's first
// pixel — presumably the alpha channel of a 4-byte RGBA stream; confirm
// the pixel format. numbers_text and print_text are unused here and exist
// for signature parity with Draw_PixelBased. Signals done when finished.
func (this *DataGrid) Draw_PixelBasedDotted(done chan bool, row_start, row_end int, numbers_text *[256]*text.TextObject, print_val int, print_text bool){
	// Shorthand
	cell_size := int32(cfg.CELL_SIZE)
	// references
	var Cells = &(this.Cells)
	// Vars
	print_value := uint8(print_val)
	// bytes spanned by one full row of cells (COLS cells, CELL_SIZE pixels
	// wide and tall, 4 bytes per pixel)
	row_skip := cfg.COLS * cfg.CELL_SIZE * 4 * (cfg.CELL_SIZE)
	index := int32(3) + int32(row_skip * row_start)
	if cell_size > 1 {
		for row := row_start; row < row_end; row++ {
			for col := 0; col < cfg.COLS; col++ {
				(*this.Pixels)[index] = byte((*Cells)[row][col][print_value])
				index += cell_size * 4
			}
			// skip the remaining CELL_SIZE-1 pixel rows of this cell row
			index += cfg.COLS * cfg.CELL_SIZE * 4 * (cfg.CELL_SIZE - 1)
		}
	} else {
		// cell_size == 1: rows are contiguous, no extra skip needed
		for row := row_start; row < row_end; row++ {
			for col := 0; col < cfg.COLS; col++ {
				(*this.Pixels)[index] = byte((*Cells)[row][col][print_value])
				index += cell_size * 4
			}
		}
	}
	done <- true
}
|
package main
import (
"fmt"
// "strconv"
// "strings"
"time"
)
// IPAddr is a custom error type recording when a failure happened and a
// free-form description of what went wrong.
type IPAddr struct {
	When time.Time
	What string
}
//func test() {
//	hosts := map[string]IPAddr{
//		"loopback":  {127, 0, 0, 1},
//		"googleDNS": {8, 8, 8, 8},
//	}
//
//	for _, ip := range hosts {
//		res := []string{}
//		for _, val := range ip {
//			res = append(res, strconv.Itoa(int(val)))
//		}
//		fmt.Printf("%v\n", strings.Join(res, "."))
//	}
//}
// Error formats the failure as a two-line message, satisfying the error
// interface.
func (e *IPAddr) Error() string {
	return fmt.Sprintf("When %v: \nWhat: %v", e.When, e.What)
}
// run always fails, returning an *IPAddr error stamped with the current
// time and the fixed description "Toto".
func run() error {
	failure := &IPAddr{When: time.Now(), What: "Toto"}
	return failure
}
// main executes the (always failing) operation and prints its error.
func main() {
	if err := run(); err != nil {
		fmt.Println(err)
	}
}
|
package main
import (
"binding"
"fmt"
)
// main exercises the binding package at the point (3, 3): it evaluates
// every wrapped test function, runs three optimisation methods on the
// sphere function, prints the recorded execution time, and finally prints
// the minimiser reported for each test function. Output order is the
// program's observable behavior; do not reorder these calls.
func main() {
	mas := []float64{3.0, 3.0}
	// objective-function values at mas
	fmt.Println(binding.Sphere_function(mas))
	fmt.Println(binding.Rastrigin_function(mas))
	fmt.Println(binding.Stibinski_Tanga_function(mas))
	fmt.Println(binding.Ekli_function(mas))
	fmt.Println(binding.Rosenbrock_function(mas))
	fmt.Println(binding.Bill_function(mas))
	fmt.Println(binding.Goldman_price_function(mas))
	fmt.Println(binding.Boot_function(mas))
	fmt.Println(binding.Bookin_function(mas))
	fmt.Println(binding.Matias_function(mas))
	fmt.Println(binding.Levi_function(mas))
	fmt.Println(binding.Three_hump_camel_function(mas))
	fmt.Println(binding.Easom_function(mas))
	// optimisation methods applied to the sphere function
	fmt.Println(binding.Luus_jaakola_method(mas, "Sphere_func", 0.001))
	fmt.Println(binding.Hooke_jeeves_method(mas, "Sphere_func", 0.001))
	fmt.Println(binding.Competing_points_method(mas, "Sphere_func"))
	fmt.Println("EXEC TIME =", binding.Get_execution_time())
	// known minimisers of each test function
	fmt.Println(binding.Get_min_x_rastrigin_function(mas))
	fmt.Println(binding.Get_min_x_stibinski_tanga_function(mas))
	fmt.Println(binding.Get_min_x_ekli_function(mas))
	fmt.Println(binding.Get_min_x_sphere_function(mas))
	fmt.Println(binding.Get_min_x_rosenbrock_function(mas))
	fmt.Println(binding.Get_min_x_bill_function(mas))
	fmt.Println(binding.Get_min_x_goldman_price_function(mas))
	fmt.Println(binding.Get_min_x_bookin_function(mas))
	fmt.Println(binding.Get_min_x_boot_function(mas))
	fmt.Println(binding.Get_min_x_matias_function(mas))
	fmt.Println(binding.Get_min_x_levi_function(mas))
	fmt.Println(binding.Get_min_x_three_hump_camel_function(mas))
	fmt.Println(binding.Get_min_x_easom_function(mas))
}
|
package main
import (
"bufio"
"flag"
"fmt"
"os"
"runtime"
)
// a sensible default is to use the number of CPUs available
// parallelism bounds how many commands may run concurrently.
var parallelism = flag.Int("parallelism", runtime.NumCPU(), "how many commands to run at a time")
// parseArgs parses the command line and aborts with exit code 2 unless
// exactly one positional argument — the path of the commands file — is
// supplied.
func parseArgs() {
	flag.Parse()
	if flag.NArg() != 1 {
		fmt.Fprintln(os.Stderr, "expecting a single argument - the path of the file of commands to run")
		os.Exit(2)
	}
}
// read in commands to run
// getCmdStrings reads the commands file named by the first positional
// argument and returns its lines, one command per line.
func getCmdStrings() []string {
	cmdsFilePath := flag.Arg(0)
	cmdsFile, err := os.Open(cmdsFilePath)
	if err != nil {
		panic(err)
	}
	defer cmdsFile.Close()
	// one command per line (ScanLines is the Scanner default, so no
	// explicit Split call is needed)
	scanner := bufio.NewScanner(cmdsFile)
	var lines []string
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	// Bug fix: the original never checked scanner.Err(), so a mid-file
	// read error would silently truncate the command list.
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	return lines
}
|
package interaction
// InteractionSaver wraps an Interactor and records every Response handed
// out by Prompt, keyed by the originating Challenge's UniqueID.
type InteractionSaver struct{
	// inner is the wrapped Interactor all calls are delegated to.
	inner Interactor
	// responsesReceived maps Challenge.UniqueID to the stored Response.
	responsesReceived map[string]*Response
}
// Compile-time check that InteractionSaver satisfies Interactor.
var _ Interactor = InteractionSaver{}
// NewInteractionSaver builds an InteractionSaver that delegates to inner
// and starts with an empty response cache.
func NewInteractionSaver(inner Interactor) InteractionSaver {
	saver := InteractionSaver{inner: inner}
	saver.responsesReceived = make(map[string]*Response)
	return saver
}
// Prompt forwards c to the wrapped Interactor and, on success, caches the
// response under the challenge's UniqueID (when one is set).
func (is InteractionSaver) Prompt(c *Challenge) (*Response, error) {
	res, err := is.inner.Prompt(c)
	if err != nil || c.UniqueID == "" {
		return res, err
	}
	is.responsesReceived[c.UniqueID] = res
	return res, err
}
// Status delegates status reporting to the wrapped Interactor unchanged.
func (is InteractionSaver) Status(si *StatusInfo) (StatusSink, error) {
	return is.inner.Status(si)
}
// ResponsesReceived returns a map from challenge UniqueIDs to responses
// received for those UniqueIDs. Do not mutate the returned map.
func (is InteractionSaver) ResponsesReceived() map[string]*Response {
	return is.responsesReceived
}
|
package main
import (
"CRUDtutor/app"
"CRUDtutor/controllers"
"fmt"
"github.com/gorilla/mux"
"net/http"
"os"
)
// main registers the HTTP routes for the CRUD tutorial app, attaches the
// JWT authentication middleware, and serves on $PORT (default 8000).
func main() {
	router := mux.NewRouter()
	router.HandleFunc("/view", controllers.ViewImage).Methods("GET")
	router.HandleFunc("/register", controllers.CreateAccount).Methods("POST")
	router.HandleFunc("/login", controllers.Authenticate).Methods("POST")
	router.HandleFunc("/contacts/new", controllers.CreateContact).Methods("POST")
	router.HandleFunc("/user/{id}/contacts", controllers.GetContactsFor).Methods("GET")
	// JwtAuthentication runs for every request on this router.
	router.Use(app.JwtAuthentication)
	port := os.Getenv("PORT")
	if port == "" {
		port = "8000" //localhost
	}
	fmt.Println(port)
	// ListenAndServe blocks; any startup/serve error is printed on return.
	err := http.ListenAndServe(":"+port, router)
	if err != nil {
		fmt.Print(err)
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/col/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/colcontainer"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/cockroach/pkg/sql/colexecop"
"github.com/cockroachdb/cockroach/pkg/sql/colmem"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
// zeroIntColumn is a preallocated all-zero []int of the maximum batch
// size. NOTE(review): its consumers are outside this chunk; it appears to
// serve as shared scratch for zeroing int columns — confirm at call sites.
var zeroIntColumn = make([]int, coldata.MaxBatchSize)
// newPartitionerToOperator wraps the given partitioner in an Operator
// that dequeues batches of the given types. partitionIdx is left at its
// zero value and may be adjusted on the returned struct.
func newPartitionerToOperator(
	allocator *colmem.Allocator, types []*types.T, partitioner colcontainer.PartitionedQueue,
) *partitionerToOperator {
	return &partitionerToOperator{
		allocator:   allocator,
		types:       types,
		partitioner: partitioner,
	}
}
// partitionerToOperator is an Operator that Dequeue's from the corresponding
// partition on every call to Next. It is a converter from filled in
// PartitionedQueue to Operator.
type partitionerToOperator struct {
	colexecop.ZeroInputNode
	colexecop.NonExplainable
	// allocator performs memory accounting for the dequeued batch.
	allocator *colmem.Allocator
	// types is the schema of the batch allocated in Init.
	types []*types.T
	partitioner colcontainer.PartitionedQueue
	// partitionIdx selects which partition Next dequeues from.
	partitionIdx int
	// batch is the reusable output batch, lazily allocated in Init.
	batch coldata.Batch
}
var _ colexecop.Operator = &partitionerToOperator{}
// Init lazily allocates the reusable output batch; calling it more than
// once is a no-op.
func (p *partitionerToOperator) Init() {
	if p.batch == nil {
		// We will be dequeueing the batches from disk into this batch, so we
		// need to have enough capacity to support the batches of any size.
		p.batch = p.allocator.NewMemBatchWithFixedCapacity(p.types, coldata.BatchSize())
	}
}
// Next dequeues the next batch from partition partitionIdx into the
// reusable p.batch and returns it. Internal errors from the partitioner
// are surfaced via colexecerror.InternalError (panic-based propagation).
func (p *partitionerToOperator) Next(ctx context.Context) coldata.Batch {
	var err error
	// We need to perform the memory accounting on the dequeued batch. Note that
	// such setup allows us to release the memory under the old p.batch (which
	// is no longer valid) and to retain the memory under the just dequeued one.
	p.allocator.PerformOperation(p.batch.ColVecs(), func() {
		err = p.partitioner.Dequeue(ctx, p.partitionIdx, p.batch)
	})
	if err != nil {
		colexecerror.InternalError(err)
	}
	return p.batch
}
// makeOrdering builds an Ordering_Column slice with one entry per column
// index, leaving each entry's direction at its zero value.
func makeOrdering(cols []uint32) []execinfrapb.Ordering_Column {
	ordering := make([]execinfrapb.Ordering_Column, len(cols))
	for i := range cols {
		ordering[i].ColIdx = cols[i]
	}
	return ordering
}
|
// res accumulates the permutations produced by backTrack; it is reset on
// every call to permute.
var res [][]int

// permute returns all permutations of nums (LeetCode 46).
func permute(nums []int) [][]int {
	res = [][]int{}
	backTrack(nums, len(nums), []int{})
	return res
}

// backTrack extends path with each remaining element of nums in turn,
// recording a copy of path whenever nums has been exhausted.
func backTrack(nums []int, numsLen int, path []int) {
	if len(nums) == 0 {
		// copy path: it is mutated by the caller after we return
		p := make([]int, len(path))
		copy(p, path)
		res = append(res, p)
	}
	for i := 0; i < numsLen; i++ {
		cur := nums[i]
		path = append(path, cur)
		// remove nums[i] in place before recursing
		nums = append(nums[:i], nums[i+1:]...)
		backTrack(nums, len(nums), path)
		// restore the slice — element order must be put back exactly
		nums = append(nums[:i], append([]int{cur}, nums[i:]...)...)
		path = path[:len(path)-1]
	}
}
package main
import (
"Tarea1/Logistica/logistica"
"bufio"
"fmt"
"google.golang.org/grpc"
"log"
"net"
"os"
"strings"
"sync"
)
// GetOutboundIP reports this machine's preferred outbound IP by opening a
// UDP "connection" (no packets are actually sent) and inspecting the
// local address the OS selects for it. Fatal on dial failure.
func GetOutboundIP() net.IP {
	conn, err := net.Dial("udp", "8.8.8.8:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	return conn.LocalAddr().(*net.UDPAddr).IP
}
// servirServidor listens on the given TCP port and serves the shared
// logistics gRPC server there, blocking until Serve returns. Listen or
// Serve failures terminate the process via log.Fatalf.
func servirServidor(wg *sync.WaitGroup, logisticaServer *logistica.ServerLogistica, puerto string) {
	// Bug fix: callers wg.Add(1) before launching this goroutine, but the
	// WaitGroup was never signalled here, so wg.Wait() could never observe
	// completion. Mark done when Serve returns.
	defer wg.Done()
	lis, err := net.Listen("tcp", ":"+puerto)
	if err != nil {
		log.Fatalf("Failed to listen on port %s: %v", puerto, err)
	}
	grpcServer := grpc.NewServer()
	logistica.RegisterLogisticaServiceServer(grpcServer, logisticaServer)
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("Failed to serve gRPC server over port %s: %v", puerto, err)
	}
}
// main reads the finanzas host from stdin, configures the shared logistics
// server with it, and serves gRPC on four ports concurrently, waiting for
// all of them.
func main() {
	var wg sync.WaitGroup
	log.Printf("El IP del servidor es: %v", GetOutboundIP())
	reader := bufio.NewReader(os.Stdin)
	fmt.Printf("Ingrese nombre de la maquina donde se encuentra finanzas: ")
	ipFinanzas, _ := reader.ReadString('\n')
	// strip the trailing newline (and carriage return on Windows input)
	ipFinanzas = strings.TrimSuffix(ipFinanzas, "\n")
	ipFinanzas = strings.TrimSuffix(ipFinanzas, "\r")
	s := logistica.ServerLogistica{}
	s.SetIPFinanzas(ipFinanzas)
	for _, puerto := range []string{"9000", "9100", "9101", "9102"} {
		wg.Add(1)
		go servirServidor(&wg, &s, puerto)
	}
	wg.Wait()
}
|
package resolver
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/opsee/basic/schema"
opsee_aws_ec2 "github.com/opsee/basic/schema/aws/ec2"
opsee_aws_rds "github.com/opsee/basic/schema/aws/rds"
opsee "github.com/opsee/basic/service"
log "github.com/opsee/logrus"
"golang.org/x/net/context"
)
// GetInstances fetches instances of the requested type ("ec2" or "rds")
// for the user's region/vpc, optionally narrowed to a single instance id.
// An unknown instanceType yields a nil value and a descriptive error.
func (c *Client) GetInstances(ctx context.Context, user *schema.User, region, vpc, instanceType, instanceId string) (interface{}, error) {
	log.WithFields(log.Fields{
		"customer_id": user.CustomerId,
	}).Info("get instances request")
	switch instanceType {
	case "ec2":
		return c.getInstancesEc2(ctx, user, region, vpc, instanceId)
	case "rds":
		return c.getInstancesRds(ctx, user, region, vpc, instanceId)
	}
	// Bug fix: the error was previously returned in the value slot with a
	// nil error, so callers checking err saw success for unknown types.
	return nil, fmt.Errorf("instance type not known: %s", instanceType)
}
// getInstancesEc2 lists EC2 instances in the given VPC via the Bezos
// proxy service, flattening the reservation structure into a single slice.
// instanceId, when non-empty, restricts the query to that instance.
func (c *Client) getInstancesEc2(ctx context.Context, user *schema.User, region, vpc, instanceId string) ([]*opsee_aws_ec2.Instance, error) {
	input := &opsee_aws_ec2.DescribeInstancesInput{
		Filters: []*opsee_aws_ec2.Filter{
			{
				Name:   aws.String("vpc-id"),
				Values: []string{vpc},
			},
		},
	}
	if instanceId != "" {
		input.InstanceIds = []string{instanceId}
	}
	resp, err := c.Bezos.Get(ctx, &opsee.BezosRequest{User: user, Region: region, VpcId: vpc, Input: &opsee.BezosRequest_Ec2_DescribeInstancesInput{input}})
	if err != nil {
		return nil, err
	}
	output := resp.GetEc2_DescribeInstancesOutput()
	if output == nil {
		return nil, fmt.Errorf("error decoding aws response")
	}
	instances := make([]*opsee_aws_ec2.Instance, 0)
	for _, res := range output.Reservations {
		// append with spread replaces the original element-by-element inner
		// loop; ranging over a nil res.Instances is a no-op, so the explicit
		// nil check is also unnecessary.
		instances = append(instances, res.Instances...)
	}
	return instances, nil
}
// getInstancesRds lists RDS DB instances via the Bezos proxy service.
// Unlike EC2, the RDS API does not support a vpc filter, so all instances
// in the region are returned (narrowed to one when instanceId is set).
func (c *Client) getInstancesRds(ctx context.Context, user *schema.User, region, vpc, instanceId string) ([]*opsee_aws_rds.DBInstance, error) {
	// filter is not supported
	input := &opsee_aws_rds.DescribeDBInstancesInput{}
	if instanceId != "" {
		input.DBInstanceIdentifier = aws.String(instanceId)
	}
	resp, err := c.Bezos.Get(ctx, &opsee.BezosRequest{User: user, Region: region, VpcId: vpc, Input: &opsee.BezosRequest_Rds_DescribeDBInstancesInput{input}})
	if err != nil {
		return nil, err
	}
	output := resp.GetRds_DescribeDBInstancesOutput()
	if output == nil {
		return nil, fmt.Errorf("error decoding aws response")
	}
	return output.DBInstances, nil
}
|
package utils
import (
"testing"
)
// TestGetLinkForMessage verifies the !linkMe command resolves the
// mhwi_deco_rates keyword to its kiranico URL.
func TestGetLinkForMessage(t *testing.T) {
	// Note the trailing space: GetLinkForMessage emits one after the URL.
	// Bug fix: the failure message previously printed a want string
	// without the trailing space, disagreeing with the actual comparison.
	want := "mhwi_deco_rates : https://mhworld.kiranico.com/decorations "
	link := GetLinkForMessage("!linkMe mhwi_deco_rates")
	if link != want {
		t.Errorf("link incorrect, got: %s, want: %s.", link, want)
	}
}
|
// REST API for TODO application
//
// Provides REST API for create, read, update and delete tasks.
//
// Schemes: http
// BasePath: /
// Version: 0.0.1
// Host: localhost
//
// Consumes:
// - application/json
//
// Produces:
// - application/json
//
// swagger:meta
package rest
import (
"context"
"encoding/json"
"net/http"
"strconv"
"github.com/go-chi/chi"
"github.com/pkg/errors"
"github.com/souryogurt/victim"
)
// TaskService abstracts the task CRUD backend used by the REST handlers,
// plus request-scoped logging.
type TaskService interface {
	GetAllTasks(ctx context.Context) ([]*victim.Task, error)
	CreateTask(ctx context.Context, task *victim.Task) (*victim.Task, error)
	GetTask(ctx context.Context, ID int) (*victim.Task, error)
	UpdateTask(ctx context.Context, task *victim.Task) (*victim.Task, error)
	// DeleteTask removes a task and returns the deleted task's ID.
	DeleteTask(ctx context.Context, ID int) (int, error)
	// Println logs its arguments in the context of the current request.
	Println(ctx context.Context, v ...interface{})
}
// GetAllTasksResponse is the 200 payload of GET /tasks.
// A list of all requested tasks
// swagger:response getAllTasksResponse
type GetAllTasksResponse struct {
	// The array of all tasks
	// in: body
	Body []*victim.Task
}
// swagger:route GET /tasks tasks getalltasks
// List all tasks
//
// List all tasks
// Responses:
// 200: getAllTasksResponse
// 500: description:Internal server error
func GetAllTasks(svc TaskService) http.HandlerFunc {
	// The handler fetches every task from svc and writes them as JSON.
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		tasks, err := svc.GetAllTasks(ctx)
		if err != nil {
			// typo fix: "retreive" -> "retrieve"
			err = errors.Wrap(err, "can't retrieve tasks")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		js, err := json.Marshal(tasks)
		if err != nil {
			err = errors.Wrap(err, "can't marshal response")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		if _, err := w.Write(js); err != nil {
			// headers are already sent; the failure can only be logged
			err = errors.Wrap(err, "can't write response")
			svc.Println(ctx, err)
		}
	}
}
// CreateTaskParams describes the JSON body accepted by POST /tasks.
// swagger:parameters createtask
type CreateTaskParams struct {
	// Required: true
	// in: body
	Body victim.Task
}
// CreateTaskResponse is the 201 payload of POST /tasks.
// An attributes of created task
// swagger:response createTaskResponse
type CreateTaskResponse struct {
	// Created task parameters
	//
	// in: body
	Body *victim.Task
}
// swagger:route POST /tasks tasks createtask
// Create new task
//
// Create new task
// Responses:
// 201: createTaskResponse
// 400: description:Bad incoming JSON
// 500: description:Internal server error
func CreateTask(svc TaskService) http.HandlerFunc {
	// The handler decodes a task from the request body, stores it via svc,
	// and echoes the created task back as JSON with status 201.
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		var task *victim.Task
		decoder := json.NewDecoder(r.Body)
		if err := decoder.Decode(&task); err != nil {
			err = errors.Wrap(err, "can't decode request payload")
			svc.Println(ctx, err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		// NOTE(review): a body of JSON "null" decodes without error and
		// leaves task nil, which is then passed to svc.CreateTask — confirm
		// the service tolerates a nil task.
		resp, err := svc.CreateTask(ctx, task)
		if err != nil {
			err = errors.Wrap(err, "can't create task")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		js, err := json.Marshal(resp)
		if err != nil {
			err = errors.Wrap(err, "can't marshal response")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusCreated)
		if _, err := w.Write(js); err != nil {
			err = errors.Wrap(err, "can't write response")
			svc.Println(ctx, err)
		}
	}
}
// GetTaskParams describes the path parameter of GET /task/{ID}.
// swagger:parameters gettask
type GetTaskParams struct {
	// Identifier of the task
	// Required: true
	// in: path
	ID int
}
// GetTaskResponse is the 200 payload of GET /task/{ID}.
// A requested task
// swagger:response getTaskResponse
type GetTaskResponse struct {
	// The task
	// in: body
	Body *victim.Task
}
// swagger:route GET /task/{ID} tasks gettask
// Get task by ID
//
// Get task by ID
// Responses:
// 200: getTaskResponse
// 400: description:Invalid task ID
// 500: description:Internal server error
func GetTask(svc TaskService) http.HandlerFunc {
	// The handler parses the "taskID" URL parameter, fetches the task from
	// svc, and writes it as JSON.
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		ID, err := strconv.Atoi(chi.URLParam(r, "taskID"))
		if err != nil {
			err = errors.Wrap(err, "can't parse task ID")
			svc.Println(ctx, err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		task, err := svc.GetTask(ctx, ID)
		if err != nil {
			// typo fix: "retreive" -> "retrieve"
			err = errors.Wrap(err, "can't retrieve task")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		js, err := json.Marshal(task)
		if err != nil {
			err = errors.Wrap(err, "can't marshal response")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		if _, err := w.Write(js); err != nil {
			err = errors.Wrap(err, "can't write response")
			svc.Println(ctx, err)
		}
	}
}
// UpdateTaskParams describes the path parameter of PUT /task/{ID}.
// swagger:parameters updatetask
type UpdateTaskParams struct {
	// Identifier of the task
	// Required: true
	// in: path
	ID int
}
// UpdateTaskResponse is the 200 payload of PUT /task/{ID}.
// An updated task attributes
// swagger:response updateTaskResponse
type UpdateTaskResponse struct {
	// Updated task attributes
	// in: body
	Body *victim.Task
}
// swagger:route PUT /task/{ID} tasks updatetask
// Update task
//
// Update task
// Responses:
// 200: updateTaskResponse
// 400: description:Invalid task ID
// 500: description:Internal server error
func UpdateTask(svc TaskService) http.HandlerFunc {
	// The handler parses the "taskID" URL parameter, decodes the new task
	// attributes from the body, applies the update via svc, and writes the
	// updated task back as JSON.
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		ID, err := strconv.Atoi(chi.URLParam(r, "taskID"))
		if err != nil {
			err = errors.Wrap(err, "can't parse task ID")
			svc.Println(ctx, err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		var task *victim.Task
		decoder := json.NewDecoder(r.Body)
		if err := decoder.Decode(&task); err != nil {
			err = errors.Wrap(err, "can't decode request payload")
			svc.Println(ctx, err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		// Bug fix: a body of JSON "null" decodes without error and leaves
		// task nil; the original then panicked on task.ID below. Reject it
		// as a bad request instead.
		if task == nil {
			svc.Println(ctx, errors.New("empty request payload"))
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		task.ID = ID
		resp, err := svc.UpdateTask(ctx, task)
		if err != nil {
			err = errors.Wrap(err, "can't update task")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		js, err := json.Marshal(resp)
		if err != nil {
			err = errors.Wrap(err, "can't marshal response")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		if _, err := w.Write(js); err != nil {
			err = errors.Wrap(err, "can't write response")
			svc.Println(ctx, err)
		}
	}
}
// DeleteTaskParams describes the path parameter of DELETE /task/{ID}.
// swagger:parameters deletetask
type DeleteTaskParams struct {
	// Identifier of the task
	// Required: true
	// in: path
	ID int
}
// DeleteTaskResponse is the 200 payload of DELETE /task/{ID}.
// An ID of deleted task
// swagger:response deleteTaskResponse
type DeleteTaskResponse struct {
	// Deleted task ID
	// in: body
	Body int
}
// swagger:route DELETE /task/{ID} tasks deletetask
// Delete task
//
// Delete task
// Responses:
// 200: deleteTaskResponse
// 400: description:Invalid task ID
// 500: description:Internal server error
func DeleteTask(svc TaskService) http.HandlerFunc {
	// The handler parses the "taskID" URL parameter, deletes the task via
	// svc, and writes the deleted task's ID back as JSON.
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		ID, err := strconv.Atoi(chi.URLParam(r, "taskID"))
		if err != nil {
			err = errors.Wrap(err, "can't parse task ID")
			svc.Println(ctx, err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}
		taskID, err := svc.DeleteTask(ctx, ID)
		if err != nil {
			err = errors.Wrap(err, "can't delete task")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		js, err := json.Marshal(taskID)
		if err != nil {
			err = errors.Wrap(err, "can't marshal response")
			svc.Println(ctx, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		if _, err := w.Write(js); err != nil {
			err = errors.Wrap(err, "can't write response")
			svc.Println(ctx, err)
		}
	}
}
|
package primitives
// Lambertian is a diffuse material with constant albedo C.
type Lambertian struct {
	C Vector
}
// Bounce scatters the ray diffusely: the new direction is the surface
// normal perturbed by a random point in the unit sphere. The incident
// ray's direction is ignored, characteristic of an ideal diffuse
// reflector, and the ray is never absorbed (always returns true).
func (l Lambertian) Bounce(input Ray, hit HitRecord) (bool, Ray) {
	direction := hit.Normal.Add(VectorInUnitSphere())
	return true, Ray{hit.Point, direction}
}
// Color returns the material's albedo.
func (l Lambertian) Color() Vector {
	return l.C
}
// Copyright 2023 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"crypto/tls"
"encoding/asn1"
"io"
"net"
"strings"
"sync/atomic"
"time"
"github.com/gravitational/trace"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/exp/slices"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"github.com/gravitational/teleport/api/breaker"
"github.com/gravitational/teleport/api/client"
"github.com/gravitational/teleport/api/client/proxy/transport/transportv1"
"github.com/gravitational/teleport/api/defaults"
transportv1pb "github.com/gravitational/teleport/api/gen/proto/go/teleport/transport/v1"
"github.com/gravitational/teleport/api/metadata"
"github.com/gravitational/teleport/api/observability/tracing"
tracessh "github.com/gravitational/teleport/api/observability/tracing/ssh"
)
// SSHDialer provides a mechanism to create a ssh client.
type SSHDialer interface {
	// Dial establishes a client connection to an SSH server.
	Dial(ctx context.Context, network string, addr string, config *ssh.ClientConfig) (*tracessh.Client, error)
}
// SSHDialerFunc implements SSHDialer, allowing a plain function to be used
// wherever an SSHDialer is required (analogous to http.HandlerFunc).
type SSHDialerFunc func(ctx context.Context, network string, addr string, config *ssh.ClientConfig) (*tracessh.Client, error)
// Dial calls f(ctx, network, addr, config).
func (f SSHDialerFunc) Dial(ctx context.Context, network string, addr string, config *ssh.ClientConfig) (*tracessh.Client, error) {
	return f(ctx, network, addr, config)
}
// ClientConfig contains configuration needed for a Client
// to be able to connect to the cluster.
type ClientConfig struct {
	// ProxyAddress is the address of the Proxy server.
	ProxyAddress string
	// TLSRoutingEnabled indicates if the cluster is using TLS Routing.
	TLSRoutingEnabled bool
	// TLSConfig contains the tls.Config required for mTLS connections.
	TLSConfig *tls.Config
	// UnaryInterceptors are optional [grpc.UnaryClientInterceptor] to apply
	// to the gRPC client.
	UnaryInterceptors []grpc.UnaryClientInterceptor
	// StreamInterceptors are optional [grpc.StreamClientInterceptor] to apply
	// to the gRPC client.
	StreamInterceptors []grpc.StreamClientInterceptor
	// SSHDialer allows callers to control how a [tracessh.Client] is created.
	SSHDialer SSHDialer
	// SSHConfig is the [ssh.ClientConfig] used to connect to the Proxy SSH server.
	SSHConfig *ssh.ClientConfig
	// DialTimeout defines how long to attempt dialing before timing out.
	DialTimeout time.Duration
	// DialOpts define options for dialing the client connection.
	DialOpts []grpc.DialOption
	// ALPNConnUpgradeRequired indicates that ALPN connection upgrades are
	// required for making TLS routing requests.
	ALPNConnUpgradeRequired bool
	// InsecureSkipVerify is an option to skip HTTPS cert check
	InsecureSkipVerify bool
	// The below items are intended to be used by tests to connect without mTLS.
	// Both are populated by CheckAndSetDefaults from TLSConfig (or with
	// insecure equivalents when TLSConfig is nil).
	// The gRPC transport credentials to use when establishing the connection to proxy.
	creds func() credentials.TransportCredentials
	// The client credentials to use when establishing the connection to auth.
	clientCreds func() client.Credentials
}
// CheckAndSetDefaults ensures required options are present and
// sets the default value of any that are omitted. It also derives the
// transport/client credential factories from TLSConfig, falling back to
// insecure credentials when no TLSConfig is provided (test-only path).
func (c *ClientConfig) CheckAndSetDefaults() error {
	if c.ProxyAddress == "" {
		return trace.BadParameter("missing required parameter ProxyAddress")
	}
	if c.SSHDialer == nil {
		return trace.BadParameter("missing required parameter SSHDialer")
	}
	if c.SSHConfig == nil {
		return trace.BadParameter("missing required parameter SSHConfig")
	}
	if c.DialTimeout <= 0 {
		c.DialTimeout = defaults.DefaultIOTimeout
	}
	if c.TLSConfig != nil {
		c.clientCreds = func() client.Credentials {
			return client.LoadTLS(c.TLSConfig.Clone())
		}
		c.creds = func() credentials.TransportCredentials {
			tlsCfg := c.TLSConfig.Clone()
			// Advertise the proxy SSH gRPC protocol via ALPN (NextProtos).
			if !slices.Contains(c.TLSConfig.NextProtos, protocolProxySSHGRPC) {
				tlsCfg.NextProtos = append(tlsCfg.NextProtos, protocolProxySSHGRPC)
			}
			// This logic still appears to be necessary to force client to always send
			// a certificate regardless of the server setting. Otherwise the client may pick
			// not to send the client certificate by looking at certificate request.
			if len(tlsCfg.Certificates) > 0 {
				cert := tlsCfg.Certificates[0]
				tlsCfg.Certificates = nil
				tlsCfg.GetClientCertificate = func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
					return &cert, nil
				}
			}
			return credentials.NewTLS(tlsCfg)
		}
	} else {
		// No TLSConfig: wire up insecure credentials for both layers.
		c.clientCreds = func() client.Credentials {
			return insecureCredentials{}
		}
		c.creds = func() credentials.TransportCredentials {
			return insecure.NewCredentials()
		}
	}
	return nil
}
// insecureCredentials implements [client.Credentials] and is used by tests
// to connect to the Auth server without mTLS.
type insecureCredentials struct{}

// Dialer is not implemented; connections are established by other means.
func (mc insecureCredentials) Dialer(client.Config) (client.ContextDialer, error) {
	return nil, trace.NotImplemented("no dialer")
}

// TLSConfig returns a nil config, signalling that TLS is not used.
func (mc insecureCredentials) TLSConfig() (*tls.Config, error) {
	return nil, nil
}

// SSHClientConfig is not implemented for insecure connections.
func (mc insecureCredentials) SSHClientConfig() (*ssh.ClientConfig, error) {
	return nil, trace.NotImplemented("no ssh config")
}
// Client is a client to the Teleport Proxy SSH server on behalf of a user.
// The Proxy SSH port used to serve only SSH, however portions of the api are
// being migrated to gRPC to reduce latency. The Client is capable of communicating
// to the Proxy via both mechanism; by default it will choose to use gRPC over
// SSH where it is able to.
type Client struct {
	// cfg are the user provided configuration parameters required to
	// connect and interact with the Proxy.
	cfg *ClientConfig
	// grpcConn is the established gRPC connection to the Proxy.
	// Nil when the Client fell back to SSH (see newSSHClient).
	grpcConn *grpc.ClientConn
	// transport is the transportv1.Client
	// Nil when the Client fell back to SSH.
	transport *transportv1.Client
	// sshClient is the established SSH connection to the Proxy.
	// Nil when the gRPC transport is in use (see newGRPCClient).
	sshClient *tracessh.Client
	// clusterName as determined by inspecting the certificate presented by
	// the Proxy during the connection handshake.
	clusterName *clusterName
}
// protocolProxySSHGRPC is TLS ALPN protocol value used to indicate gRPC
// traffic intended for the Teleport Proxy on the SSH port.
// It is appended to NextProtos in CheckAndSetDefaults.
const protocolProxySSHGRPC string = "teleport-proxy-ssh-grpc"
// NewClient creates a new Client that attempts to connect to the gRPC
// server being served by the Proxy SSH port by default. If unable to
// connect the Client falls back to connecting to the Proxy SSH port
// via SSH.
//
// If it is known that the gRPC server doesn't serve the required API
// of the caller, then prefer to use NewSSHClient instead which omits
// the gRPC dialing altogether.
func NewClient(ctx context.Context, cfg ClientConfig) (*Client, error) {
	if err := cfg.CheckAndSetDefaults(); err != nil {
		return nil, trace.Wrap(err)
	}
	clt, grpcErr := newGRPCClient(ctx, &cfg)
	if grpcErr == nil {
		// Attempt an RPC to ensure the proxy is serving gRPC on the
		// SSH Port. This is needed for backward compatibility with
		// Proxies that aren't serving gRPC since dialing happens in
		// the background.
		//
		// DELETE IN 14.0.0
		_, err := clt.transport.ClusterDetails(ctx)
		if err == nil {
			return clt, nil
		}
		// The gRPC connection is established but unusable; close it
		// before falling back to SSH so the underlying connection is
		// not leaked.
		_ = clt.Close()
	}
	clt, sshErr := newSSHClient(ctx, &cfg)
	// Only aggregate errors if there was an issue dialing the grpc server so
	// that helpers like trace.IsAccessDenied will still work.
	if grpcErr == nil {
		return clt, trace.Wrap(sshErr)
	}
	return nil, trace.NewAggregate(grpcErr, sshErr)
}
// clusterName holds the cluster name discovered during the handshake
// with the server. It is safe for concurrent use; only the first set
// wins and subsequent calls are no-ops.
type clusterName struct {
	name atomic.Pointer[string]
}

// get returns the stored name, or the empty string if one was never set.
func (c *clusterName) get() string {
	if n := c.name.Load(); n != nil {
		return *n
	}
	return ""
}

// set records name unless a value has already been stored.
func (c *clusterName) set(name string) {
	c.name.CompareAndSwap(nil, &name)
}
// clusterCredentials is a [credentials.TransportCredentials] implementation
// that obtains the name of the cluster being connected to from the certificate
// presented by the server. This allows the client to determine the cluster name when
// connecting via jump hosts.
type clusterCredentials struct {
	credentials.TransportCredentials
	// clusterName receives the name extracted during ClientHandshake.
	clusterName *clusterName
}

var (
	// teleportClusterASN1ExtensionOID is an extension ID used when encoding/decoding
	// origin teleport cluster name into certificates.
	teleportClusterASN1ExtensionOID = asn1.ObjectIdentifier{1, 3, 9999, 1, 7}
)
// ClientHandshake performs the handshake with the wrapped
// [credentials.TransportCredentials] and then inspects the certificate
// presented by the server for the [teleportClusterASN1ExtensionOID] to
// determine the cluster that the server belongs to.
func (c *clusterCredentials) ClientHandshake(ctx context.Context, authority string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	conn, info, err := c.TransportCredentials.ClientHandshake(ctx, authority, conn)
	if err != nil {
		return conn, info, trace.Wrap(err)
	}
	// A non-TLS handshake carries no certificate to inspect.
	tlsInfo, ok := info.(credentials.TLSInfo)
	if !ok {
		return conn, info, nil
	}
	peerCerts := tlsInfo.State.PeerCertificates
	if len(peerCerts) == 0 {
		return conn, info, nil
	}
	// Scan the leaf certificate's subject for the Teleport cluster extension.
	for _, attr := range peerCerts[0].Subject.Names {
		if !attr.Type.Equal(teleportClusterASN1ExtensionOID) {
			continue
		}
		if name, isString := attr.Value.(string); isString {
			c.clusterName.set(name)
			break
		}
	}
	return conn, info, nil
}
// newGRPCClient creates a Client that is connected via gRPC.
// The named err return is inspected by the deferred cleanup below to
// close the connection on any failure after dialing.
func newGRPCClient(ctx context.Context, cfg *ClientConfig) (_ *Client, err error) {
	// The timeout only bounds the dial itself.
	dialCtx, cancel := context.WithTimeout(ctx, cfg.DialTimeout)
	defer cancel()
	c := &clusterName{}
	conn, err := grpc.DialContext(
		dialCtx,
		cfg.ProxyAddress,
		append([]grpc.DialOption{
			grpc.WithContextDialer(newDialerForGRPCClient(ctx, cfg)),
			// clusterCredentials captures the cluster name during the TLS handshake.
			grpc.WithTransportCredentials(&clusterCredentials{TransportCredentials: cfg.creds(), clusterName: c}),
			grpc.WithChainUnaryInterceptor(
				append(cfg.UnaryInterceptors,
					otelgrpc.UnaryClientInterceptor(),
					metadata.UnaryClientInterceptor,
				)...,
			),
			grpc.WithChainStreamInterceptor(
				append(cfg.StreamInterceptors,
					otelgrpc.StreamClientInterceptor(),
					metadata.StreamClientInterceptor,
				)...,
			),
		}, cfg.DialOpts...)...,
	)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Close the connection if anything below fails.
	defer func() {
		if err != nil {
			_ = conn.Close()
		}
	}()
	transport, err := transportv1.NewClient(transportv1pb.NewTransportServiceClient(conn))
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &Client{
		cfg:         cfg,
		grpcConn:    conn,
		transport:   transport,
		clusterName: c,
	}, nil
}
// newDialerForGRPCClient returns a gRPC context dialer that honors the
// configured insecure-skip-verify and ALPN connection upgrade options.
func newDialerForGRPCClient(ctx context.Context, cfg *ClientConfig) func(context.Context, string) (net.Conn, error) {
	return client.GRPCContextDialer(client.NewDialer(ctx, defaults.DefaultIdleTimeout, cfg.DialTimeout,
		client.WithInsecureSkipVerify(cfg.InsecureSkipVerify),
		client.WithALPNConnUpgrade(cfg.ALPNConnUpgradeRequired),
		client.WithALPNConnUpgradePing(true), // Use Ping protocol for long-lived connections.
	))
}
// teleportAuthority is the extension set by the server
// which contains the name of the cluster it is in.
// It is read from host certificates in clusterCallback.
const teleportAuthority = "x-teleport-authority"
// clusterCallback wraps an [ssh.HostKeyCallback] so that, once the wrapped
// callback accepts the host key, the cluster name advertised by the server
// via the teleportAuthority certificate extension is captured into c. This
// allows the client to determine the cluster name when using jump hosts.
func clusterCallback(c *clusterName, wrapped ssh.HostKeyCallback) ssh.HostKeyCallback {
	return func(hostname string, remote net.Addr, key ssh.PublicKey) error {
		if err := wrapped(hostname, remote, key); err != nil {
			return trace.Wrap(err)
		}
		// Plain host keys carry no extensions; only certificates do.
		cert, isCert := key.(*ssh.Certificate)
		if !isCert {
			return nil
		}
		if name, found := cert.Permissions.Extensions[teleportAuthority]; found {
			c.set(name)
		}
		return nil
	}
}
// newSSHClient creates a Client that is connected via SSH.
// The user supplied ssh.ClientConfig is copied field by field so the
// host key callback can be wrapped to capture the cluster name during
// the handshake.
func newSSHClient(ctx context.Context, cfg *ClientConfig) (*Client, error) {
	c := &clusterName{}
	clientCfg := &ssh.ClientConfig{
		// Carry over cipher/KEX/MAC preferences, matching the copy
		// performed by Client.SSHConfig.
		Config:            cfg.SSHConfig.Config,
		User:              cfg.SSHConfig.User,
		Auth:              cfg.SSHConfig.Auth,
		HostKeyCallback:   clusterCallback(c, cfg.SSHConfig.HostKeyCallback),
		BannerCallback:    cfg.SSHConfig.BannerCallback,
		ClientVersion:     cfg.SSHConfig.ClientVersion,
		HostKeyAlgorithms: cfg.SSHConfig.HostKeyAlgorithms,
		Timeout:           cfg.SSHConfig.Timeout,
	}
	clt, err := cfg.SSHDialer.Dial(ctx, "tcp", cfg.ProxyAddress, clientCfg)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &Client{
		cfg:         cfg,
		sshClient:   clt,
		clusterName: c,
	}, nil
}
// ClusterName returns the name of the cluster that the
// connected Proxy is a member of. It returns the empty string if the
// handshake did not reveal a cluster name.
func (c *Client) ClusterName() string {
	return c.clusterName.get()
}
// Close attempts to close both the gRPC and SSH connections.
// Any close errors are aggregated into a single error.
func (c *Client) Close() error {
	var errs []error
	if c.sshClient != nil {
		errs = append(errs, c.sshClient.Close())
	}
	if c.grpcConn != nil {
		errs = append(errs, c.grpcConn.Close())
	}
	return trace.NewAggregate(errs...)
}
// SSHConfig returns the [ssh.ClientConfig] for the provided user which
// should be used when creating a [tracessh.Client] with the returned
// [net.Conn] from [Client.DialHost]. All fields except User are copied
// from the config the Client was created with.
func (c *Client) SSHConfig(user string) *ssh.ClientConfig {
	return &ssh.ClientConfig{
		Config:            c.cfg.SSHConfig.Config,
		User:              user,
		Auth:              c.cfg.SSHConfig.Auth,
		HostKeyCallback:   c.cfg.SSHConfig.HostKeyCallback,
		BannerCallback:    c.cfg.SSHConfig.BannerCallback,
		ClientVersion:     c.cfg.SSHConfig.ClientVersion,
		HostKeyAlgorithms: c.cfg.SSHConfig.HostKeyAlgorithms,
		Timeout:           c.cfg.SSHConfig.Timeout,
	}
}
// ClusterDetails provide cluster configuration
// details as known by the connected Proxy.
type ClusterDetails struct {
	// FIPS dictates whether FIPS mode is enabled.
	FIPS bool
}
// ClientConfig returns a [client.Config] that may be used to connect to the
// Auth server in the provided cluster via [client.New] or similar. The [client.Config]
// returned will have the correct credentials and dialer set based on the ClientConfig
// that was provided to create this Client.
func (c *Client) ClientConfig(ctx context.Context, cluster string) client.Config {
	switch {
	case c.cfg.TLSRoutingEnabled:
		// TLS routing: dial the Proxy address directly with ALPN SNI routing.
		return client.Config{
			Context:                    ctx,
			Addrs:                      []string{c.cfg.ProxyAddress},
			Credentials:                []client.Credentials{c.cfg.clientCreds()},
			ALPNSNIAuthDialClusterName: cluster,
			CircuitBreakerConfig:       breaker.NoopBreakerConfig(),
			ALPNConnUpgradeRequired:    c.cfg.ALPNConnUpgradeRequired,
		}
	case c.sshClient != nil:
		// SSH fallback: tunnel the auth connection over an SSH subsystem.
		return client.Config{
			Context:              ctx,
			Credentials:          []client.Credentials{c.cfg.clientCreds()},
			CircuitBreakerConfig: breaker.NoopBreakerConfig(),
			DialInBackground:     true,
			Dialer: client.ContextDialerFunc(func(dialCtx context.Context, _ string, _ string) (net.Conn, error) {
				// Don't dial if the context has timed out.
				select {
				case <-dialCtx.Done():
					return nil, dialCtx.Err()
				default:
				}
				// "@cluster" with an empty target addresses the auth server.
				conn, err := dialSSH(dialCtx, c.sshClient, c.cfg.ProxyAddress, "@"+cluster, nil)
				return conn, trace.Wrap(err)
			}),
		}
	default:
		// gRPC transport: dial the cluster via the transport service.
		return client.Config{
			Context:              ctx,
			Credentials:          []client.Credentials{c.cfg.clientCreds()},
			CircuitBreakerConfig: breaker.NoopBreakerConfig(),
			DialInBackground:     true,
			Dialer: client.ContextDialerFunc(func(dialCtx context.Context, _ string, _ string) (net.Conn, error) {
				// Don't dial if the context has timed out.
				select {
				case <-dialCtx.Done():
					return nil, dialCtx.Err()
				default:
				}
				// Intentionally not using the dial context because it is only valid
				// for the lifetime of the dial. Using it causes the stream to be terminated
				// immediately after the dial completes.
				connContext := tracing.WithPropagationContext(context.Background(), tracing.PropagationContextFromContext(dialCtx))
				conn, err := c.transport.DialCluster(connContext, cluster, nil)
				return conn, trace.Wrap(err)
			}),
		}
	}
}
// DialHost establishes a connection to the `target` in cluster named `cluster`. If a keyring
// is provided it will only be forwarded if proxy recording mode is enabled in the cluster.
func (c *Client) DialHost(ctx context.Context, target, cluster string, keyring agent.ExtendedAgent) (net.Conn, ClusterDetails, error) {
	// Use the SSH transport when the Client fell back to SSH.
	if c.sshClient != nil {
		conn, details, err := c.dialHostSSH(ctx, target, cluster, keyring)
		return conn, details, trace.Wrap(err)
	}
	conn, details, err := c.transport.DialHost(ctx, target, cluster, nil, keyring)
	if err != nil {
		return nil, ClusterDetails{}, trace.ConnectionProblem(err, "failed connecting to host %s: %v", target, err)
	}
	return conn, ClusterDetails{FIPS: details.FipsEnabled}, nil
}
// dialHostSSH connects to the target via SSH. To match backwards compatibility the
// cluster details are retrieved from the Proxy SSH server via a clusterDetailsRequest
// request to determine if the keyring should be forwarded.
func (c *Client) dialHostSSH(ctx context.Context, target, cluster string, keyring agent.ExtendedAgent) (net.Conn, ClusterDetails, error) {
	details, err := c.clusterDetailsSSH(ctx)
	if err != nil {
		// Return the zero ClusterDetails rather than values derived from
		// the failed lookup; callers must not rely on details on error.
		return nil, ClusterDetails{}, trace.Wrap(err)
	}
	// Prevent forwarding the keychain if the proxy is
	// not doing the recording.
	if !details.RecordingProxy {
		keyring = nil
	}
	conn, err := dialSSH(ctx, c.sshClient, c.cfg.ProxyAddress, target+"@"+cluster, keyring)
	return conn, ClusterDetails{FIPS: details.FIPSEnabled}, trace.Wrap(err)
}
// ClusterDetails retrieves cluster information as seen by the Proxy.
func (c *Client) ClusterDetails(ctx context.Context) (ClusterDetails, error) {
	// Use the SSH global request when connected over SSH.
	if c.sshClient != nil {
		details, err := c.clusterDetailsSSH(ctx)
		return ClusterDetails{FIPS: details.FIPSEnabled}, trace.Wrap(err)
	}
	details, err := c.transport.ClusterDetails(ctx)
	if err != nil {
		return ClusterDetails{}, trace.Wrap(err)
	}
	return ClusterDetails{FIPS: details.FipsEnabled}, nil
}
// sshDetails is the response from a clusterDetailsRequest.
type sshDetails struct {
	// RecordingProxy indicates whether the Proxy performs session recording.
	RecordingProxy bool
	// FIPSEnabled indicates whether FIPS mode is enabled.
	FIPSEnabled bool
}

// clusterDetailsRequest is the SSH global request name used to fetch sshDetails.
const clusterDetailsRequest = "cluster-details@goteleport.com"
// clusterDetailsSSH retrieves the cluster details via a clusterDetailsRequest
// global request sent over the established SSH connection.
func (c *Client) clusterDetailsSSH(ctx context.Context) (sshDetails, error) {
	ok, resp, err := c.sshClient.SendRequest(ctx, clusterDetailsRequest, true, nil)
	if err != nil {
		return sshDetails{}, trace.Wrap(err)
	}
	if !ok {
		return sshDetails{}, trace.ConnectionProblem(nil, "failed to get cluster details")
	}
	var details sshDetails
	if err := ssh.Unmarshal(resp, &details); err != nil {
		return sshDetails{}, trace.Wrap(err)
	}
	// All error paths returned above; report success explicitly instead of
	// wrapping the outer err, which is necessarily nil here.
	return details, nil
}
// dialSSH creates a SSH session to the target address and proxies a [net.Conn]
// over the standard input and output of the session.
// The named err return lets the deferred cleanups close the session and
// connection on any failure below.
func dialSSH(ctx context.Context, clt *tracessh.Client, proxyAddress, targetAddress string, keyring agent.ExtendedAgent) (_ net.Conn, err error) {
	session, err := clt.NewSession(ctx)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	defer func() {
		if err != nil {
			_ = session.Close()
		}
	}()
	conn, err := newSessionConn(session, proxyAddress, targetAddress)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	defer func() {
		if err != nil {
			_ = conn.Close()
		}
	}()
	// Capture stderr so failures can include the server's error output.
	sessionError, err := session.StderrPipe()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// If a keyring was provided then set up agent forwarding.
	if keyring != nil {
		// Add a handler to receive requests on the auth-agent@openssh.com channel. If there is
		// already a handler it's safe to ignore the error because we only need one active handler
		// to process requests.
		err = agent.ForwardToAgent(clt.Client, keyring)
		if err != nil && !strings.Contains(err.Error(), "agent: already have handler for") {
			return nil, trace.Wrap(err)
		}
		err = agent.RequestAgentForwarding(session.Session)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}
	if err := session.RequestSubsystem(ctx, "proxy:"+targetAddress); err != nil {
		// read the stderr output from the failed SSH session and append
		// it to the end of our own message:
		serverErrorMsg, _ := io.ReadAll(sessionError)
		return nil, trace.ConnectionProblem(err, "failed connecting to host %s: %s. %v", targetAddress, serverErrorMsg, err)
	}
	return conn, nil
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package apputil implements the libraries used to control ARC apps
package apputil
import (
"context"
"time"
"chromiumos/tast/local/chrome"
)
// ARCMediaPlayer specifies the features of an ARC++ app that can play an audio or video.
type ARCMediaPlayer interface {
	// Install installs the app.
	Install(ctx context.Context) error
	// Launch launches the app and returns the time spent for the app to be visible.
	Launch(ctx context.Context) (time.Duration, error)
	// Play plays the specified music/song/video via the app.
	Play(ctx context.Context, media *Media) error
	// Close closes the app. hasError and outDir allow implementations to
	// save diagnostics (e.g. on test failure) before closing.
	Close(ctx context.Context, cr *chrome.Chrome, hasError func() bool, outDir string) error
}
// Media describes a single piece of content that an ARCMediaPlayer is
// going to search for and play.
type Media struct {
	Query    string // Query is the search text used to locate the media.
	Subtitle string // Subtitle is the subtitle of the media.
}

// NewMedia builds the Media that an ARCMediaPlayer will search for and play.
func NewMedia(query, subtitle string) *Media {
	m := Media{Query: query, Subtitle: subtitle}
	return &m
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// MessageSignificanceCategory is documented here http://hl7.org/fhir/ValueSet/message-significance-category
type MessageSignificanceCategory int

// Enumerated category codes, in ValueSet order.
const (
	MessageSignificanceCategoryConsequence MessageSignificanceCategory = iota
	MessageSignificanceCategoryCurrency
	MessageSignificanceCategoryNotification
)
// MarshalJSON encodes the category as its JSON string code.
func (code MessageSignificanceCategory) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}
// UnmarshalJSON decodes a JSON string code into the category, returning
// an error for unknown codes.
func (code *MessageSignificanceCategory) UnmarshalJSON(json []byte) error {
	// Strip the surrounding quotes from the raw JSON string token.
	s := strings.Trim(string(json), "\"")
	switch s {
	case "consequence":
		*code = MessageSignificanceCategoryConsequence
	case "currency":
		*code = MessageSignificanceCategoryCurrency
	case "notification":
		*code = MessageSignificanceCategoryNotification
	default:
		return fmt.Errorf("unknown MessageSignificanceCategory code `%s`", s)
	}
	return nil
}
// String returns the category's code, implementing fmt.Stringer.
func (code MessageSignificanceCategory) String() string {
	return code.Code()
}
// Code returns the machine-readable code for the category, or "<unknown>"
// for out-of-range values.
func (code MessageSignificanceCategory) Code() string {
	switch code {
	case MessageSignificanceCategoryConsequence:
		return "consequence"
	case MessageSignificanceCategoryCurrency:
		return "currency"
	case MessageSignificanceCategoryNotification:
		return "notification"
	}
	return "<unknown>"
}
// Display returns the human-readable display name for the category, or
// "<unknown>" for out-of-range values.
func (code MessageSignificanceCategory) Display() string {
	switch code {
	case MessageSignificanceCategoryConsequence:
		return "Consequence"
	case MessageSignificanceCategoryCurrency:
		return "Currency"
	case MessageSignificanceCategoryNotification:
		return "Notification"
	}
	return "<unknown>"
}
// Definition returns the formal definition text for the category, or
// "<unknown>" for out-of-range values.
func (code MessageSignificanceCategory) Definition() string {
	switch code {
	case MessageSignificanceCategoryConsequence:
		return "The message represents/requests a change that should not be processed more than once; e.g., making a booking for an appointment."
	case MessageSignificanceCategoryCurrency:
		return "The message represents a response to query for current information. Retrospective processing is wrong and/or wasteful."
	case MessageSignificanceCategoryNotification:
		return "The content is not necessarily intended to be current, and it can be reprocessed, though there may be version issues created by processing old notifications."
	}
	return "<unknown>"
}
|
package core
import (
log "github.com/sirupsen/logrus"
)
// Subscriber describes a three-stage pipeline: Subscribe produces items
// from a source, Convert transforms them, and Enqueue forwards them
// downstream. The done channel is passed to every stage so
// implementations can stop early when it is closed.
type Subscriber interface {
	Subscribe(done <-chan struct{}) (<-chan interface{}, <-chan error)
	Convert(done <-chan struct{}, in <-chan interface{}) <-chan interface{}
	Enqueue(done <-chan struct{}, in <-chan interface{}) (<-chan interface{}, <-chan error)
}
// Subscribe wires up the three pipeline stages of s, logs every item
// that reaches the end of the pipeline, and runs until either error
// channel yields. The deferred close(done) then signals all stages to
// stop.
func Subscribe(s Subscriber) {
	done := make(chan struct{})
	defer close(done)
	sourceC, errSubscribe := s.Subscribe(done)
	convertC := s.Convert(done, sourceC)
	out, errEnqueue := s.Enqueue(done, convertC)
complete:
	for {
		select {
		// NOTE(review): if out is ever closed (rather than left open),
		// this case receives zero values in a tight loop — confirm the
		// stages keep out open until an error is reported.
		case item := <-out:
			log.Infof("%+v", item)
		case err := <-errSubscribe:
			log.Error(err)
			break complete
		case err := <-errEnqueue:
			log.Error(err)
			break complete
		}
	}
	log.Info("completed")
}
|
package main
// person holds a first and last name.
type person struct {
	first string
	last string
}
// main demonstrates that passing a pointer lets a function mutate the
// caller's struct: the second print shows the changed first name.
func main() {
	x := person{
		first: "rudi",
		last: "visagie",
	}
	println(x.first, x.last)
	changeme(&x)
	println(x.first, x.last)
}
// changeme overwrites the first name of the person pointed to by p.
// Go automatically dereferences struct pointers for field access, so
// p.first is the idiomatic spelling of (*p).first.
func changeme(p *person) {
	p.first = "Hahahaha"
}
|
package check_whether_two_strings_are_almost_equivalent
func checkAlmostEquivalent(word1 string, word2 string) bool {
frequencies := make(map[int32]int)
for _, c := range word1 {
frequencies[c]++
}
for _, c := range word2 {
frequencies[c]--
}
for _, num := range frequencies {
if abs(num) > 3 {
return false
}
}
return true
}
// abs returns the absolute value of i.
func abs(i int) int {
	if i >= 0 {
		return i
	}
	return -i
}
|
// Package reltest for unit testing database interaction.
package reltest
|
package main
import "fmt"
// Student models an object's attributes: an exported Name and an
// unexported age.
type Student struct {
	Name string
	age int
}
// An object's behavior is expressed with methods.
// Syntax: func (receiver) MethodName(params) (results) { return value }
// SayHiByValue uses a value receiver: stu is a copy, so the assignment
// below does not affect the caller's Student.
func (stu Student) SayHiByValue() {
	stu.Name = "修改值传递对象的名字"
	fmt.Println("大家好,欢迎到Wovert大学。我是", stu.Name)
}
// SayHiByRef uses a pointer receiver: the assignment below modifies the
// caller's Student in place.
func (stu *Student) SayHiByRef() {
	stu.Name = "修改引用对象的名字"
	fmt.Println("大家好,欢迎到Wovert大学。我是", stu.Name)
}
// main contrasts value and pointer receivers on Student.
func main() {
	// Value receiver: the method mutated a copy, so Name is unchanged here.
	stu := Student{"张三", 18}
	stu.SayHiByValue()
	fmt.Println("我是", stu.Name)
	// Pointer receiver: the method mutated the original struct.
	stuRef := &Student{"李四", 20}
	stuRef.SayHiByRef()
	fmt.Println("我是", stuRef.Name)
}
|
package acknowledge
// Acknowledge carries the index of a segment being acknowledged.
type Acknowledge struct {
	// SegIDX is the segment index.
	SegIDX int32
}

// ClosingAck is a sentinel acknowledgement (segment index -1) used to
// signal closing — presumably terminates a stream of acks; confirm with callers.
var ClosingAck = Acknowledge{
	SegIDX: -1,
}
|
package httppool
import (
"net"
"sync/atomic"
"time"
)
type Conn struct {
usedAt int64 // atomic
netConn net.Conn
Inited bool
pooled bool
createdAt time.Time
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
netConn: netConn,
createdAt: time.Now(),
}
cn.SetUsedAt(time.Now())
return cn
}
func (cn *Conn) UsedAt() time.Time {
unix := atomic.LoadInt64(&cn.usedAt)
return time.Unix(unix, 0)
}
func (cn *Conn) SetUsedAt(tm time.Time) {
atomic.StoreInt64(&cn.usedAt, tm.Unix())
}
// Write writes b to the underlying net.Conn.
func (cn *Conn) Write(b []byte) (int, error) {
	return cn.netConn.Write(b)
}
// Close closes the underlying net.Conn.
func (cn *Conn) Close() error {
	return cn.netConn.Close()
}
|
// package main
// import "fmt"
// func main() {
// var a = [...]int{1, 3, 5, 7, 8}
// var sum int = 8
// for i := 0; i < len(a); i++ {
// sum += a[i]
// if i == len(a)-1 {
// fmt.Println("这个数组的和是:", sum)
// }
// }
// }
package main
import "fmt"
// main sums the elements of a fixed array with a for-range loop and
// prints the total.
func main() {
	a := [...]int{1, 2, 3, 4, 5, 6}
	sum := 0
	// for-range iterates arrays, slices, strings, maps, and channels.
	// The index is discarded with the blank identifier.
	for _, v := range a {
		sum += v
	}
	fmt.Println(sum)
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"web/3w/token"
)
// Instance describes a single host/application instance as returned by
// the inventory API.
type Instance struct {
	Idc string `json:"idc"`
	Env string `json:"env"`
	EnvDesc string `json:"env_desc"`
	Product string `json:"product"`
	ProductDesc string `json:"product_desc"`
	Application string `json:"application"`
	ApplicationDesc string `json:"application_desc"`
	Domain string `json:"domain"`
	Services string `json:"services"`
	Hostname string `json:"hostname"`
	InternalIP string `json:"internal_ip"`
	ServiceIP string `json:"service_ip"`
	UseState string `json:"use_state"`
	Type string `json:"type"`
	Department string `json:"department"`
	Path string `json:"path"`
}
// Datas is the paginated data portion of the API response.
type Datas struct {
	Total int `json:"total"`
	Results int `json:"results"`
	Page int `json:"page"`
	Data []*Instance `json:"data"`
}
// Appinstance is the top-level API response: the embedded token.Statu
// status fields plus the paginated data payload.
type Appinstance struct {
	token.Statu
	Data Datas `json:"data"`
}
// main queries the inventory API for the first page of instance details
// and prints the IDC of the first instance.
func main() {
	url := "http://beta-tree.op.kingnet.com/api/v1/instances/detail?page=1&results=10"
	request, err := http.NewRequest("GET", url, nil)
	token.Ferr(err)
	tokens, _ := token.Gettoken()
	request.Header.Add("Content-Type", "application/json")
	request.Header.Add("X-Authorization-Token", tokens)
	response, err := http.DefaultClient.Do(request)
	token.Ferr(err)
	// Close the body as soon as the request succeeds so it is released
	// even if reading or decoding fails below.
	defer response.Body.Close()
	resp, err := ioutil.ReadAll(response.Body)
	token.Ferr(err)
	instance := new(Appinstance)
	// Surface malformed payloads instead of silently printing zero values.
	token.Ferr(json.Unmarshal(resp, instance))
	// Guard against an empty result set to avoid an index-out-of-range panic.
	if len(instance.Data.Data) == 0 {
		fmt.Println("no instances returned")
		return
	}
	fmt.Println(instance.Data.Data[0].Idc)
}
|
package graph
import (
"fmt"
"sort"
"sync"
"time"
)
const (
	// debug enables verbose tracing output in pathsTo.
	debug = false
	// maxLoadAttempts bounds retries when lazily loading a Node.
	maxLoadAttempts = 3
)
// NodeFetcher is a function that can lazily load Node data.
type NodeFetcher func(*Node) error

// defaultNodeFetcher marks the node as loaded without fetching anything.
var defaultNodeFetcher NodeFetcher = func(n *Node) error {
	n.SetData(true)
	return nil
}
// Node represents a graph node.
type Node struct {
	ID string
	data interface{} // nil until lazily loaded via load; guarded by lock
	neighbours []*Node
	load NodeFetcher
	group *NodeGroup
	lock sync.Mutex // guards data
	// paths map[string][]Path
}
// NewNode constructs a new node with an ID and a lazy loader, and returns a pointer to the newly constructed Node.
// Parameter 1: id - ID for the new Node.
// Parameter 2: loader - NodeFetcher to lazy load the Node. Defaults to an empty NodeFetcher if not specified.
// Parameter 3: group - NodeGroup that this Node should belong to. Defaults to a default NodeGroup if not specified.
// If a node with this id is already registered in the group, the existing
// node is returned and the loader argument is ignored.
func NewNode(id string, otherArgs ...interface{}) *Node {
	var loader NodeFetcher
	var group *NodeGroup
	// Optional args are positional: [0] loader, [1] group.
	// A wrong concrete type panics via the type assertion.
	if len(otherArgs) > 0 && otherArgs[0] != nil {
		loader = otherArgs[0].(NodeFetcher)
	}
	if len(otherArgs) > 1 {
		group = otherArgs[1].(*NodeGroup)
	}
	if loader == nil {
		loader = defaultNodeFetcher
	}
	if group == nil {
		group = defaultNodeGroup
	}
	node, present := group.Get(id)
	if present {
		return node
	}
	node = &Node{ID: id, load: loader /*paths: make(map[string][]Path)*/}
	group.Register(node)
	return node
}
// String returns a string representation of the Node (its ID).
func (n *Node) String() string {
	return n.ID
}
// Equal defines equality of two Nodes.
// Two graph nodes are equal if their IDs are equal, irrespective of the rest of their state.
// Data and neighbours are intentionally not compared.
func (n *Node) Equal(other *Node) bool {
	return n.ID == other.ID
}
// Connect bidirectionally connects two graph Nodes.
// Duplicate edges are ignored (see appendNodeIfMissing).
func (n *Node) Connect(other *Node) {
	n.neighbours = appendNodeIfMissing(n.neighbours, other)
	other.neighbours = appendNodeIfMissing(other.neighbours, n)
}
// IsNeighbour reports whether other is an immediate neighbour of the
// current node (compared by ID).
func (n *Node) IsNeighbour(other *Node) bool {
	for i := range n.neighbours {
		if n.neighbours[i].Equal(other) {
			return true
		}
	}
	return false
}
// SetData sets Node data in a thread-safe manner.
// A non-nil value also marks the node as loaded (see HasData).
func (n *Node) SetData(data interface{}) {
	n.lock.Lock()
	n.data = data
	n.lock.Unlock()
}
// HasData reports, in a thread-safe manner, whether Node data has been set.
func (n *Node) HasData() bool {
	n.lock.Lock()
	defer n.lock.Unlock()
	return n.data != nil
}
// PathsTo computes all possible paths from the current node to the target node.
// It returns an empty slice when no paths are available.
// Optional args[0] (bool) requests stopping at the first path found.
// Results are sorted by increasing path length (stable sort).
func (n *Node) PathsTo(target *Node, args ...interface{}) []Path {
	stopAtFirstPath := false
	if len(args) > 0 {
		stopAtFirstPath = args[0].(bool)
	}
	chanResults := make(chan []Path)
	// pathID identifies this source/target pair across the recursive search.
	go n.pathsTo(target, 0, Path{n, target}.String(), stopAtFirstPath, Path{}, chanResults)
	paths := <-chanResults
	sort.Stable(byPathLength(paths))
	return paths
}
// pathsTo is the recursive worker behind PathsTo. It sends the set of
// paths from n to target (each extending currentPath) on chanResults
// exactly once, spawning one goroutine per neighbour while depth is
// below the group's recursion limit.
func (n *Node) pathsTo(target *Node, depth int, pathID string, stopAtFirstPath bool, currentPath Path, chanResults chan []Path) {
	if debug {
		tabs(depth)
		fmt.Printf("pathsTo(%v, %v, %v, >>%v<<)\n", n, target, depth, currentPath)
	}
	// If a path with this ID was already found and only one is wanted, bail out.
	// NOTE(review): this read of pathsFound is not guarded by group.lock,
	// unlike the write further down — confirm whether that race is acceptable.
	if stopAtFirstPath && n.group.pathsFound[pathID] {
		chanResults <- []Path{}
		return
	}
	// Lazy load Node
	loadAttempt := 0
	for !n.HasData() {
		if debug {
			tabs(depth)
			fmt.Printf("Loading %v. Attempt %v\n", n.ID, loadAttempt)
		}
		err := n.load(n)
		// Retry loading node after a pause if there was an error while loading
		if err != nil {
			if loadAttempt > maxLoadAttempts {
				if debug {
					tabs(depth)
					fmt.Printf(">>>>>>>>>>>>>>>>> Failed to load %v. Bailing out.\n", n.ID)
				}
				chanResults <- []Path{}
				return
			}
			loadAttempt++
			time.Sleep(1 * time.Second)
		}
	}
	// Skip if this node has already been visited in the current run
	n.group.lock.Lock()
	loop := currentPath.Contains(n)
	n.group.lock.Unlock()
	if loop {
		chanResults <- []Path{}
		return
	}
	n.group.lock.Lock()
	currentPath = append(currentPath, n)
	n.group.lock.Unlock()
	if n.Equal(target) {
		if debug {
			tabs(depth)
			fmt.Printf("$$$$$$$$$$$$$ [%v] %v -> %v Found: %v\n", pathID, n, target, currentPath)
		}
		n.group.lock.Lock()
		n.group.pathsFound[pathID] = true
		n.group.lock.Unlock()
		chanResults <- []Path{currentPath}
		// n.paths[target.ID] = append(n.paths[target.ID], currentPath)
		return
	}
	// Search for paths from neighbours
	chanNeighbourResults := make(chan []Path)
	if depth < n.group.maxRecursionDepth {
		for _, neighbour := range n.neighbours {
			go neighbour.pathsTo(target, depth+1, pathID, stopAtFirstPath, currentPath, chanNeighbourResults)
		}
	}
	// NOTE(review): when the recursion-depth limit is reached, no goroutines
	// are spawned above, yet this loop still waits for len(n.neighbours)
	// results — for a non-leaf node at max depth that receive would block
	// forever. Confirm the depth limit is never hit in practice.
	results := []Path{}
	for i := 0; i < len(n.neighbours); i++ {
		neighbourPaths := <-chanNeighbourResults
		results = append(results, neighbourPaths...)
	}
	//HACK
	results = deDuplicatePaths(results)
	chanResults <- results
	// n.paths[target.ID] = append(n.paths[target.ID], allPaths...)
	return
}
// appendNodeIfMissing returns nodes with nodeToAppend added, unless an
// equal node (by ID) is already present, in which case nodes is
// returned unchanged.
func appendNodeIfMissing(nodes []*Node, nodeToAppend *Node) []*Node {
	for _, existing := range nodes {
		if existing.Equal(nodeToAppend) {
			return nodes
		}
	}
	return append(nodes, nodeToAppend)
}
// deDuplicatePaths removes duplicate paths, using each path's string
// form as its identity. Output order is unspecified (map iteration).
func deDuplicatePaths(paths []Path) []Path {
	seen := make(map[string]Path, len(paths))
	for _, p := range paths {
		seen[p.String()] = p
	}
	unique := make([]Path, 0, len(seen))
	for _, p := range seen {
		unique = append(unique, p)
	}
	return unique
}
// tabs indents debug output by writing count+1 tab characters to stdout.
func tabs(count int) {
	for remaining := count + 1; remaining > 0; remaining-- {
		fmt.Printf("\t")
	}
}
|
package server
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
// BenchmarkFastSimpleServer_NoParam measures the "fast" router serving a
// route without path parameters.
// NOTE(review): the single ResponseRecorder is reused across iterations,
// so its body buffer grows throughout the run — confirm this is intended.
func BenchmarkFastSimpleServer_NoParam(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkSimpleService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/2", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkFastSimpleServer_WithParam measures the "fast" router serving a
// route that carries a path parameter.
func BenchmarkFastSimpleServer_WithParam(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkSimpleService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/1/{something}/blah", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkSimpleServer_NoParam measures the default router serving a
// route without path parameters.
func BenchmarkSimpleServer_NoParam(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkSimpleService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/2", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkSimpleServer_WithParam measures a parameterized GET
// (:something segment) through the default router.
func BenchmarkSimpleServer_WithParam(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkSimpleService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/1/blah/:something", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// benchmarkSimpleService is a plain-HTTP Service fixture for the
// benchmarks and tests in this file.
type benchmarkSimpleService struct {
	// fast mirrors the server's router choice in the calling benchmark.
	fast bool
}

// Prefix returns the route prefix under which the service is mounted.
func (s *benchmarkSimpleService) Prefix() string {
	return "/svc/v1"
}

// Endpoints maps path -> HTTP method -> handler. The
// "/1/{something}/:something" path carries both {curly} and :colon
// parameter syntax so the same table serves either router flavor.
func (s *benchmarkSimpleService) Endpoints() map[string]map[string]http.HandlerFunc {
	return map[string]map[string]http.HandlerFunc{
		"/1/{something}/:something": map[string]http.HandlerFunc{
			"GET": s.GetSimple,
		},
		"/2": map[string]http.HandlerFunc{
			"GET": s.GetSimpleNoParam,
		},
	}
}

// Middleware wraps every handler in the CORS handler with an empty
// allowed-origin string.
func (s *benchmarkSimpleService) Middleware(h http.Handler) http.Handler {
	return CORSHandler(h, "")
}

// GetSimple echoes the "something" path parameter.
func (s *benchmarkSimpleService) GetSimple(w http.ResponseWriter, r *http.Request) {
	something := Vars(r)["something"]
	fmt.Fprint(w, something)
}

// GetSimpleNoParam writes a constant "ok" body.
func (s *benchmarkSimpleService) GetSimpleNoParam(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "ok")
}
// BenchmarkFastJSONServer_JSONPayload measures a PUT with a JSON body
// through the "fast" router (decode + re-encode path).
func BenchmarkFastJSONServer_JSONPayload(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkJSONService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("PUT", "/svc/v1/1", bytes.NewBufferString(`{"hello":"hi","howdy":"yo"}`))
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkFastJSONServer_NoParam measures a body-less request through the
// "fast" router.
// NOTE(review): this sends PUT, but "/2" is registered for GET only in
// JSONEndpoints — confirm whether the method-mismatch path is what this is
// meant to measure.
func BenchmarkFastJSONServer_NoParam(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkJSONService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("PUT", "/svc/v1/2", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkFastJSONServer_WithParam measures a parameterized request with a
// JSON body through the "fast" router.
// NOTE(review): this sends PUT, but "/3/..." is registered for GET only in
// JSONEndpoints — confirm the intended method.
func BenchmarkFastJSONServer_WithParam(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkJSONService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("PUT", "/svc/v1/3/{something}/blah", bytes.NewBufferString(`{"hello":"hi","howdy":"yo"}`))
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkJSONServer_JSONPayload measures a PUT with a JSON body through
// the default router.
func BenchmarkJSONServer_JSONPayload(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkJSONService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("PUT", "/svc/v1/1", bytes.NewBufferString(`{"hello":"hi","howdy":"yo"}`))
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkJSONServer_NoParam measures a body-less request through the
// default router.
// NOTE(review): PUT is sent to "/2", which is registered for GET only —
// confirm intent (see JSONEndpoints).
func BenchmarkJSONServer_NoParam(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkJSONService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("PUT", "/svc/v1/2", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkJSONServer_WithParam measures a parameterized request with a
// JSON body through the default router.
// NOTE(review): PUT is sent to "/3/...", which is registered for GET only —
// confirm intent (see JSONEndpoints).
func BenchmarkJSONServer_WithParam(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkJSONService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("PUT", "/svc/v1/3/blah/:something", bytes.NewBufferString(`{"hello":"hi","howdy":"yo"}`))
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// benchmarkJSONService is a JSON Service fixture for the benchmarks and
// tests in this file.
type benchmarkJSONService struct {
	// fast mirrors the server's router choice in the calling benchmark.
	fast bool
}

// Prefix returns the route prefix under which the service is mounted.
func (s *benchmarkJSONService) Prefix() string {
	return "/svc/v1"
}

// JSONEndpoints maps path -> HTTP method -> JSON endpoint.
// NOTE(review): "/2" and "/3/..." accept GET only, yet the corresponding
// benchmarks issue PUT requests — confirm which side is intended.
func (s *benchmarkJSONService) JSONEndpoints() map[string]map[string]JSONEndpoint {
	return map[string]map[string]JSONEndpoint{
		"/1": map[string]JSONEndpoint{
			"PUT": s.PutJSON,
		},
		"/2": map[string]JSONEndpoint{
			"GET": s.GetJSON,
		},
		"/3/{something}/:something": map[string]JSONEndpoint{
			"GET": s.GetJSONParam,
		},
	}
}

// JSONMiddleware is a pass-through; no JSON middleware is benchmarked.
func (s *benchmarkJSONService) JSONMiddleware(e JSONEndpoint) JSONEndpoint {
	return e
}

// Middleware is a pass-through; no HTTP middleware is benchmarked.
func (s *benchmarkJSONService) Middleware(h http.Handler) http.Handler {
	return h
}

// PutJSON decodes the request body into a testJSON and echoes it back;
// malformed JSON yields a 400 together with the decode error.
func (s *benchmarkJSONService) PutJSON(r *http.Request) (int, interface{}, error) {
	var hello testJSON
	err := json.NewDecoder(r.Body).Decode(&hello)
	if err != nil {
		return http.StatusBadRequest, nil, err
	}
	return http.StatusOK, hello, nil
}

// GetJSON returns a fixed payload.
func (s *benchmarkJSONService) GetJSON(r *http.Request) (int, interface{}, error) {
	return http.StatusOK, &testJSON{"hi", "howdy"}, nil
}

// GetJSONParam echoes the "something" path parameter inside the payload.
func (s *benchmarkJSONService) GetJSONParam(r *http.Request) (int, interface{}, error) {
	something := Vars(r)["something"]
	return http.StatusOK, &testJSON{"hi", something}, nil
}
// BenchmarkFastContextSimpleServer_NoParam measures a parameter-less GET
// against the context-based service fixture (defined elsewhere in this
// package) using the "fast" router.
func BenchmarkFastContextSimpleServer_NoParam(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkContextService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/ctx/2", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkFastContextSimpleServer_WithParam measures a parameterized GET
// against the context-based service fixture using the "fast" router.
func BenchmarkFastContextSimpleServer_WithParam(b *testing.B) {
	cfg := &Config{RouterType: "fast", HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkContextService{true})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/ctx/1/{something}/blah", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkContextSimpleServer_NoParam measures a parameter-less GET
// against the context-based service fixture using the default router.
func BenchmarkContextSimpleServer_NoParam(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkContextService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/ctx/2", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// BenchmarkContextSimpleServer_WithParam measures a parameterized GET
// against the context-based service fixture using the default router.
func BenchmarkContextSimpleServer_WithParam(b *testing.B) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkContextService{})
	w := httptest.NewRecorder()
	r, _ := http.NewRequest("GET", "/svc/v1/ctx/1/blah/:something", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	for i := 0; i < b.N; i++ {
		srvr.ServeHTTP(w, r)
	}
}
// testJSON is the payload shape exchanged by the JSON fixtures.
type testJSON struct {
	Hello string `json:"hello"`
	Howdy string `json:"howdy"`
}
// testMixedService exposes both a plain-HTTP endpoint and a JSON endpoint,
// exercising registration of a service that implements both interfaces.
type testMixedService struct {
	// fast is unused by this fixture's handlers; kept for parity with the
	// other fixtures.
	fast bool
}

// Prefix returns the route prefix under which the service is mounted.
func (s *testMixedService) Prefix() string {
	return "/svc/v1"
}

// JSONEndpoints exposes a single GET JSON route.
func (s *testMixedService) JSONEndpoints() map[string]map[string]JSONEndpoint {
	return map[string]map[string]JSONEndpoint{
		"/json": map[string]JSONEndpoint{
			"GET": s.GetJSON,
		},
	}
}

// Endpoints exposes a single GET plain-HTTP route.
func (s *testMixedService) Endpoints() map[string]map[string]http.HandlerFunc {
	return map[string]map[string]http.HandlerFunc{
		"/simple": map[string]http.HandlerFunc{
			"GET": s.GetSimple,
		},
	}
}

// GetSimple echoes the "something" path parameter (empty for /simple,
// which declares no parameters).
func (s *testMixedService) GetSimple(w http.ResponseWriter, r *http.Request) {
	something := Vars(r)["something"]
	fmt.Fprint(w, something)
}

// GetJSON returns a fixed payload.
func (s *testMixedService) GetJSON(r *http.Request) (int, interface{}, error) {
	return http.StatusOK, &testJSON{"hi", "howdy"}, nil
}

// JSONMiddleware is a pass-through.
func (s *testMixedService) JSONMiddleware(e JSONEndpoint) JSONEndpoint {
	return e
}

// Middleware is a pass-through.
func (s *testMixedService) Middleware(h http.Handler) http.Handler {
	return h
}
// testInvalidService implements Prefix and Middleware but neither
// Endpoints nor JSONEndpoints; TestBasicRegistration expects its
// registration to fail.
type testInvalidService struct {
	fast bool
}

// Prefix returns the route prefix under which the service would mount.
func (s *testInvalidService) Prefix() string {
	return "/svc/v1"
}

// Middleware is a pass-through.
func (s *testInvalidService) Middleware(h http.Handler) http.Handler {
	return h
}
// TestFactory ensures NewSimpleServer works both with an explicit config
// and with nil (defaults). The *testing.T parameter is intentionally
// unnamed/unused; the test passes if neither constructor call panics.
func TestFactory(*testing.T) {
	// with config:
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	NewSimpleServer(cfg)
	// without config:
	NewSimpleServer(nil)
}
// TestBasicRegistration registers each valid service fixture against a
// fresh server and expects success, then verifies that a service exposing
// no endpoints at all fails registration.
func TestBasicRegistration(t *testing.T) {
	tests := []struct {
		server  *SimpleServer
		service Service
	}{
		{
			NewSimpleServer(nil),
			&benchmarkSimpleService{},
		},
		{
			NewSimpleServer(nil),
			&benchmarkJSONService{},
		},
		{
			NewSimpleServer(nil),
			&testMixedService{},
		},
		{
			NewSimpleServer(nil),
			&benchmarkContextService{},
		},
	}
	for _, test := range tests {
		if err := test.server.Register(test.service); err != nil {
			t.Errorf("Basic registration of services should not encounter an error: %s\n", err)
		}
	}
	// testInvalidService implements no endpoint interface, so Register
	// must reject it.
	invServer := NewSimpleServer(nil)
	if err := invServer.Register(&testInvalidService{}); err == nil {
		t.Error("Invalid services should produce an error in service registration")
	}
}
// TestSimpleServerCORSMiddleware sends an OPTIONS preflight carrying an
// Origin header and verifies the CORS middleware answers 200 with an empty
// body and echoes the origin in Access-Control-Allow-Origin.
func TestSimpleServerCORSMiddleware(t *testing.T) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status"}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkSimpleService{false})
	wt := httptest.NewRecorder()
	// hit the CORS middleware
	r := httptest.NewRequest(http.MethodOptions, "/svc/v1/1/blah/:something", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	r.Header.Add("Origin", "nytimes.com")
	srvr.ServeHTTP(wt, r)
	w := wt.Result()
	if w.StatusCode != http.StatusOK {
		t.Errorf("expected 200 response code, got %d", w.StatusCode)
	}
	gb, err := ioutil.ReadAll(w.Body)
	if err != nil {
		t.Fatalf("unable to read response body: %s", err)
	}
	if gotBody := string(gb); gotBody != "" {
		t.Errorf("expected response body to be \"\", got %q", gotBody)
	}
	if gotOrig := w.Header.Get("Access-Control-Allow-Origin"); gotOrig != "nytimes.com" {
		t.Errorf("expected response \"Access-Control-Allow-Origin\" header to be to be \"nytimes.com\", got %q",
			gotOrig)
	}
}
// TestNotFoundHandler verifies that a configured NotFoundHandler is invoked
// for a request matching no registered route: the response must be a 404
// with http.NotFound's standard body.
func TestNotFoundHandler(t *testing.T) {
	cfg := &Config{HealthCheckType: "simple", HealthCheckPath: "/status", NotFoundHandler: http.HandlerFunc(http.NotFound)}
	srvr := NewSimpleServer(cfg)
	RegisterHealthHandler(cfg, srvr.monitor, srvr.mux)
	srvr.Register(&benchmarkSimpleService{false})
	wt := httptest.NewRecorder()
	// "/svc/v1/1/blah" is missing the final path segment, so no route matches.
	r := httptest.NewRequest(http.MethodGet, "/svc/v1/1/blah", nil)
	r.RemoteAddr = "0.0.0.0:8080"
	srvr.ServeHTTP(wt, r)
	w := wt.Result()
	if w.StatusCode != http.StatusNotFound {
		// Fixed failure message: the assertion checks 404, not 400.
		t.Errorf("expected 404 response code, got %d", w.StatusCode)
	}
	gb, err := ioutil.ReadAll(w.Body)
	if err != nil {
		t.Fatalf("unable to read response body: %s", err)
	}
	if gotBody := string(gb); gotBody != "404 page not found\n" {
		// Fixed failure message: report the body that is actually expected.
		t.Errorf("expected response body to be %q, got %q", "404 page not found\n", gotBody)
	}
}
|
package main
import (
"bufio"
"fmt"
"gopkg.in/gomail.v2"
"log"
"net/mail"
"net/smtp"
"os"
"strings"
)
// main interactively sends a test e-mail: it prompts on stdin for the SMTP
// address, sender, recipient and an optional attachment, then drives a raw
// smtp.Client (MAIL/RCPT/DATA/QUIT) with a gomail message as the payload.
// Any rejected sender/recipient addresses are reported at the end.
func main() {
	reader := bufio.NewReader(os.Stdin)
	// Default SMTP endpoint, overridden by a non-empty answer to the prompt.
	var dial = "127.0.0.1:25"
	fmt.Print("dial: ")
	dial1, _ := reader.ReadString('\n')
	dial1 = strings.Replace(dial1, "\n", "", -1)
	if dial1 != "" {
		dial = dial1
	}
	// Connect to the remote SMTP server.
	client, err := smtp.Dial(dial)
	if err != nil {
		log.Fatal(err)
	}
	var sender string
	fmt.Print("sender: ")
	sender, _ = reader.ReadString('\n')
	sender = strings.Replace(sender, "\n", "", -1)
	var recipient string
	fmt.Print("recipient: ")
	recipient, _ = reader.ReadString('\n')
	recipient = strings.Replace(recipient, "\n", "", -1)
	var attach string
	fmt.Print("attach: ")
	attach, _ = reader.ReadString('\n')
	attach = strings.Replace(attach, "\n", "", -1)
	var config configMail
	config.Subject = "Hello!"
	if sender != "" {
		config.From = mail.Address{
			Name:    "sender",
			Address: sender,
		}
	}
	if recipient != "" {
		config.To = append(config.To, mail.Address{
			Name:    "recipient",
			Address: recipient,
		})
	}
	// The recipient is CC'd as well, and the attach list doubles as the
	// embed list, exercising the de-duplication in convertMailForm.
	config.Cc = config.To
	config.Body.ContentType = "text/html"
	config.Body.Body = "Hello <b>Bob</b> and <i>Cora</i>!"
	if attach != "" {
		config.Attach = append(config.Attach, attach)
	}
	config.Embed = config.Attach
	message := gomail.NewMessage()
	senderFalse, recipientFalse := convertMailForm(message, config, client)
	// Send the email body.
	wc, err := client.Data()
	if err != nil {
		log.Fatal(err)
	}
	_, err = message.WriteTo(wc)
	if err != nil {
		log.Fatal(err)
	}
	err = wc.Close()
	if err != nil {
		log.Fatal(err)
	}
	// Send the QUIT command and close the connection.
	err = client.Quit()
	if err != nil {
		log.Fatal(err)
	}
	// Report addresses the server rejected; either case aborts via log.Fatal.
	if senderFalse != "" {
		log.Fatal("senderFalse: ", senderFalse)
	}
	if len(recipientFalse) > 0 {
		log.Fatal("recipientFalse: ", recipientFalse)
	}
}
// configMail describes one outgoing message. The JSON tags allow the
// struct to be loaded from a config document, though main currently fills
// it by hand.
type configMail struct {
	Subject string         `json:"subject"`
	From    mail.Address   `json:"from"`
	To      []mail.Address `json:"to"`
	Cc      []mail.Address `json:"cc"`
	Bcc     []mail.Address `json:"bcc"`
	// Body carries the MIME content type plus the body text itself.
	Body struct {
		ContentType string `json:"content_type"`
		Body        string `json:"body"`
	} `json:"body"`
	// Attach and Embed are lists of file paths.
	Attach []string `json:"attach"`
	Embed  []string `json:"embed"`
}
// convertMailForm fills the gomail message (headers, body, attachments,
// embeds) from config and issues the SMTP envelope commands (MAIL FROM /
// RCPT TO) on client. It returns the sender address if MAIL FROM was
// rejected, and the set of recipient addresses rejected by RCPT TO.
func convertMailForm(message *gomail.Message, config configMail, client *smtp.Client) (senderFalse string, recipientFalse map[string]bool) {
	// make default
	recipientFalse = map[string]bool{}
	message.SetHeader("Subject", config.Subject)
	headers := make(map[string][]string)
	headers["From"] = []string{config.From.String()}
	if err := client.Mail(config.From.Address); err != nil {
		senderFalse = config.From.Address
	}
	// Each address gets at most one RCPT command even when it appears in
	// several of To/Cc/Bcc; the helper replaces three identical loops.
	seen := make(map[string]bool)
	addRecipients := func(field string, people []mail.Address) {
		for _, p := range people {
			headers[field] = append(headers[field], p.String())
			if seen[p.Address] {
				continue
			}
			seen[p.Address] = true
			if err := client.Rcpt(p.Address); err != nil {
				recipientFalse[p.Address] = true
			}
		}
	}
	addRecipients("To", config.To)
	addRecipients("Cc", config.Cc)
	addRecipients("Bcc", config.Bcc)
	message.SetHeaders(headers)
	message.SetBody(config.Body.ContentType, config.Body.Body)
	// BUG FIX: entries of config.Embed were previously added with
	// message.Attach; use message.Embed so they can be referenced inline
	// (cid:) from the HTML body. The shared de-duplication map is kept: a
	// file listed in both Embed and Attach is processed once (as an embed).
	checkDuplicateFile := make(map[string]bool)
	for _, embed := range config.Embed {
		if !checkDuplicateFile[embed] {
			message.Embed(embed)
			checkDuplicateFile[embed] = true
		}
	}
	for _, attach := range config.Attach {
		if !checkDuplicateFile[attach] {
			message.Attach(attach)
			checkDuplicateFile[attach] = true
		}
	}
	return
}
|
// Package cmd provides command line processing functions
// for the authentication services.
package cmd
import (
"fmt"
"os"
"github.com/dhaifley/dauth/lib"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// rootCmd is the base cobra command; its usage strings come from the
// service metadata in the lib package.
var rootCmd = &cobra.Command{
	Use:   lib.ServiceInfo.Name,
	Short: lib.ServiceInfo.Short,
	Long:  lib.ServiceInfo.Long,
}
// init loads the YAML config file (non-fatally reporting a missing file)
// and wires the "sql", "cert" and "key" settings to environment variables
// prefixed with the service name, each defaulting to the empty string.
func init() {
	viper.SetConfigFile("dauth_config.yaml")
	if err := viper.ReadInConfig(); err != nil {
		fmt.Println(err)
	}
	viper.SetEnvPrefix(lib.ServiceInfo.Name)
	for _, setting := range []string{"sql", "cert", "key"} {
		viper.SetDefault(setting, "")
		if err := viper.BindEnv(setting); err != nil {
			fmt.Println(err)
		}
	}
}
// Execute starts the command processor.
// It prints the service banner and version, then runs the root command,
// exiting with status 1 if command execution fails.
func Execute() {
	fmt.Println(lib.ServiceInfo.Short)
	fmt.Println("Version:", lib.ServiceInfo.Version)
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
|
package main
//641. 设计循环双端队列
//设计实现双端队列。
//
//实现 MyCircularDeque 类:
//
//MyCircularDeque(int k):构造函数,双端队列最大为 k 。
//boolean insertFront():将一个元素添加到双端队列头部。 如果操作成功返回 true,否则返回 false 。
//boolean insertLast():将一个元素添加到双端队列尾部。如果操作成功返回 true,否则返回 false 。
//boolean deleteFront():从双端队列头部删除一个元素。 如果操作成功返回 true,否则返回 false 。
//boolean deleteLast():从双端队列尾部删除一个元素。如果操作成功返回 true,否则返回 false 。
//int getFront()):从双端队列头部获得一个元素。如果双端队列为空,返回 -1。
//int getRear():获得双端队列的最后一个元素。如果双端队列为空,返回 -1 。
//boolean isEmpty():若双端队列为空,则返回true,否则返回 false 。
//boolean isFull():若双端队列满了,则返回true,否则返回 false 。
//
//
//示例 1:
//
//输入
//["MyCircularDeque", "insertLast", "insertLast", "insertFront", "insertFront", "getRear", "isFull", "deleteLast", "insertFront", "getFront"]
//[[3], [1], [2], [3], [4], [], [], [], [4], []]
//输出
//[null, true, true, true, false, 2, true, true, true, 4]
//
//解释
//MyCircularDeque circularDeque = new MycircularDeque(3); // 设置容量大小为3
//circularDeque.insertLast(1); // 返回 true
//circularDeque.insertLast(2); // 返回 true
//circularDeque.insertFront(3); // 返回 true
//circularDeque.insertFront(4); // 已经满了,返回 false
//circularDeque.getRear(); // 返回 2
//circularDeque.isFull(); // 返回 true
//circularDeque.deleteLast(); // 返回 true
//circularDeque.insertFront(4); // 返回 true
//circularDeque.getFront(); // 返回 4
//
//
//
//提示:
//
//1 <= k <= 1000
//0 <= value <= 1000
//insertFront,insertLast,deleteFront,deleteLast,getFront,getRear,isEmpty,isFull 调用次数不大于2000次
// MyCircularDeque is a bounded double-ended queue backed by a ring buffer.
// The backing slice holds one extra slot so that a full deque
// ((Last+1) mod size == Front) is distinguishable from an empty one
// (Front == Last).
type MyCircularDeque struct {
	Front, Last int
	Element     []int
}

// Constructor returns a deque able to hold at most k elements.
func Constructor(k int) MyCircularDeque {
	return MyCircularDeque{Element: make([]int, k+1)}
}

// InsertFront adds value at the head; it reports whether there was room.
func (q *MyCircularDeque) InsertFront(value int) bool {
	if q.IsFull() {
		return false
	}
	size := len(q.Element)
	q.Front = (q.Front + size - 1) % size
	q.Element[q.Front] = value
	return true
}

// InsertLast adds value at the tail; it reports whether there was room.
func (q *MyCircularDeque) InsertLast(value int) bool {
	if q.IsFull() {
		return false
	}
	q.Element[q.Last] = value
	q.Last = (q.Last + 1) % len(q.Element)
	return true
}

// DeleteFront removes the head element; it reports whether one existed.
func (q *MyCircularDeque) DeleteFront() bool {
	if q.IsEmpty() {
		return false
	}
	q.Front = (q.Front + 1) % len(q.Element)
	return true
}

// DeleteLast removes the tail element; it reports whether one existed.
func (q *MyCircularDeque) DeleteLast() bool {
	if q.IsEmpty() {
		return false
	}
	size := len(q.Element)
	q.Last = (q.Last + size - 1) % size
	return true
}

// GetFront returns the head element, or -1 when the deque is empty.
func (q *MyCircularDeque) GetFront() int {
	if q.IsEmpty() {
		return -1
	}
	return q.Element[q.Front]
}

// GetRear returns the tail element, or -1 when the deque is empty.
func (q *MyCircularDeque) GetRear() int {
	if q.IsEmpty() {
		return -1
	}
	size := len(q.Element)
	return q.Element[(q.Last+size-1)%size]
}

// IsEmpty reports whether the deque holds no elements.
func (q *MyCircularDeque) IsEmpty() bool {
	return q.Front == q.Last
}

// IsFull reports whether the deque is at capacity.
func (q *MyCircularDeque) IsFull() bool {
	return (q.Last+1)%len(q.Element) == q.Front
}
/**
* Your MyCircularDeque object will be instantiated and called as such:
* obj := Constructor(k);
* param_1 := obj.InsertFront(value);
* param_2 := obj.InsertLast(value);
* param_3 := obj.DeleteFront();
* param_4 := obj.DeleteLast();
* param_5 := obj.GetFront();
* param_6 := obj.GetRear();
* param_7 := obj.IsEmpty();
* param_8 := obj.IsFull();
*/
|
// よくわからんけど嘘解法っぽい
// 普通に数えすぎているのに差分をとるとあっているのが、偶然なのか必然なのかわからん
package main
import "fmt"
// Factorials 0! through 7!, used as positional weights when ranking a
// permutation in the factorial number system.
const (
	n0 = 1
	n1 = 1
	n2 = 2 * n1
	n3 = 3 * n2
	n4 = 4 * n3
	n5 = 5 * n4
	n6 = 6 * n5
	n7 = 7 * n6
)

var frac = [8]int{n0, n1, n2, n3, n4, n5, n6, n7}

// count returns a factorial-base rank for the first n entries of A: each
// position contributes p * (n-i-1)! where p is A[i] adjusted by the number
// of smaller elements seen so far (A[i]-1 for the first position).
// The file header notes this may over-count in absolute terms; only
// differences between two ranks are used by main.
func count(n int, A [10]int) int {
	c := 0
	for i := 0; i < n; i++ {
		var p int
		if i == 0 {
			p = A[i] - 1
		} else {
			// Start from the value itself and subtract one for every
			// earlier element that is smaller.
			p = A[i]
			for j := 0; j < i; j++ {
				if A[j] < A[i] {
					p--
				}
			}
		}
		// The unused `q` accumulator from the original has been removed;
		// it was appended to but never read.
		c += p * frac[n-i-1]
	}
	return c
}
// main reads n and two length-n permutations P and Q from stdin, ranks
// both with count, and prints the absolute difference of the ranks.
func main() {
	var P, Q [10]int
	var n int
	fmt.Scan(&n)
	for i := 0; i < n; i++ {
		fmt.Scan(&P[i])
	}
	for i := 0; i < n; i++ {
		fmt.Scan(&Q[i])
	}
	v1 := count(n, P)
	v2 := count(n, Q)
	v := v1 - v2
	// Print |v1 - v2|.
	if v > 0 {
		fmt.Println(v)
	} else {
		fmt.Println(-v)
	}
}
|
package syncLog
import (
"fmt"
"sync"
)
// syncLog returns a logging function whose calls are serialized by a
// shared mutex, so concurrent callers never interleave their output lines.
func syncLog() func(string) {
	var mu sync.Mutex
	return func(msg string) {
		mu.Lock()
		defer mu.Unlock()
		fmt.Println(msg)
	}
}
var Println func(string) = syncLog() |
package main
import (
"github.com/jyggen/advent-of-go/util"
"github.com/stretchr/testify/assert"
"testing"
)
// TestSolvePartOne checks the part-one solver against the documented
// examples and the known answer for the puzzle input.
func TestSolvePartOne(t *testing.T) {
	// BUG FIX: testify's package-level assert.Equal requires the TestingT
	// as its first argument; without it failures are not reported (and an
	// int literal does not satisfy assert.TestingT).
	assert.Equal(t, 5, solvePartOne(parseInput("R2, L3")))
	assert.Equal(t, 2, solvePartOne(parseInput("R2, R2, R2")))
	assert.Equal(t, 12, solvePartOne(parseInput("R5, L5, R5, R3")))
	assert.Equal(t, 307, solvePartOne(parseInput(util.ReadFile("input"))))
}
// TestSolvePartTwo checks the part-two solver against the documented
// example and the known answer for the puzzle input.
func TestSolvePartTwo(t *testing.T) {
	// BUG FIX: pass t as the first argument to testify's assert.Equal so
	// failures are reported through the test framework.
	assert.Equal(t, 4, solvePartTwo(parseInput("R8, R4, R4, R8")))
	assert.Equal(t, 165, solvePartTwo(parseInput(util.ReadFile("input"))))
}
|
package main
import "testing"
// BenchmarkPipeline measures one round trip through the pipeline built by
// pipe (defined elsewhere in this package): a send into in followed by a
// receive from out.
func BenchmarkPipeline(b *testing.B) {
	in, out := pipe(1000000)
	for i := 0; i < b.N; i++ {
		in <- 1
		<-out
	}
}
|
package bvg
import (
"encoding/binary"
// "fmt"
// "fmt"
"io"
"math"
// "os"
)
// Bvg stores the writer/reader and the shape commands to be written.
// Shapes accumulated in the slices are serialized by Encode.
type Bvg struct {
	Writer     io.Writer
	Reader     io.Reader
	Points     []*Point
	Lines      []*Line
	Circles    []*Circle
	Triangles  []*Triangle
	Polys      []*Poly
	Bezs       []*Bez
	LineStrips []*LineStrip
}
// Point is a colored position. A point must lie between (-1,-1) and (1,1);
// anything outside will not be rendered.
type Point struct {
	X, Y       float64
	R, G, B, A uint8
}

// NewPt returns a new Point at the given position (color left zero).
func NewPt(x, y float64) *Point {
	return &Point{
		X: x,
		Y: y,
	}
}

// Dist returns the Euclidean distance between p and p1.
func (p *Point) Dist(p1 *Point) float64 {
	return math.Sqrt((p.X-p1.X)*(p.X-p1.X) + (p.Y-p1.Y)*(p.Y-p1.Y))
}

// Write serializes the point little-endian: X and Y as float64, then the
// four RGBA bytes. NOTE(review): binary.Write errors are ignored here —
// confirm whether write failures need surfacing.
func (p *Point) Write(w io.Writer) {
	binary.Write(w, binary.LittleEndian, p.X)
	binary.Write(w, binary.LittleEndian, p.Y)
	binary.Write(w, binary.LittleEndian, p.R)
	binary.Write(w, binary.LittleEndian, p.G)
	binary.Write(w, binary.LittleEndian, p.B)
	binary.Write(w, binary.LittleEndian, p.A)
}

// NewPtCol returns a new Point with the given position and color.
func NewPtCol(x, y float64, r, g, b, a uint8) *Point {
	return &Point{
		X: x,
		Y: y,
		R: r,
		G: g,
		B: b,
		A: a,
	}
}

// RelPt returns the point at distance l and angle theta (radians) from p,
// inheriting p's color.
func (p *Point) RelPt(l, theta float64) *Point {
	return &Point{
		X: p.X + l*math.Cos(theta),
		Y: p.Y + l*math.Sin(theta),
		R: p.R,
		G: p.G,
		B: p.B,
		A: p.A,
	}
}
// Line is a segment between two colored endpoints.
type Line struct {
	P1 *Point
	P2 *Point
}

// NewLine returns a Line between p1 and p2.
func NewLine(p1, p2 *Point) *Line {
	return &Line{
		P1: p1,
		P2: p2,
	}
}

// Circle is a filled circle centered at P.
type Circle struct {
	P *Point
	// R is the complete radius of the circle (alpha reaches 0 at R);
	// T is the threshold up to which the circle's color does not fade.
	R, T float64
}

// NewCircle returns a Circle with center p, radius r and fade threshold t.
func NewCircle(p *Point, r, t float64) *Circle {
	return &Circle{
		P: p,
		R: r,
		T: t,
	}
}

// Triangle is defined by its three colored corners.
type Triangle struct {
	P1, P2, P3 *Point
}

// NewTriangle returns a Triangle with the given corners.
func NewTriangle(p1, p2, p3 *Point) *Triangle {
	return &Triangle{
		P1: p1,
		P2: p2,
		P3: p3,
	}
}

// Poly is a polygon described by its vertices.
type Poly struct {
	Pts []*Point
}

// NewPoly returns a Poly over the given vertices.
func NewPoly(p ...*Point) *Poly {
	return &Poly{
		Pts: p,
	}
}

// Bez is a Bezier curve: first and last points are endpoints, the rest are
// control points (see DrawBez).
type Bez struct {
	Pts []*Point
}

// NewBez returns a Bez over the given points.
func NewBez(Points ...*Point) *Bez {
	return &Bez{
		Pts: Points,
	}
}

// LineStrip is a connected sequence of line segments.
type LineStrip struct {
	Pts []*Point
}

// NewLineStrip returns a LineStrip over the given points.
func NewLineStrip(pts ...*Point) *LineStrip {
	return &LineStrip{
		Pts: pts,
	}
}
// New returns a new Bvg that serializes to the specified writer.
func New(writer io.Writer) *Bvg {
	return &Bvg{
		Writer: writer,
	}
}
// Encode writes every stored shape to the underlying writer.
func (b *Bvg) Encode() {
	for _, val := range b.Lines {
		b.DrawLine(*val)
	}
	for _, val := range b.Triangles {
		b.DrawTriangle(*val)
	}
	for _, val := range b.Circles {
		b.DrawCircle(*val)
	}
	for _, val := range b.Polys {
		b.DrawPoly(*val)
	}
	// BUG FIX: stored Bezier curves were collected but never encoded even
	// though DrawBez exists.
	for _, val := range b.Bezs {
		b.DrawBez(*val)
	}
	for _, val := range b.LineStrips {
		b.DrawLineStrip(*val)
	}
	// NOTE(review): b.Points is still not encoded — there is no DrawPoint
	// primitive; confirm whether stand-alone points should be serialized.
}
// DrawLine writes a line from point p1 to p2 (tag byte 'l'), carrying the
// colors stored in each point.
// NOTE(review): here and below, binary.Write errors are ignored — confirm
// whether write failures need surfacing.
func (b *Bvg) DrawLine(l Line) {
	binary.Write(b.Writer, binary.LittleEndian, int8('l'))
	l.P1.Write(b.Writer)
	l.P2.Write(b.Writer)
}

// DrawTriangle writes a triangle from p1, p2, p3 (tag byte 't').
func (b *Bvg) DrawTriangle(t Triangle) {
	binary.Write(b.Writer, binary.LittleEndian, int8('t'))
	t.P1.Write(b.Writer)
	t.P2.Write(b.Writer)
	t.P3.Write(b.Writer)
}

// DrawCircle writes a circle (tag byte 'c'): center point, then radius R
// and fade threshold T.
func (b *Bvg) DrawCircle(c Circle) {
	binary.Write(b.Writer, binary.LittleEndian, int8('c'))
	c.P.Write(b.Writer)
	binary.Write(b.Writer, binary.LittleEndian, c.R)
	binary.Write(b.Writer, binary.LittleEndian, c.T)
}

// DrawPoly writes a polygon (tag byte 'p'): a uint32 vertex count followed
// by the vertices; rendering triangulates as a triangle strip.
func (b *Bvg) DrawPoly(p Poly) {
	binary.Write(b.Writer, binary.LittleEndian, int8('p'))
	binary.Write(b.Writer, binary.LittleEndian, uint32(len(p.Pts)))
	// fmt.Println(len(p.Pts))
	for _, p := range p.Pts {
		p.Write(b.Writer)
	}
}

// DrawBez writes a Bezier curve (tag byte 'b'): a uint32 point count, then
// the points — first and last are endpoints, the rest control points.
func (b *Bvg) DrawBez(bez Bez) {
	binary.Write(b.Writer, binary.LittleEndian, int8('b'))
	binary.Write(b.Writer, binary.LittleEndian, uint32(len(bez.Pts)))
	for _, p := range bez.Pts {
		p.Write(b.Writer)
	}
}
// DrawLineStrip writes a line strip: the tag byte, a uint32 point count,
// then the points.
func (b *Bvg) DrawLineStrip(l LineStrip) {
	// 60 == '<'; written as a character literal for consistency with the
	// other Draw* tag bytes. The encoded value is unchanged.
	binary.Write(b.Writer, binary.LittleEndian, int8('<'))
	binary.Write(b.Writer, binary.LittleEndian, uint32(len(l.Pts)))
	for _, p := range l.Pts {
		p.Write(b.Writer)
	}
}
|
package _0_Front_Controller_Pattern
import (
"testing"
)
// Step 4:
// Use FrontController to demonstrate the front controller design pattern.
func TestFrontControllerPattern(t *testing.T) {
	frontController := FrontController{}
	tests := []struct {
		name string
		args string
		want string
	}{
		{"HOME", "HOME", "Displaying Home Page"},
		{"STUDENT", "STUDENT", "Displaying Student Page"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := frontController.dispatch(tt.args); got != tt.want {
				t.Errorf("dispatchRequest() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package modifiers
import (
"fmt"
"strings"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/capability"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/consts"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate/internal/statefulset/builder"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// Compile-time checks that ServicePortModifier satisfies both interfaces.
var _ envModifier = ServicePortModifier{}
var _ builder.Modifier = ServicePortModifier{}

// NewServicePortModifier builds a modifier for the given DynaKube and
// ActiveGate capability.
func NewServicePortModifier(dynakube dynatracev1beta1.DynaKube, capability capability.Capability) ServicePortModifier {
	return ServicePortModifier{
		dynakube:   dynakube,
		capability: capability,
	}
}

// ServicePortModifier adds the ActiveGate service ports and the DNS entry
// point environment variable to the ActiveGate StatefulSet.
type ServicePortModifier struct {
	dynakube   dynatracev1beta1.DynaKube
	capability capability.Capability
}

// Enabled reports whether the DynaKube requires ActiveGate service ports.
func (mod ServicePortModifier) Enabled() bool {
	return mod.dynakube.NeedsActiveGateServicePorts()
}

// Modify points the ActiveGate container's readiness probe at the HTTPS
// service port and appends the container ports and env vars.
// NOTE(review): FindContainerInPodSpec's result is dereferenced without a
// nil check — confirm the ActiveGate container is guaranteed to exist here.
func (mod ServicePortModifier) Modify(sts *appsv1.StatefulSet) error {
	baseContainer := kubeobjects.FindContainerInPodSpec(&sts.Spec.Template.Spec, consts.ActiveGateContainerName)
	baseContainer.ReadinessProbe.HTTPGet.Port = intstr.FromString(consts.HttpsServicePortName)
	baseContainer.Ports = append(baseContainer.Ports, mod.getPorts()...)
	baseContainer.Env = append(baseContainer.Env, mod.getEnvs()...)
	return nil
}

// getPorts returns the HTTPS and HTTP container ports.
func (mod ServicePortModifier) getPorts() []corev1.ContainerPort {
	return []corev1.ContainerPort{
		{
			Name:          consts.HttpsServicePortName,
			ContainerPort: consts.HttpsContainerPort,
		},
		{
			Name:          consts.HttpServicePortName,
			ContainerPort: consts.HttpContainerPort,
		},
	}
}

// getEnvs returns the DNS entry point environment variable.
func (mod ServicePortModifier) getEnvs() []corev1.EnvVar {
	return []corev1.EnvVar{
		{
			Name:  consts.EnvDtDnsEntryPoint,
			Value: mod.buildDNSEntryPoint(),
		},
	}
}

// buildDNSEntryPoint returns the ActiveGate communication URL built from
// the service host name.
func (mod ServicePortModifier) buildDNSEntryPoint() string {
	return fmt.Sprintf("https://%s/communication", buildServiceHostName(mod.dynakube.Name, mod.capability.ShortName()))
}
// buildServiceHostName converts the name returned by BuildServiceName
// into the variable name which Kubernetes uses to reference the associated
// service ("$(NAME_SERVICE_HOST):$(NAME_SERVICE_PORT)", uppercased with
// dashes replaced by underscores).
// For more information see: https://kubernetes.io/docs/concepts/services-networking/service/
func buildServiceHostName(dynakubeName string, module string) string {
	serviceName :=
		strings.ReplaceAll(
			strings.ToUpper(
				capability.BuildServiceName(dynakubeName, module)),
			"-", "_")
	return fmt.Sprintf("$(%s_SERVICE_HOST):$(%s_SERVICE_PORT)", serviceName, serviceName)
}
|
package verification
import "testing"
// TestConsume logs the value Consume returns for a sample phone number.
// NOTE(review): there are no assertions — this only verifies that Consume
// does not panic; consider asserting on its result.
func TestConsume(t *testing.T) {
	t.Log(Consume("13216817777"))
}
// TestProduce feeds the value from Consume back into Produce for the same
// key and logs the result.
// NOTE(review): no assertions — only a smoke check; consider asserting on
// Produce's result.
func TestProduce(t *testing.T) {
	key := "13216817777"
	value := Consume(key)
	t.Log(Produce(key, value))
}
|
package grains
import "errors"
// Version is the exercise revision.
const Version = "1"

// maxSquares is the number of squares on a chessboard.
const maxSquares int = 64

// Square returns the number of grains on square n of the chessboard
// (1-based), where the count doubles on each successive square.
// It returns an error when n is outside [1, maxSquares].
func Square(n int) (uint64, error) {
	if n < 1 || n > maxSquares {
		// Error string made lowercase and descriptive, per Go convention.
		return 0, errors.New("square must be between 1 and 64")
	}
	return uint64(1 << uint(n-1)), nil
}

// Total returns the sum of grains on all squares: 2^64 - 1.
func Total() uint64 {
	return uint64((1 << maxSquares) - 1)
}
|
package resolver
import (
"context"
"shared/grpc/module"
"shared/utility/glog"
"shared/utility/key"
clientv3 "go.etcd.io/etcd/client/v3"
)
// watcher streams service-membership changes for one etcd key prefix.
// receiver delivers batches of resolver messages and is closed when the
// watch loop exits.
type watcher struct {
	client   *clientv3.Client
	receiver chan []*module.ResolverMessage
}

// newWatcher performs an initial prefix read so that current members are
// delivered immediately, then starts a goroutine watching for changes
// beginning at the revision just after the snapshot.
func newWatcher(ctx context.Context, client *clientv3.Client, service string) (*watcher, error) {
	// init server
	resp, err := client.Get(ctx, service, clientv3.WithPrefix(), clientv3.WithSerializable())
	if err != nil {
		return nil, err
	}
	messages := make([]*module.ResolverMessage, 0, len(resp.Kvs))
	for _, v := range resp.Kvs {
		messages = append(
			messages,
			module.NewResolverMessage(key.SubEtcdKey(string(v.Key)), string(v.Value)),
		)
	}
	// Buffer of one lets the snapshot be queued before anyone receives.
	c := make(chan []*module.ResolverMessage, 1)
	if len(messages) > 0 {
		c <- messages
	}
	watcher := &watcher{
		client:   client,
		receiver: c,
	}
	go watcher.watch(ctx, service, resp.Header.Revision+1)
	return watcher, nil
}

// watch forwards etcd PUT/DELETE events on the service prefix as
// Register/Unregister messages until the context is cancelled, the watch
// channel closes, or the watch reports an error; receiver is closed on
// exit in all cases.
func (w *watcher) watch(ctx context.Context, service string, rev int64) {
	defer close(w.receiver)
	watch := w.client.Watch(ctx, service, clientv3.WithRev(rev), clientv3.WithPrefix())
	glog.Infof("start watching service [%s]", service)
	for {
		select {
		case <-ctx.Done():
			return
		case resp, ok := <-watch:
			if !ok {
				return
			}
			if resp.Err() != nil {
				return
			}
			messages := make([]*module.ResolverMessage, 0, len(resp.Events))
			for _, v := range resp.Events {
				// The server name is the second segment of the etcd key.
				server := ""
				ret := key.SplitEtcdKey(string(v.Kv.Key))
				if len(ret) >= 2 {
					server = ret[1]
				}
				message := module.NewResolverMessage(server, string(v.Kv.Value))
				switch v.Type {
				case clientv3.EventTypePut:
					message.Event = module.Register
				case clientv3.EventTypeDelete:
					message.Event = module.Unregister
				default:
					continue
				}
				messages = append(messages, message)
			}
			if len(messages) > 0 {
				w.receiver <- messages
			}
		}
	}
}
|
package jobbuilder
// Trigger is an (empty) placeholder for the <triggers> element of a
// Jenkins job config document.
type Trigger struct {
}

// SCM models the <scm> element; Class carries the class attribute and
// Value the element's character data.
type SCM struct {
	Class string `xml:"class,attr"`
	Value string `xml:",chardata"`
}

// project is the root of a Jenkins freestyle job config.xml document.
type project struct {
	Description                      string   `xml:"description"`
	KeepDependencies                 bool     `xml:"keepDependencies"`
	SCM                              SCM      `xml:"scm"`
	CanRoam                          bool     `xml:"canRoam"`
	Disabled                         bool     `xml:"disabled"`
	BlockBuildWhenDownstreamBuilding bool     `xml:"blockBuildWhenDownstreamBuilding"`
	BlockBuildWhenUpstreamBuilding   bool     `xml:"blockBuildWhenUpstreamBuilding"`
	Triggers                         Trigger  `xml:"triggers,omitempty"`
	ConcurrentBuild                  bool     `xml:"concurrentBuild"`
	Builders                         Builders `xml:"builders"`
}

// Builders holds the build steps; only a shell step is currently modeled.
type Builders struct {
	HudsonTasksShell HudsonTasksShell `xml:"hudson.tasks.Shell,omitempty"`
}

// HudsonTasksShell is a shell build step; Command is the script body.
type HudsonTasksShell struct {
	Command string `xml:"command"`
}

// JobCreateRequest carries the user-facing parameters from which a job
// config document is built.
type JobCreateRequest struct {
	CanRoam                          bool
	Disable                          bool
	BlockBuildWhenDownstreamBuilding bool
	BlockBuildWhenUpstreamBuilding   bool
	ConcurrentBuild                  bool
	Description                      string
	KeepDependencies                 bool
	SCMClass                         string
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package queue
import (
"encoding/binary"
"reflect"
"testing"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/tcpip/link/sharedmem/pipe"
)
// TestBasicTxQueue checks that a basic transmit on a Tx queue works and
// that its completion is later reported with the submitted id.
func TestBasicTxQueue(t *testing.T) {
	// Tests that a basic transmit on a queue works, and that completion
	// gets properly reported as well.
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Tx
	var state atomicbitops.Uint32
	q.Init(pb1, pb2, &state)
	// Enqueue two buffers.
	b := []TxBuffer{
		{nil, 100, 60},
		{nil, 200, 40},
	}
	b[0].Next = &b[1]
	const usedID = 1002
	const usedTotalSize = 100
	if !q.Enqueue(usedID, usedTotalSize, 2, &b[0]) {
		t.Fatalf("Enqueue failed on empty queue")
	}
	// Check the contents of the pipe.
	d := rxp.Pull()
	if d == nil {
		t.Fatalf("Tx pipe is empty after Enqueue")
	}
	want := []byte{
		234, 3, 0, 0, 0, 0, 0, 0, // id (1002 little-endian)
		100, 0, 0, 0, // total size
		0, 0, 0, 0, // reserved
		100, 0, 0, 0, 0, 0, 0, 0, // offset 1
		60, 0, 0, 0, // size 1
		200, 0, 0, 0, 0, 0, 0, 0, // offset 2
		40, 0, 0, 0, // size 2
	}
	if !reflect.DeepEqual(want, d) {
		t.Fatalf("Bad posted packet: got %v, want %v", d, want)
	}
	rxp.Flush()
	// Check that there are no completions yet.
	if _, ok := q.CompletedPacket(); ok {
		t.Fatalf("Packet reported as completed too soon")
	}
	// Post a completion.
	d = txp.Push(8)
	if d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	binary.LittleEndian.PutUint64(d, usedID)
	txp.Flush()
	// Check that completion is properly reported.
	id, ok := q.CompletedPacket()
	if !ok {
		t.Fatalf("Completion not reported")
	}
	if id != usedID {
		t.Fatalf("Bad completion id: got %v, want %v", id, usedID)
	}
}
// TestBasicRxQueue tests that a basic receive on a queue works: posted
// buffers appear on the pipe, and a pushed completion is dequeued with
// the right size and buffer list.
func TestBasicRxQueue(t *testing.T) {
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Rx
	q.Init(pb1, pb2, nil)
	// Post two buffers.
	b := []RxBuffer{
		{100, 60, 1077, 0},
		{200, 40, 2123, 0},
	}
	if !q.PostBuffers(b) {
		t.Fatalf("PostBuffers failed on empty queue")
	}
	// Check the contents of the pipe. The IDs below are the little-endian
	// encodings of 1077 (53 + 4*256) and 2123 (75 + 8*256).
	want := [][]byte{
		{
			100, 0, 0, 0, 0, 0, 0, 0, // Offset1
			60, 0, 0, 0, // Size1
			0, 0, 0, 0, // Remaining in group 1
			0, 0, 0, 0, 0, 0, 0, 0, // User data 1
			53, 4, 0, 0, 0, 0, 0, 0, // ID 1
		},
		{
			200, 0, 0, 0, 0, 0, 0, 0, // Offset2
			40, 0, 0, 0, // Size2
			0, 0, 0, 0, // Remaining in group 2
			0, 0, 0, 0, 0, 0, 0, 0, // User data 2
			75, 8, 0, 0, 0, 0, 0, 0, // ID 2
		},
	}
	for i := range b {
		d := rxp.Pull()
		if d == nil {
			t.Fatalf("Tx pipe is empty after PostBuffers")
		}
		if !reflect.DeepEqual(want[i], d) {
			t.Fatalf("Bad posted packet: got %v, want %v", d, want[i])
		}
		rxp.Flush()
	}
	// Check that there are no completions.
	if _, n := q.Dequeue(nil); n != 0 {
		t.Fatalf("Packet reported as received too soon")
	}
	// Post a completion.
	d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
	if d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	copy(d, []byte{
		100, 0, 0, 0, // packet size
		0, 0, 0, 0, // reserved
		100, 0, 0, 0, 0, 0, 0, 0, // offset 1
		60, 0, 0, 0, // size 1
		0, 0, 0, 0, 0, 0, 0, 0, // user data 1
		53, 4, 0, 0, 0, 0, 0, 0, // ID 1
		200, 0, 0, 0, 0, 0, 0, 0, // offset 2
		40, 0, 0, 0, // size 2
		0, 0, 0, 0, 0, 0, 0, 0, // user data 2
		75, 8, 0, 0, 0, 0, 0, 0, // ID 2
	})
	txp.Flush()
	// Check that completion is properly reported.
	bufs, n := q.Dequeue(nil)
	if n != 100 {
		t.Fatalf("Bad packet size: got %v, want %v", n, 100)
	}
	if !reflect.DeepEqual(bufs, b) {
		t.Fatalf("Bad returned buffers: got %v, want %v", bufs, b)
	}
}
// TestBadTxCompletion checks that tx completions with bad sizes (both
// shorter and longer than the 8-byte completion record) are properly
// ignored.
func TestBadTxCompletion(t *testing.T) {
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Tx
	var state atomicbitops.Uint32
	q.Init(pb1, pb2, &state)
	// Post a completion that is too short, and check that it is ignored.
	if d := txp.Push(7); d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	txp.Flush()
	if _, ok := q.CompletedPacket(); ok {
		t.Fatalf("Bad completion not ignored")
	}
	// Post a completion that is too long, and check that it is ignored.
	if d := txp.Push(10); d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	txp.Flush()
	if _, ok := q.CompletedPacket(); ok {
		t.Fatalf("Bad completion not ignored")
	}
}
// TestBadRxCompletion checks that malformed rx completions are properly
// ignored: records that are too short, whose buffer sizes don't add up
// to the packet size, or whose sizes only add up via 32-bit overflow.
func TestBadRxCompletion(t *testing.T) {
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Rx
	q.Init(pb1, pb2, nil)
	// Post a completion that is too short, and check that it is ignored.
	if d := txp.Push(7); d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	txp.Flush()
	if b, _ := q.Dequeue(nil); b != nil {
		t.Fatalf("Bad completion not ignored")
	}
	// Post a completion whose buffer sizes add up to less than the total
	// size.
	d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
	if d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	copy(d, []byte{
		100, 0, 0, 0, // packet size
		0, 0, 0, 0, // reserved
		100, 0, 0, 0, 0, 0, 0, 0, // offset 1
		10, 0, 0, 0, // size 1
		0, 0, 0, 0, 0, 0, 0, 0, // user data 1
		53, 4, 0, 0, 0, 0, 0, 0, // ID 1
		200, 0, 0, 0, 0, 0, 0, 0, // offset 2
		10, 0, 0, 0, // size 2
		0, 0, 0, 0, 0, 0, 0, 0, // user data 2
		75, 8, 0, 0, 0, 0, 0, 0, // ID 2
	})
	txp.Flush()
	if b, _ := q.Dequeue(nil); b != nil {
		t.Fatalf("Bad completion not ignored")
	}
	// Post a completion whose buffer sizes will cause a 32-bit overflow,
	// but adds up to the right number.
	d = txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
	if d == nil {
		t.Fatalf("Unable to push to rx pipe")
	}
	copy(d, []byte{
		100, 0, 0, 0, // packet size
		0, 0, 0, 0, // reserved
		100, 0, 0, 0, 0, 0, 0, 0, // offset 1
		255, 255, 255, 255, // size 1 (0xffffffff; overflows when summed)
		0, 0, 0, 0, 0, 0, 0, 0, // user data 1
		53, 4, 0, 0, 0, 0, 0, 0, // ID 1
		200, 0, 0, 0, 0, 0, 0, 0, // offset 2
		101, 0, 0, 0, // size 2
		0, 0, 0, 0, 0, 0, 0, 0, // user data 2
		75, 8, 0, 0, 0, 0, 0, 0, // ID 2
	})
	txp.Flush()
	if b, _ := q.Dequeue(nil); b != nil {
		t.Fatalf("Bad completion not ignored")
	}
}
// TestFillTxPipe checks that transmitting a new buffer when the buffer
// pipe is full fails gracefully (Enqueue returns false, no panic).
func TestFillTxPipe(t *testing.T) {
	pb1 := make([]byte, 104)
	pb2 := make([]byte, 104)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Tx
	var state atomicbitops.Uint32
	q.Init(pb1, pb2, &state)
	// Transmit twice, which should fill the tx pipe.
	b := []TxBuffer{
		{nil, 100, 60},
		{nil, 200, 40},
	}
	b[0].Next = &b[1]
	const usedID = 1002
	const usedTotalSize = 100
	for i := uint64(0); i < 2; i++ {
		if !q.Enqueue(usedID+i, usedTotalSize, 2, &b[0]) {
			t.Fatalf("Failed to transmit buffer")
		}
	}
	// Transmit another packet now that the tx pipe is full.
	if q.Enqueue(usedID+2, usedTotalSize, 2, &b[0]) {
		t.Fatalf("Enqueue succeeded when tx pipe is full")
	}
}
// TestFillRxPipe checks that posting a new buffer when the buffer pipe
// is full fails gracefully (PostBuffers returns false, no panic).
func TestFillRxPipe(t *testing.T) {
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Rx
	q.Init(pb1, pb2, nil)
	// Post a buffer twice, it should fill the tx pipe.
	b := []RxBuffer{
		{100, 60, 1077, 0},
	}
	for i := 0; i < 2; i++ {
		if !q.PostBuffers(b) {
			t.Fatalf("PostBuffers failed on non-full queue")
		}
	}
	// Post another buffer now that the tx pipe is full.
	if q.PostBuffers(b) {
		t.Fatalf("PostBuffers succeeded on full queue")
	}
}
// TestLotsOfTransmissions makes sure pipes are being properly flushed
// when transmitting packets, by cycling enqueue/pull/complete far more
// times than the pipes could hold without flushing.
func TestLotsOfTransmissions(t *testing.T) {
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Tx
	var state atomicbitops.Uint32
	q.Init(pb1, pb2, &state)
	// Prepare packet with two buffers.
	b := []TxBuffer{
		{nil, 100, 60},
		{nil, 200, 40},
	}
	b[0].Next = &b[1]
	const usedID = 1002
	const usedTotalSize = 100
	// Post 100000 packets and completions.
	for i := 100000; i > 0; i-- {
		if !q.Enqueue(usedID, usedTotalSize, 2, &b[0]) {
			t.Fatalf("Enqueue failed on non-full queue")
		}
		if d := rxp.Pull(); d == nil {
			t.Fatalf("Tx pipe is empty after Enqueue")
		}
		rxp.Flush()
		d := txp.Push(8)
		if d == nil {
			t.Fatalf("Unable to write to rx pipe")
		}
		binary.LittleEndian.PutUint64(d, usedID)
		txp.Flush()
		if _, ok := q.CompletedPacket(); !ok {
			t.Fatalf("Completion not returned")
		}
	}
}
// TestLotsOfReceptions makes sure pipes are being properly flushed when
// receiving packets, by cycling post/pull/complete/dequeue far more
// times than the pipes could hold without flushing.
func TestLotsOfReceptions(t *testing.T) {
	pb1 := make([]byte, 100)
	pb2 := make([]byte, 100)
	var rxp pipe.Rx
	rxp.Init(pb1)
	var txp pipe.Tx
	txp.Init(pb2)
	var q Rx
	q.Init(pb1, pb2, nil)
	// Prepare for posting two buffers.
	b := []RxBuffer{
		{100, 60, 1077, 0},
		{200, 40, 2123, 0},
	}
	// Post 100000 buffers and completions.
	for i := 100000; i > 0; i-- {
		if !q.PostBuffers(b) {
			t.Fatalf("PostBuffers failed on non-full queue")
		}
		// Two buffers were posted, so pull (and flush) twice.
		if d := rxp.Pull(); d == nil {
			t.Fatalf("Tx pipe is empty after PostBuffers")
		}
		rxp.Flush()
		if d := rxp.Pull(); d == nil {
			t.Fatalf("Tx pipe is empty after PostBuffers")
		}
		rxp.Flush()
		d := txp.Push(sizeOfConsumedPacketHeader + 2*sizeOfConsumedBuffer)
		if d == nil {
			t.Fatalf("Unable to push to rx pipe")
		}
		copy(d, []byte{
			100, 0, 0, 0, // packet size
			0, 0, 0, 0, // reserved
			100, 0, 0, 0, 0, 0, 0, 0, // offset 1
			60, 0, 0, 0, // size 1
			0, 0, 0, 0, 0, 0, 0, 0, // user data 1
			53, 4, 0, 0, 0, 0, 0, 0, // ID 1
			200, 0, 0, 0, 0, 0, 0, 0, // offset 2
			40, 0, 0, 0, // size 2
			0, 0, 0, 0, 0, 0, 0, 0, // user data 2
			75, 8, 0, 0, 0, 0, 0, 0, // ID 2
		})
		txp.Flush()
		if _, n := q.Dequeue(nil); n == 0 {
			t.Fatalf("Dequeue failed when there is a completion")
		}
	}
}
// TestRxEnableNotification checks that enabling notifications results
// in the shared state word being set to EventFDEnabled.
func TestRxEnableNotification(t *testing.T) {
	buf1 := make([]byte, 100)
	buf2 := make([]byte, 100)
	var sharedState atomicbitops.Uint32
	var rxq Rx
	rxq.Init(buf1, buf2, &sharedState)
	rxq.EnableNotification()
	if got := sharedState.Load(); got != EventFDEnabled {
		t.Fatalf("Bad value in shared state: got %v, want %v", got, EventFDEnabled)
	}
}
// TestRxDisableNotification checks that disabling notifications results
// in the shared state word being set to EventFDDisabled.
func TestRxDisableNotification(t *testing.T) {
	buf1 := make([]byte, 100)
	buf2 := make([]byte, 100)
	var sharedState atomicbitops.Uint32
	var rxq Rx
	rxq.Init(buf1, buf2, &sharedState)
	rxq.DisableNotification()
	if got := sharedState.Load(); got != EventFDDisabled {
		t.Fatalf("Bad value in shared state: got %v, want %v", got, EventFDDisabled)
	}
}
|
package main
import (
"fmt"
"os"
"bufio"
// "strings"
)
// StrStdin prompts for a name, reads one line from stdin, and returns a
// channel from which that line can be received.
//
// The channel is buffered with capacity 1. The original version used an
// unbuffered channel and sent on it from this same goroutine, which
// blocks forever (deadlock): no receiver can exist before the function
// returns the channel.
func StrStdin() chan string {
	fmt.Printf("Could you put your name?:")
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	a := scanner.Text()
	// Buffered so the send below completes without a waiting receiver.
	j := make(chan string, 1)
	j <- a
	return j
}
// main reads one line from stdin via StrStdin and prints it from a
// separate goroutine, waiting for that goroutine to finish before
// exiting.
func main() {
	// Obtain the channel before starting the goroutine. The original
	// code started a goroutine receiving from ch00 and then reassigned
	// ch00 to StrStdin()'s result, so the goroutine stayed blocked on
	// the old, never-written channel.
	ch00 := StrStdin()
	done := make(chan struct{})
	go func() {
		// Printf, not Println: Println does not interpret the %s verb.
		fmt.Printf("Message received. %s\n", <-ch00)
		fmt.Println("This is end.")
		close(done)
	}()
	// Wait for the goroutine; otherwise main returns immediately and
	// the program exits before anything is printed.
	<-done
}
|
package component
import "github.com/maxence-charriere/go-app/v7/pkg/app"
// MainLayout is the app's top-level layout component; it embeds
// app.Compo to satisfy the go-app component contract.
type MainLayout struct {
	app.Compo
}
// Render builds the layout UI: a div with id "layout" and class
// "content" whose body is the note list followed by the article view.
func (l *MainLayout) Render() app.UI {
	container := app.Div().ID("layout").Class("content")
	return container.Body(NewNoteList(), NewArticle())
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jdcloud
import (
"github.com/jdcloud-api/jdcloud-sdk-go/services/asset/apis"
"github.com/jdcloud-api/jdcloud-sdk-go/services/asset/client"
"yunion.io/x/pkg/errors"
)
// SBalance wraps the JD Cloud account-amount API result, exposing its
// fields through embedding.
type SBalance struct {
	apis.DescribeAccountAmountResult
}
// DescribeAccountAmount fetches the account balance for this region via
// the JD Cloud asset API.
func (self *SRegion) DescribeAccountAmount() (*SBalance, error) {
	req := apis.NewDescribeAccountAmountRequest(self.ID)
	// Named "cli" so the local does not shadow the imported "client"
	// package.
	cli := client.NewAssetClient(self.Credential)
	cli.Logger = Logger{}
	resp, err := cli.DescribeAccountAmount(req)
	if err != nil {
		// Fixed typo in the wrapped message ("DescribeAccountAmoun").
		return nil, errors.Wrapf(err, "DescribeAccountAmount")
	}
	return &SBalance{DescribeAccountAmountResult: resp.Result}, nil
}
|
package Routes
import (
	"log"
	"net/http"
	"os"
	"path/filepath"

	"github.com/victorneuret/WatcherUpload/server/Config"
)
// remove handles an HTTP request that deletes a previously uploaded
// file, identified by the "file" form parameter and resolved relative
// to the configured upload directory.
func remove(_ http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	if err != nil {
		log.Println(err)
		return
	}
	filePath := r.Form.Get("file")
	if filePath == "" {
		log.Println("Missing 'file' parameter")
		return
	}
	// The parameter is untrusted. The original code concatenated it to
	// UploadDir directly, allowing path traversal (e.g.
	// file=../../etc/passwd). Cleaning "/"+filePath strips any ".."
	// components before joining, so the target can never escape the
	// upload directory.
	target := filepath.Join(Config.GetConfig().UploadDir, filepath.Clean("/"+filePath))
	err = os.RemoveAll(target)
	if err != nil {
		log.Println(err)
		return
	}
	log.Println(filePath, "removed successfully")
}
|
package ledger
import "embed"
// Frontend holds the static assets embedded from the "frontend"
// directory at build time.
//
//go:embed frontend
var Frontend embed.FS
|
package handler
import (
"errors"
"fmt"
"time"
redis "github.com/tokopedia/go-redis-server"
"github.com/tokopedia/redisgrator/config"
"github.com/tokopedia/redisgrator/connection"
)
// RedisHandler implements the redis server handler interface, proxying
// commands between an origin and a destination Redis while migrating
// keys. Start records when the handler was created (used by Info for
// uptime).
type RedisHandler struct {
	redis.DefaultHandler
	Start time.Time
}
// GET
//
// Get reads key from the destination first and falls back to the origin
// when the destination has no value; depending on configuration it then
// duplicates or deletes the origin copy. Returns (nil, nil) when the
// key does not exist.
func (h *RedisHandler) Get(key string) ([]byte, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("GET", key)
	//for safety handle v nil and v empty string
	if err != nil || v == nil || v == "" {
		v, err = origConn.Do("GET", key)
		if err != nil {
			return nil, errors.New("GET : " + err.Error())
		}
	} else {
		if config.Cfg.General.Duplicate {
			//if keys exist in origin move it too destination
			// NOTE(review): this re-writes the value just read from the
			// destination back into the destination; presumably the
			// intent was origin -> destination — confirm.
			_, err := destConn.Do("SET", key, v.([]byte))
			if err != nil {
				return nil, errors.New("GET : err when set on get : " + err.Error())
			}
		}
		if config.Cfg.General.SetToDestWhenGet && !config.Cfg.General.Duplicate {
			// Migration mode: the key now lives in the destination, so
			// drop the stale origin copy.
			_, err = origConn.Do("DEL", key)
			if err != nil {
				return nil, errors.New("GET : err when del on get : " + err.Error())
			}
		}
	}
	strv, ok := v.([]byte)
	if ok == false {
		// A failed assertion leaves strv nil, so a missing key yields
		// (nil, nil); the "keys not found" error below is effectively
		// unreachable — kept for safety.
		if (strv == nil) {
			return nil, nil
		}
		return nil, errors.New("GET : keys not found")
	}
	return strv, nil
}
// DEL
//
// Del deletes key from the origin first; if that errors or removed
// nothing, it deletes from the destination instead. Returns the number
// of keys removed.
func (h *RedisHandler) Del(key string) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := origConn.Do("DEL", key)
	int64v, ok := v.(int64)
	if err != nil || int64v == 0 {
		v, err = destConn.Do("DEL", key)
		if err != nil {
			return 0, errors.New("DEL : " + err.Error())
		}
	}
	//check first is it really not error from destination
	int64v, ok = v.(int64)
	if ok == false {
		return 0, errors.New("DEL : value not int from destination")
	}
	intv := int(int64v)
	return intv, nil
}
// SET
//
// Set writes key to the destination; when duplication is enabled it
// also writes to the origin, otherwise it deletes the origin copy
// (best-effort). Returns the status reply as bytes.
func (h *RedisHandler) Set(key string, value []byte) ([]byte, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("SET", key, value)
	if err != nil {
		return nil, errors.New("SET : err when set : " + err.Error())
	}
	if config.Cfg.General.Duplicate {
		v, err = origConn.Do("SET", key, value)
		if err != nil {
			return nil, errors.New("SET : err when set duplicate: " + err.Error())
		}
	}
	//could ignore all in origin because set on dest already success
	//del old key in origin
	if !config.Cfg.General.Duplicate {
		// Best-effort delete; the error is intentionally ignored.
		origConn.Do("DEL", key)
	}
	strv, ok := v.(string)
	if ok == false {
		return nil, errors.New("SET : value not string")
	}
	return []byte(strv), nil
}
// HEXISTS
//
// Hexists reports (as 0/1) whether field exists in the hash at key,
// checking the destination first and falling back to the origin; when
// the destination has the hash, the hash is (conditionally) migrated
// via moveHash.
func (h *RedisHandler) Hexists(key, field string) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("HEXISTS", key, field)
	//check first is it really not error from origin
	int64v, ok := v.(int64)
	//for safety handle v nil and int64v == 0 int
	if err != nil || v == nil || int64v == 0 {
		v, err = origConn.Do("HEXISTS", key, field)
		if err != nil {
			return 0, err
		}
	} else {
		//if this hash is in origin move it to destination
		err = moveHash(key)
		if err != nil {
			return 0, err
		}
	}
	//check first is it really not error from destination
	int64v, ok = v.(int64)
	if ok == false {
		return 0, errors.New("HEXISTS : value not int from destination")
	}
	intv := int(int64v)
	return intv, nil
}
// HGET
//
// Hget reads a hash field (passed as value) from the destination,
// falling back to the origin; when the destination has it and
// SetToDestWhenGet is on, the hash is migrated via moveHash.
func (h *RedisHandler) Hget(key string, value []byte) ([]byte, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("HGET", key, value)
	//for safety handle v nil and v == ""
	if err != nil || v == nil || v == "" {
		v, err = origConn.Do("HGET", key, value)
		if err != nil {
			return nil, err
		}
	} else {
		if config.Cfg.General.SetToDestWhenGet {
			//if this hash is in origin move it to destination
			err = moveHash(key)
			if err != nil {
				return nil, err
			}
		}
	}
	// NOTE(review): err is nil on every path reaching here, so this
	// check looks dead — confirm before removing.
	if err != nil {
		return nil, errors.New("HGET : err when set : " + err.Error())
	}
	bytv, ok := v.([]byte)
	strv := string(bytv)
	if ok == false {
		return nil, errors.New("HGET : value not string")
	}
	return []byte(strv), nil
}
// HSET
//
// Hset writes a hash field to the destination; if the hash still exists
// in the origin it is migrated first so the write lands on a complete
// hash. With duplication enabled the field is also written to the
// origin.
func (h *RedisHandler) Hset(key, field string, value []byte) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := origConn.Do("EXISTS", key)
	if err != nil {
		return 0, errors.New("HSET : err when check exist in origin : " + err.Error())
	}
	// NOTE(review): unchecked assertion — panics if EXISTS ever returns
	// a non-int64 reply; confirm the client always returns int64 here.
	if v.(int64) == 1 {
		//if hash exists move all hash first to destination
		err := moveHash(key)
		if err != nil {
			return 0, err
		}
	}
	v, err = destConn.Do("HSET", key, field, value)
	if err != nil {
		return 0, errors.New("HSET : err when set : " + err.Error())
	}
	if config.Cfg.General.Duplicate {
		v, err = origConn.Do("HSET", key, field, value)
		if err != nil {
			return 0, errors.New("HSET : err when set : " + err.Error())
		}
	}
	int64v, ok := v.(int64)
	intv := int(int64v)
	if ok == false {
		return 0, errors.New("HSET : value not int")
	}
	return intv, nil
}
// SISMEMBER
//
// Sismember reports (as 0/1) whether field is a member of set, checking
// the destination first and falling back to the origin; when the
// destination has the set, the whole set is (conditionally) migrated
// via moveSet.
func (h *RedisHandler) Sismember(set, field string) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("SISMEMBER", set, field)
	// Use the two-value assertion before inspecting the reply: the
	// original asserted v.(int64) ahead of its nil check, which panics
	// on a nil reply.
	int64v, ok := v.(int64)
	if err != nil || !ok || int64v == 0 {
		v, err = origConn.Do("SISMEMBER", set, field)
		if err != nil {
			// Fixed message: this call targets the origin, not the
			// destination.
			return 0, errors.New("SISMEMBER : err when sismember in origin : " + err.Error())
		}
	} else {
		//move all set
		err := moveSet(set)
		if err != nil {
			return 0, err
		}
	}
	int64v, ok = v.(int64)
	if !ok {
		return 0, errors.New("SISMEMBER : value not int")
	}
	return int(int64v), nil
}
// SMEMBERS
//
// Smembers returns all members of set, reading from the destination
// first and falling back to the origin; when the destination has the
// set, the whole set is (conditionally) migrated via moveSet.
func (h *RedisHandler) Smembers(set string) ([]interface{}, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	var empty []interface{}
	v, err := destConn.Do("SMEMBERS", set)
	// Use the two-value assertion before inspecting the reply: the
	// original asserted v.([]interface{}) ahead of its nil check, which
	// panics on a nil reply.
	arrval, ok := v.([]interface{})
	if err != nil || !ok || arrval == nil {
		v, err = origConn.Do("SMEMBERS", set)
		if err != nil {
			// Fixed copy-pasted message (was "sismember in destination").
			return empty, errors.New("SMEMBERS : err when smembers in origin : " + err.Error())
		}
	} else {
		//move all set
		err := moveSet(set)
		if err != nil {
			return empty, err
		}
	}
	result, ok := v.([]interface{})
	if !ok {
		// Fixed copy-pasted message (was "value not int").
		return empty, errors.New("SMEMBERS : value not array")
	}
	return result, nil
}
// SADD
//
// Sadd adds val to set in the destination; if the set still exists in
// the origin it is migrated first so the new member joins a complete
// set. With duplication enabled the member is also added to the origin.
func (h *RedisHandler) Sadd(set string, val []byte) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := origConn.Do("EXISTS", set)
	if err != nil {
		return 0, errors.New("SADD : err when check exist in origin : " + err.Error())
	}
	if v.(int64) == 1 {
		//if set exists move all set first to destination
		err := moveSet(set)
		if err != nil {
			return 0, err
		}
	}
	v, err = destConn.Do("SADD", set, val)
	if err != nil {
		// Fixed copy-pasted message (was "err when check exist in origin").
		return 0, errors.New("SADD : err when sadd in destination : " + err.Error())
	}
	if config.Cfg.General.Duplicate {
		v, err = origConn.Do("SADD", set, val)
		if err != nil {
			return 0, errors.New("SADD : err when sadd in origin : " + err.Error())
		}
	}
	int64v, ok := v.(int64)
	if !ok {
		// Fixed copy-pasted prefix (was "SISMEMBER").
		return 0, errors.New("SADD : value not int")
	}
	return int(int64v), nil
}
// SREM
//
// Srem removes val from set in the destination and, when duplication is
// enabled, in the origin too. Returns the number of members removed.
// NOTE(review): the error messages say "check exist in origin" but the
// calls are SREM — likely copy-paste; confirm before relying on them.
func (h *RedisHandler) Srem(set string, val []byte) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("SREM", set, val)
	if err != nil {
		return 0, errors.New("SREM : err when check exist in origin : " + err.Error())
	}
	if config.Cfg.General.Duplicate {
		v, err = origConn.Do("SREM", set, val)
		if err != nil {
			return 0, errors.New("SREM : err when check exist in origin : " + err.Error())
		}
	}
	int64v, ok := v.(int64)
	intv := int(int64v)
	if ok == false {
		return 0, errors.New("SREM : value not int")
	}
	return intv, nil
}
// SETEX
//
// Setex writes key with a TTL (value, in seconds) to the destination;
// with duplication enabled it also writes to the origin, otherwise it
// deletes the origin copy (best-effort). Returns the status reply as
// bytes.
func (h *RedisHandler) Setex(key string, value int, val string) ([]byte, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := destConn.Do("SETEX", key, value, val)
	if err != nil {
		return nil, errors.New("SETEX : err when set : " + err.Error())
	}
	if config.Cfg.General.Duplicate {
		v, err = origConn.Do("SETEX", key, value, val)
		if err != nil {
			return nil, errors.New("SETEX : err when set duplicate: " + err.Error())
		}
	}
	//could ignore all in origin because set on dest already success
	//del old key in origin
	if !config.Cfg.General.Duplicate {
		// Best-effort delete; the error is intentionally ignored.
		origConn.Do("DEL", key)
	}
	strv, ok := v.(string)
	if ok == false {
		return nil, errors.New("SETEX : value not string")
	}
	return []byte(strv), nil
}
// EXPIRE
//
// Expire sets a TTL on key, trying the origin first; if the origin did
// not have the key (reply 0), the destination is tried. Returns the
// EXPIRE reply (1 = TTL set, 0 = key missing).
// NOTE(review): the bare v.(int64) assertions below panic if the client
// ever returns a non-int64 reply — confirm the client's contract.
func (h *RedisHandler) Expire(key string, value int) (int, error) {
	origConn := connection.RedisPoolConnection.Origin.Get()
	destConn := connection.RedisPoolConnection.Destination.Get()
	v, err := origConn.Do("EXPIRE", key, value)
	if err != nil {
		return 0, errors.New("EXPIRE : err when check exist in origin : " + err.Error())
	}
	if v.(int64) == 1 {
		// Origin had the key; report its reply.
		int64v, ok := v.(int64)
		if ok == false {
			return 0, errors.New("EXPIRE : value not int")
		}
		intv := int(int64v)
		return intv, err
	}
	if v.(int64) == 0 {
		// Origin did not have the key; try the destination.
		v, err = destConn.Do("EXPIRE", key, value)
		if err != nil {
			return 0, errors.New("EXPIRE : err when check exist in origin : " + err.Error())
		}
		if v.(int64) == 1 || v.(int64) == 0 {
			int64v, ok := v.(int64)
			if ok == false {
				return 0, errors.New("EXPIRE : value not int")
			}
			intv := int(int64v)
			return intv, err
		}
	}
	// Fallback for unexpected replies (effectively unreachable given the
	// branches above).
	if err != nil {
		return 0, errors.New("EXPIRE : err when set : " + err.Error())
	}
	int64v, ok := v.(int64)
	intv := int(int64v)
	if ok == false {
		return 0, errors.New("EXPIRE : value not int")
	}
	return intv, nil
}
// INFO
//
// Info returns server statistics in a Redis INFO-like text format:
// uptime since the handler's Start time, plus a placeholder read rate.
func (h *RedisHandler) Info() ([]byte, error) {
	return []byte(fmt.Sprintf(
		`#Server
redisgrator 0.0.1
uptime_in_seconds: %d
#Stats
number_of_reads_per_second: %d
`, int(time.Since(h.Start).Seconds()), 0)), nil
}
// moveHash copies the whole hash at key from the origin to the
// destination (field by field, from the HGETALL field/value pairs) and,
// unless duplication is enabled, deletes the origin copy. It is a no-op
// when MoveHash is disabled in the config.
func moveHash(key string) error {
	if config.Cfg.General.MoveHash {
		origConn := connection.RedisPoolConnection.Origin.Get()
		destConn := connection.RedisPoolConnection.Destination.Get()
		v, err := origConn.Do("HGETALL", key)
		if err != nil {
			return err
		}
		//check first is v really array of interface
		arrval, ok := v.([]interface{})
		if ok == true {
			// HGETALL replies alternate field, value, field, value...;
			// even indices are field names.
			for i, val := range arrval {
				valstr := string(val.([]byte))
				if i%2 == 0 {
					_, err := destConn.Do("HSET", key, valstr, arrval[i+1].([]byte))
					if err != nil {
						return errors.New("err when set on hexist : " + err.Error())
					}
				}
			}
			if !config.Cfg.General.Duplicate {
				_, err = origConn.Do("DEL", key)
				if err != nil {
					return errors.New("err when del on hexist : " + err.Error())
				}
			}
		}
	}
	return nil
}
// moveSet copies the whole set from the origin to the destination
// (SADD-ing each SMEMBERS result) and, unless duplication is enabled,
// deletes the origin copy. It is a no-op when MoveSet is disabled in
// the config.
func moveSet(set string) error {
	if config.Cfg.General.MoveSet {
		origConn := connection.RedisPoolConnection.Origin.Get()
		destConn := connection.RedisPoolConnection.Destination.Get()
		v, err := origConn.Do("SMEMBERS", set)
		if err != nil {
			return err
		}
		//check first is v really array of interface
		arrval, ok := v.([]interface{})
		if ok == true {
			for _, val := range arrval {
				valstr := string(val.([]byte))
				//add all members of set to destination
				_, err := destConn.Do("SADD", set, valstr)
				if err != nil {
					return errors.New("err when set on hexist : keys exist as different type : " + err.Error())
				}
			}
			if !config.Cfg.General.Duplicate {
				//delete from origin
				_, err = origConn.Do("DEL", set)
				if err != nil {
					return errors.New("err when del on hexist : " + err.Error())
				}
			}
		}
	}
	return nil
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tracker
import (
"context"
"fmt"
"sync/atomic"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// lockfreeTracker is a performant implementation of Tracker, at the expense of
// precision. A precise implementation would hold all tracked timestamps in a
// min-heap, but that costs memory and can't be implemented in a lock-free
// manner (at least not by this author). This Tracker generally doesn't know
// exactly what the lowest tracked timestamp is; it just knows a lower-bound on
// it. Concretely, the tracker doesn't maintain information on all tracked
// timestamps; it only maintains a summary in the form of two buckets, each with
// one timestamp and a reference count. Every timestamp in a bucket counts as if
// it was equal to the bucket's timestamp - even though, in reality, they can be
// higher than the bucket's timestamp.
//
// Some combinations of operations are thread-safe, others need the caller to
// ensure mutual exclusion. In particular, insertions (Track()) are "lock free",
// but deletions (Untrack()) are not. Deletions need exclusive access, so the
// caller needs to use a lock; the intention is for that lock to be held in
// "read" mode for insertions and in write mode for deletions. This data
// structure is meant to be used in conjunction with a propBuf, which uses this
// locking model.
//
// Note that this implementation is only reasonable under some assumptions about
// the use: namely that the lifetimes of all the timestamps in the set are
// fairly similar, and that the timestamps tend to increase over time. This
// matches the expectations of requests (identified by their write timestamp),
// with the lifetime being their evaluation duration.
type lockfreeTracker struct {
	// tokens returned by Track() contain the pointer-identity of a bucket, so we
	// can't swap the buckets in this array in order to maintain b1 at the front.
	// Instead, we swap the b1 and b2 pointers to reorder them.
	buckets [2]bucket
	// b1 and b2 point into buckets. Ordering invariant (maintained by
	// Untrack's pointer swap): when both buckets are initialized, b1
	// holds the lower timestamp; when only one is initialized, it is b1.
	b1, b2 *bucket
}
// NewLockfreeTracker creates a tracker.
func NewLockfreeTracker() Tracker {
	t := &lockfreeTracker{}
	t.b1, t.b2 = &t.buckets[0], &t.buckets[1]
	return t
}
// String cannot be called concurrently with Untrack.
func (t *lockfreeTracker) String() string {
	// Equivalent to fmt.Sprintf("b1: %s; b2: %s", ...): %s on *bucket
	// invokes its String method.
	return "b1: " + t.b1.String() + "; b2: " + t.b2.String()
}
// Track is part of the Tracker interface.
func (t *lockfreeTracker) Track(ctx context.Context, ts hlc.Timestamp) RemovalToken {
	// The tracking scheme is based on maintaining (at most) two buckets of
	// timestamps, and continuously draining them and creating new buckets. Timestamps
	// come in (through this Track()) and enter a bucket. Later, they leave the
	// bucket through Untrack(). Each bucket has a bucket timestamp, which is the
	// lowest timestamp that ever entered it. A
	// bucket's timestamp can be lowered throughout its life, but never increased. A bucket doesn't
	// keep track of which timestamps are in it (it only maintains a count), so the
	// bucket is unaware of when the lowest timestamp (i.e. the timestamp that
	// set the bucket's timestamp) leaves.
	//
	// When a bucket is emptied, it gets reset. Future Track() calls can
	// re-initialize it with a new timestamp (generally expected to be higher than
	// the timestamp it had before the reset).
	//
	// At any point, LowerBound() returns the first bucket's timestamp. That's a
	// lower bound on all the timestamps currently tracked, since b1's timestamp
	// is always lower than b2's.
	//
	// The diagram below tries to give intuition about how the two buckets work.
	// It shows two buckets with timestamps 10 and 20, and three timestamps
	// entering the set. It explains which bucket each timestamp joins.
	//
	// ^ time grows upwards            |  |
	// |                               |  |
	// |                               |  |
	// | ts 25 joins b2 ->        | |  |  |
	// |                          | |  |  |
	// |                          | |  +----+
	// |                          | |  b2 ts: 20
	// | ts 15 joins b2,  ->      | |
	// |   extending it downwards +----+
	// |                          b1 ts: 10
	// | ts 5 joins b1,    ->
	// |   extending it downwards
	//
	// Our goal is to maximize the Tracker's lower bound (i.e. its conservative
	// approximation about the lowest tracked timestamp), which is b1's timestamp
	// (see below).
	//
	// - 25 is above both buckets (meaning above the buckets' timestamp), so it
	// joins b2. It would be technically correct for it to join b1 too, but it'd
	// be a bad idea: if b1 would be slow enough to be on the critical path for b1
	// draining (which it likely is, if all the timestamp stay in the set for a
	// similar amount of time) then it'd be preventing bumping the lower bound
	// from 10 to 20 (which, in practice, would translate in the respective range not
	// closing the [10, 20) range of timestamps).
	// - 15 is below b2, but above b1. It's not quite as clear cut which
	// bucket is the best one to join; if its lifetime is short and
	// so it is *not* on the critical path for b1 draining, then it'd be better for
	// it to join b1. Once b1 drains, we'll be able to bump the tracker's lower
	// bound to 20. On the other hand, if it joins b2, then b2's timestamp comes
	// down to 15 and, once b1 drains and 15 is removed from the tracked set, the
	// tracker's lower bound would only become 15 (which is worse than 20). But,
	// on the third hand, if 15 stays tracked for a while and is on b1's critical
	// path, then putting it in b2 would at least allow us to bump the lower bound
	// to 15, which is better than nothing. We take this argument, and put it in
	// b2.
	// - 5 is below both buckets. The only sensible thing to do is putting it
	// in b1; otherwise we'd have to extend b2 downwards, inverting b1 and b2.
	//
	//
	// IMPLEMENTATION INVARIANTS:
	//
	// 1) After a bucket is initialized, its timestamp only gets lower until the
	// bucket is reset (i.e. it never increases). This serves to keep the relative
	// relation of buckets fixed.
	// 2) (a corollary) If both buckets are initialized, b1.timestamp < b2.timestamp.
	// 3) If only one bucket is initialized, it is b1. Note that both buckets
	// might be uninitialized.
	// 4) Initialized buckets are not empty.
	b1, b2 := t.b1, t.b2
	// The Tracker internally works with int64's, for atomic CAS purposes. So we
	// round down the hlc.Timestamp to just its WallTime.
	wts := ts.WallTime
	// Make sure that there's at least one bucket.
	t1, initialized := b1.timestamp()
	// Join b1 if wts is below it.
	//
	// It's possible that multiple requests coming at the same time pass the `wts
	// <= t1` check and enter b1, even though b2 is uninitialized. This is not
	// ideal; it'd be better if only the lowest request would end up in b1 and the
	// others would end up in b2 (or, more generally, if some "low" requests join
	// b1 and the rest (the "high" ones) go on to create and join b2). But that's
	// harder to implement.
	if !initialized || wts <= t1 {
		return b1.extendAndJoin(ctx, wts, ts.Synthetic)
	}
	// We know that b1 < wts. We can technically join either bucket, but we always
	// prefer b2 in order to let b1 drain as soon as possible (at which point
	// we'll be able to create a new bucket).
	return b2.extendAndJoin(ctx, wts, ts.Synthetic)
}
// Untrack is part of the Tracker interface.
func (t *lockfreeTracker) Untrack(ctx context.Context, tok RemovalToken) {
	b := tok.(lockfreeToken).b
	// Note that atomic ops are not required here, as we hold the exclusive lock.
	b.refcnt--
	if b.refcnt < 0 {
		log.Fatalf(ctx, "negative bucket refcount: %d", b.refcnt)
	}
	if b.refcnt == 0 {
		// Reset the bucket, so that future Track() calls can create a new one.
		b.ts = 0
		b.synthetic = 0
		// If we reset b1, swap the pointers, so that, if b2 is currently
		// initialized, it becomes b1. If a single bucket is initialized, we want it
		// to be b1. (This maintains Track()'s invariant 3.)
		if b == t.b1 {
			t.b1 = t.b2
			t.b2 = b
		}
	}
}
// LowerBound is part of the Tracker interface.
func (t *lockfreeTracker) LowerBound(ctx context.Context) hlc.Timestamp {
	// Note that, if b1 is uninitialized, so is b2. If both are initialized,
	// b1 < b2. So, we only need to look at b1.
	ts, initialized := t.b1.timestamp()
	if !initialized {
		// Nothing is tracked; the empty timestamp signals "no bound".
		return hlc.Timestamp{}
	}
	return hlc.Timestamp{
		WallTime:  ts,
		Logical:   0,
		Synthetic: t.b1.isSynthetic(),
	}
}
// Count is part of the Tracker interface.
func (t *lockfreeTracker) Count() int {
	total := int(t.b1.refcnt)
	total += int(t.b2.refcnt)
	return total
}
// bucket represents a Tracker bucket: a data structure that coalesces a number
// of timestamps, keeping track only of their count and minimum.
//
// A bucket can be initialized or uninitialized. It's initialized when the ts is
// set (ts == 0 doubles as the "uninitialized" sentinel; see timestamp()).
type bucket struct {
	// ts is the bucket's lower-bound wall time, in nanos; 0 means
	// uninitialized. Accessed atomically; keeping this int64 as the first
	// field keeps it 64-bit aligned, which sync/atomic requires on 32-bit
	// platforms.
	ts int64 // atomic, nanos
	// refcnt is the number of timestamps currently tracked in this bucket.
	refcnt int32 // atomic
	// synthetic is a 0/1 flag recording whether ts should be considered a
	// synthetic timestamp. Maintained loosely; see extendAndJoin.
	synthetic int32 // atomic
}
// String returns a human-readable summary of the bucket: either "uninit" or
// the tracked request count and the bucket's lower-bound time.
func (b *bucket) String() string {
	wall := atomic.LoadInt64(&b.ts)
	if wall == 0 {
		return "uninit"
	}
	cnt := atomic.LoadInt32(&b.refcnt)
	return fmt.Sprintf("%d requests, lower bound: %s", cnt, timeutil.Unix(0, wall))
}
// timestamp returns the bucket's timestamp, plus a bool that is true iff the
// bucket is initialized. An uninitialized bucket reports (0, false).
func (b *bucket) timestamp() (int64, bool) {
	wall := atomic.LoadInt64(&b.ts)
	if wall == 0 {
		// ts == 0 is the "uninitialized" sentinel.
		return 0, false
	}
	return wall, true
}
// isSynthetic returns true if the bucket's timestamp (i.e. the bucket's lower
// bound) should be considered a synthetic timestamp.
func (b *bucket) isSynthetic() bool {
	if atomic.LoadInt32(&b.synthetic) == 0 {
		return false
	}
	return true
}
// extendAndJoin extends the bucket downwards (if necessary) so that its
// timestamp is <= ts, and then adds a timestamp to the bucket. It returns a
// token to be used for removing the timestamp from the bucket.
//
// If the bucket is not initialized, it will be initialized to ts.
//
// ctx is unused in the current implementation. ts is a wall time in nanos;
// note that 0 is reserved as the "uninitialized" sentinel, so callers
// presumably never pass 0 — TODO confirm upstream. synthetic says whether ts
// should be treated as a synthetic timestamp when this call initializes the
// bucket.
func (b *bucket) extendAndJoin(ctx context.Context, ts int64, synthetic bool) lockfreeToken {
	// Lower the bucket's timestamp via a CAS loop: keep trying until either we
	// install ts ourselves, or we observe that someone else has already set a
	// value <= ts (at which point the bucket's lower bound already covers us).
	var t int64
	for {
		t = atomic.LoadInt64(&b.ts)
		if t != 0 && t <= ts {
			break
		}
		if atomic.CompareAndSwapInt64(&b.ts, t, ts) {
			break
		}
	}
	// If we created the bucket (t == 0 here means our CAS installed the first
	// timestamp), then we dictate if its lower bound will be considered a
	// synthetic timestamp or not. It's possible that we're now inserting a
	// synthetic timestamp into the bucket but, over time, a higher
	// non-synthetic timestamp joins. Or, that a lower non-synthetic timestamp
	// joins. In either case, the bucket will remain "synthetic" although it'd be
	// correct to make it non-synthetic. We don't make an effort to keep the
	// synthetic bit up to date within a bucket.
	if t == 0 && synthetic {
		atomic.StoreInt32(&b.synthetic, 1)
	}
	atomic.AddInt32(&b.refcnt, 1)
	return lockfreeToken{b: b}
}
// lockfreeToken implements RemovalToken. It identifies a tracked timestamp so
// that Untrack can later remove it.
type lockfreeToken struct {
	// b is the bucket that this timestamp is part of; Untrack decrements this
	// bucket's refcount.
	b *bucket
}

// Compile-time assertion that lockfreeToken satisfies RemovalToken.
var _ RemovalToken = lockfreeToken{}

// RemovalTokenMarker implements RemovalToken.
func (l lockfreeToken) RemovalTokenMarker() {}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.