text stringlengths 11 4.05M |
|---|
package deadcode
import (
"testing"
"github.com/stephens2424/php/ast"
"github.com/stephens2424/php/parser"
)
// TestDeadFunctions verifies that DeadFunctions reports exactly the set of
// uncalled functions in a parsed PHP source: simple and fizz::other are never
// invoked, while fizz::notsimple is called and must not be reported.
func TestDeadFunctions(t *testing.T) {
	src := `<?php
$var1 = "a";
function simple() {
$var2 = "b";
$var3 = "c";
}
class fizz {
const buzz = "fizzbuzz";
static function notsimple() {
$var4 = "d";
}
function other() {}
}
fizz::notsimple();
`
	p := parser.NewParser()
	if _, err := p.Parse("test.php", src); err != nil {
		t.Fatal(err)
	}
	var shouldBeDead = map[string]struct{}{
		"simple": struct{}{},
		"other":  struct{}{},
	}
	dead := DeadFunctions(p.FileSet, []string{"test.php"})
	for _, deadFunc := range dead {
		fnName := deadFunc.(*ast.FunctionStmt).Name
		if _, ok := shouldBeDead[fnName]; !ok {
			t.Errorf("%q was found dead, but shouldn't have been", fnName)
		}
		delete(shouldBeDead, fnName)
	}
	// FIX: the original ranged with two variables (`for _, fugitive`), which
	// iterates map *values* (struct{}), so the failure message printed "{}"
	// instead of the missing function name. Range over the keys.
	for fugitive := range shouldBeDead {
		t.Errorf("%q should have been found dead, but wasn't", fugitive)
	}
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cat
import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
// DataSourceName is an alias for tree.TableName, and is used for views and
// sequences as well as tables.
type DataSourceName = tree.TableName

// DataSource is an interface to a database object that provides rows, like a
// table, a view, or a sequence. It extends the generic catalog Object
// interface with name and type-dependency accessors.
type DataSource interface {
	Object

	// Name returns the unqualified name of the object.
	Name() tree.Name

	// CollectTypes returns all user defined types that the column uses.
	// This includes types used in default expressions, computed columns,
	// and the type of the column itself.
	CollectTypes(ord int) (descpb.IDs, error)
}
|
package infrastructure
import (
interfaces "github.com/dev-jpnobrega/api-rest/src/domain/contract/interface"
factory "github.com/dev-jpnobrega/api-rest/src/infrastructure/factory"
handler "github.com/dev-jpnobrega/api-rest/src/infrastructure/http"
echo "github.com/labstack/echo/v4"
)
// Adapter bridges an application command onto an echo route handler by
// delegating every incoming request (together with the command) to h.
func Adapter(command interfaces.ICommand, h handler.IHandler) func(c echo.Context) error {
	return func(ctx echo.Context) error {
		return h.Handle(ctx, command)
	}
}
// BuildRouters create router APP
// Registers every v1 HTTP route on the given echo server, wiring each
// endpoint to its command via Adapter.
func BuildRouters(server *echo.Echo) {
	// routeGroup := server.Group("/v1")
	// Paths are registered with the explicit "/v1" prefix instead of a group.
	server.GET(
		"/v1/people",
		Adapter(
			factory.GetPeopleFactory(),
			handler.NewHandler(),
		),
	)
	server.POST(
		"/v1/login",
		Adapter(
			factory.UserLoginFactory(),
			handler.NewHandler(),
		),
	)
}
|
package codes
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/go-chi/chi/v5"
"github.com/jrapoport/gothic/core/codes"
"github.com/jrapoport/gothic/core/context"
"github.com/jrapoport/gothic/hosts/rest"
"github.com/jrapoport/gothic/models/code"
"github.com/jrapoport/gothic/models/types"
"github.com/jrapoport/gothic/models/types/key"
"github.com/jrapoport/gothic/test/tconn"
"github.com/jrapoport/gothic/test/tcore"
"github.com/jrapoport/gothic/test/thttp"
"github.com/jrapoport/gothic/test/tsrv"
"github.com/segmentio/encoding/json"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSignupServer_CreateSignupCodes exercises the CreateSignupCodes REST
// handler: requests without a usable admin token must be rejected, and a
// valid admin request must return exactly the requested number of codes.
func TestSignupServer_CreateSignupCodes(t *testing.T) {
	t.Parallel()
	const testLen = 10
	s, _ := tsrv.RESTServer(t, false)
	srv := newSignupServer(s)
	j := srv.Config().JWT
	// createCodes POSTs body to the codes endpoint, attaching parsed JWT
	// claims when a token is supplied, and returns the recorded response.
	createCodes := func(tok string, body interface{}) *httptest.ResponseRecorder {
		r := thttp.Request(t, http.MethodPost, Codes, tok, nil, body)
		if tok != "" {
			var err error
			r, err = rest.ParseClaims(r, j, tok)
			require.NoError(t, err)
		}
		w := httptest.NewRecorder()
		srv.CreateSignupCodes(w, r)
		return w
	}
	// no user id slug
	tok := thttp.UserToken(t, j, false, false)
	res := createCodes(tok, nil)
	assert.NotEqual(t, http.StatusOK, res.Code)
	// no admin id
	res = createCodes("", nil)
	assert.NotEqual(t, http.StatusOK, res.Code)
	// bad request
	res = createCodes("", []byte("\n"))
	assert.NotEqual(t, http.StatusOK, res.Code)
	// admin not found
	res = createCodes(tok, types.Map{})
	assert.NotEqual(t, http.StatusOK, res.Code)
	// Happy path: an actual admin user may mint testLen infinite-use codes.
	_, tok = tcore.TestUser(t, srv.API, "", true)
	res = createCodes(tok, types.Map{
		key.Uses:  code.InfiniteUse,
		key.Count: testLen,
	})
	assert.Equal(t, http.StatusOK, res.Code)
	var list []string
	err := json.Unmarshal(res.Body.Bytes(), &list)
	require.NoError(t, err)
	assert.Len(t, list, testLen)
}
// TestSignupServer_CheckSignupCode exercises the CheckSignupCode REST handler
// for missing, invalid, valid, and already-used signup codes.
func TestSignupServer_CheckSignupCode(t *testing.T) {
	t.Parallel()
	s, _ := tsrv.RESTServer(t, false)
	srv := newSignupServer(s)
	j := srv.Config().JWT
	tok := thttp.UserToken(t, j, true, true)
	scode, err := srv.API.CreateSignupCode(context.Background(), 0)
	require.NoError(t, err)
	// checkCode GETs the given code, injecting the chi route parameter the
	// handler reads the code from, and returns the recorded response.
	checkCode := func(cd string) *httptest.ResponseRecorder {
		uri := Codes + rest.Root + cd
		r := thttp.Request(t, http.MethodGet, uri, tok, nil, nil)
		if tok != "" {
			r, err = rest.ParseClaims(r, j, tok)
			require.NoError(t, err)
		}
		ctx := chi.NewRouteContext()
		ctx.URLParams = chi.RouteParams{
			Keys:   []string{key.Code},
			Values: []string{cd},
		}
		r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, ctx))
		w := httptest.NewRecorder()
		srv.CheckSignupCode(w, r)
		return w
	}
	// no code
	res := checkCode("")
	assert.NotEqual(t, http.StatusOK, res.Code)
	// bad code
	res = checkCode("bad")
	assert.NotEqual(t, http.StatusOK, res.Code)
	// good code
	res = checkCode(scode)
	assert.Equal(t, http.StatusOK, res.Code)
	var cr map[string]interface{}
	err = json.NewDecoder(res.Body).Decode(&cr)
	require.NoError(t, err)
	assert.Equal(t, scode, cr[key.Code])
	assert.True(t, cr[key.Valid].(bool))
	// burn the code
	conn := tconn.Conn(t, srv.Config())
	sc, err := codes.GetSignupCode(conn, scode)
	require.NoError(t, err)
	require.NotNil(t, sc)
	sc.Used = 1
	err = conn.Save(sc).Error
	require.NoError(t, err)
	// used code: lookup still succeeds (200) but the code is reported invalid
	res = checkCode(scode)
	assert.Equal(t, http.StatusOK, res.Code)
	err = json.NewDecoder(res.Body).Decode(&cr)
	require.NoError(t, err)
	assert.Equal(t, scode, cr[key.Code])
	assert.False(t, cr[key.Valid].(bool))
}
// TestSignupServer_DeleteSignupCode exercises the DeleteSignupCode REST
// handler: deleting an unknown code fails, deleting an existing code
// succeeds and removes it from the database.
func TestSignupServer_DeleteSignupCode(t *testing.T) {
	t.Parallel()
	s, _ := tsrv.RESTServer(t, false)
	srv := newSignupServer(s)
	j := srv.Config().JWT
	tok := thttp.UserToken(t, j, true, true)
	scode, err := srv.API.CreateSignupCode(context.Background(), 0)
	require.NoError(t, err)
	// deleteCode issues a DELETE for the given code, injecting the chi route
	// parameter the handler reads the code from.
	deleteCode := func(cd string) *httptest.ResponseRecorder {
		uri := Codes + rest.Root + cd
		r := thttp.Request(t, http.MethodDelete, uri, tok, nil, nil)
		if tok != "" {
			r, err = rest.ParseClaims(r, j, tok)
			require.NoError(t, err)
		}
		ctx := chi.NewRouteContext()
		ctx.URLParams = chi.RouteParams{
			Keys:   []string{key.Code},
			Values: []string{cd},
		}
		r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, ctx))
		w := httptest.NewRecorder()
		srv.DeleteSignupCode(w, r)
		return w
	}
	// bad code
	res := deleteCode("bad")
	assert.NotEqual(t, http.StatusOK, res.Code)
	// delete code
	res = deleteCode(scode)
	assert.Equal(t, http.StatusOK, res.Code)
	// code is gone
	conn := tconn.Conn(t, srv.Config())
	sc, err := codes.GetSignupCode(conn, scode)
	assert.Error(t, err)
	assert.Nil(t, sc)
}
|
package apns
import (
"fmt"
)
// NotificationError represents an APNs error-response packet, or an
// out-of-band transport error stored in OtherError.
type NotificationError struct {
	Command    uint8  // APNs command byte (8 for an error response)
	Status     uint8  // APNs status code (see Error for the meanings)
	Identifier uint32 // identifier of the notification that failed
	OtherError error  // non-nil when the error did not come from a 6-byte packet
}
// NewNotificationError makes a NotificationError from the error-response
// packet p and the transport error err. A well-formed packet is exactly
// 6 bytes (command, status, 4-byte big-endian identifier) and takes
// precedence over err; otherwise err (or a format error when err is nil)
// is stored in OtherError.
func NewNotificationError(p []byte, err error) (e NotificationError) {
	const packetLen = 1 + 1 + 4 // command + status + identifier
	if len(p) != packetLen {
		if err == nil {
			err = fmt.Errorf("Wrong data format, [%x]", p)
		}
		e.OtherError = err
		return e
	}
	e.Command = p[0]
	e.Status = p[1]
	e.Identifier = uint32(p[2])<<24 | uint32(p[3])<<16 | uint32(p[4])<<8 | uint32(p[5])
	return e
}
// Error implements the error interface. It prefers OtherError when set,
// flags non-error-response commands (only command 8 is an APNs error
// response), and otherwise renders the human-readable meaning of Status
// as documented by the APNs binary protocol.
func (e NotificationError) Error() string {
	if e.OtherError != nil {
		return e.OtherError.Error()
	}
	if e.Command != 8 {
		// FIX: message previously read "Unknow error".
		return fmt.Sprintf("Unknown error, command(%d), status(%d), id(%x)", e.Command, e.Status, e.Identifier)
	}
	status := ""
	switch e.Status {
	case 0:
		status = "No errors encountered"
	case 1:
		status = "Processing error"
	case 2:
		status = "Missing device token"
	case 3:
		status = "Missing topic"
	case 4:
		status = "Missing payload"
	case 5:
		status = "Invalid token size"
	case 6:
		status = "Invalid topic size"
	case 7:
		status = "Invalid payload size"
	case 8:
		status = "Invalid token"
	default:
		status = "None (unknown)"
	}
	return fmt.Sprintf("%s(%d): id(%x)", status, e.Status, e.Identifier)
}
// String implements fmt.Stringer by delegating to Error, so the value
// formats identically whether used as an error or printed directly.
func (e NotificationError) String() string {
	return e.Error()
}
|
package cmd
// resetFlags reset all flags back to the default values
// (both root flags default to false). Intended for test isolation between
// command invocations that mutate the package-level rootFlags.
func resetFlags() {
	rootFlags.json = false
	rootFlags.quiet = false
}
|
package main
// XMainHook is a no-op implementation of the application lifecycle hooks.
// Every hook returns nil; embed or copy this type to override specific
// stages.
type XMainHook struct {
}

// OnStartup is called after the program starts.
func (p *XMainHook) OnStartup() (err error) {
	return
}

// OnBeforeLoadConfig is called before the configuration file is loaded.
func (p *XMainHook) OnBeforeLoadConfig() (err error) {
	return
}

// OnAfterLoadConfig is called after the configuration file is loaded and
// before the service formally starts running.
func (p *XMainHook) OnAfterLoadConfig() (err error) {
	return
}

// OnBeforeReloadConfig is called before a hot reload of the configuration.
func (p *XMainHook) OnBeforeReloadConfig() (err error) {
	return
}

// OnAfterReloadConfig is called after a hot reload of the configuration.
func (p *XMainHook) OnAfterReloadConfig() (err error) {
	return
}

// OnShutdown is called when the program exits.
func (p *XMainHook) OnShutdown() (err error) {
	return
}
|
package main
import "fmt"
/*
!!! main article: http://www.sunshine2k.de/articles/coding/crc/understanding_crc.html
http://programm.ws/page.php?id=663
http://ru.bmstu.wiki/%D0%97%D0%B5%D1%80%D0%BA%D0%B0%D0%BB%D1%8C%D0%BD%D1%8B%D0%B9_%D1%82%D0%B0%D0%B1%D0%BB%D0%B8%D1%87%D0%BD%D1%8B%D0%B9_%D0%B0%D0%BB%D0%B3%D0%BE%D1%80%D0%B8%D1%82%D0%BC_CRC32_(asm_x86)
http://www.zlib.net/crc_v3.txt
Online check tool: https://www.ghsi.de/CRC/index.php?Polynom=100010011&Message=12345678
CRC supported list : http://protocoltool.sourceforge.net/CRC%20list.html
*/
const (
	// Generator polynomials for each CRC width (low bits; the implicit top
	// bit is handled by the shift-out test in the table generators).
	polinom8  uint8  = 0x13
	polinom16 uint16 = 0x1021
	polinom32 uint32 = 0x12211221
	bitsInByte int   = 8
	// tablesize should not be more than 256
	// (one precomputed remainder per possible byte value).
	tablesize int = 256
)

// Precomputed remainder tables, filled by the crc*tableGenerator functions
// before any checksum is computed.
var crc8table [tablesize]uint8
var crc16table [tablesize]uint16
var crc32table [tablesize]uint32
// --- MAIN ---
func main() {
// Here we generating precomputed values for each byte in 0..255, check the polinom also
crc8tableGenerator()
crc16tableGenerator()
crc32tableGenerator()
// Just check that tables was generated and not emty
fmt.Println("=== table of uint8 remainders ===")
showGeneratedTable8(&crc8table)
fmt.Printf("\n=== table of uint16 remainders ===\n")
showGeneratedTable16(&crc16table)
fmt.Printf("\n=== table of uint32 remainders ===\n")
showGeneratedTable32(&crc32table)
// Using crc calculation for slice of byte
message := []uint8{0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE}
fmt.Printf("\n=== crc results for provided message ===\n")
fmt.Printf("crc8: 0x%x\n", crc8(message))
fmt.Printf("crc16: 0x%x\n", crc16(message))
fmt.Printf("crc32: 0x%x\n\n", crc32(message))
}
// --- END OF MAIN ---
// crc8 returns the table-driven CRC-8 of message, one byte per step,
// using the precomputed crc8table.
func crc8(message []uint8) uint8 {
	var crc uint8
	for _, b := range message {
		crc = crc8table[crc^b]
	}
	return crc
}
// crc16 returns the table-driven CRC-16 of message. Each step indexes the
// table with the XOR of the current high byte and the input byte, then
// shifts the remaining state up by eight bits.
func crc16(message []uint8) uint16 {
	var crc uint16
	for _, b := range message {
		crc = crc16table[(crc>>8)^uint16(b)] ^ (crc << 8)
	}
	return crc
}
// crc32 returns the table-driven CRC-32 of message, processing one input
// byte per step against the precomputed crc32table.
func crc32(message []uint8) uint32 {
	var crc uint32
	for _, b := range message {
		crc = crc32table[(crc>>24)^uint32(b)] ^ (crc << 8)
	}
	return crc
}
func crc8tableGenerator() {
for y := 0; y < tablesize; y++ {
remainder := byte(y)
for i := 0; i < bitsInByte; i++ {
if remainder&0x80 != 0 {
remainder = (remainder << 1) ^ polinom8
} else {
remainder = (remainder << 1)
}
}
crc8table[y] = remainder
}
}
func crc16tableGenerator() {
for y := 0; y < tablesize; y++ {
remainder := uint16(y) << 8
for i := 0; i < bitsInByte; i++ {
if remainder&0x8000 != 0 {
remainder = (remainder << 1) ^ polinom16
} else {
remainder = remainder << 1
}
}
crc16table[y] = remainder
}
}
func crc32tableGenerator() {
for y := 0; y < tablesize; y++ {
remainder := uint32(y) << 24
for i := 0; i < bitsInByte; i++ {
if remainder&0x80000000 != 0 {
remainder = (remainder << 1) ^ polinom32
} else {
remainder = remainder << 1
}
}
crc32table[y] = remainder
}
}
// showGeneratedTable8 prints the CRC-8 table as comma-separated hex values,
// ending the last entry with a newline instead of a comma.
func showGeneratedTable8(t *[tablesize]uint8) {
	for y, v := range t {
		sep := ", "
		if y == tablesize-1 {
			sep = "\n"
		}
		fmt.Printf("0x%02X%s", v, sep)
	}
}
// showGeneratedTable16 prints the CRC-16 table as comma-separated hex
// values, ending the last entry with a newline instead of a comma.
func showGeneratedTable16(t *[tablesize]uint16) {
	for y, v := range t {
		sep := ", "
		if y == tablesize-1 {
			sep = "\n"
		}
		fmt.Printf("0x%04X%s", v, sep)
	}
}
// showGeneratedTable32 prints the CRC-32 table as comma-separated hex
// values, ending the last entry with a newline instead of a comma.
func showGeneratedTable32(t *[tablesize]uint32) {
	for y, v := range t {
		sep := ", "
		if y == tablesize-1 {
			sep = "\n"
		}
		fmt.Printf("0x%X%s", v, sep)
	}
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package graphics
import (
"context"
"time"
"chromiumos/tast/local/crostini"
"chromiumos/tast/local/graphics/glbench"
"chromiumos/tast/testing"
)
// Tast framework requires every subtest's Val have the same reflect type.
// This is a wrapper to wrap interface together: each Param carries a
// different glbench.Config implementation behind one concrete struct type.
type config struct {
	config glbench.Config // per-variant benchmark configuration
}
// init registers the GLBench test and all of its parameterized variants
// (ChromeOS host, hasty, and Crostini container flavors) with Tast.
func init() {
	testing.AddTest(&testing.Test{
		Func:         GLBench,
		LacrosStatus: testing.LacrosVariantUnknown,
		Desc:         "Run glbench (a benchmark that times graphics intensive activities), check results and report its performance",
		Contacts: []string{
			"andrescj@chromium.org",
			"pwang@chromium.org",
			"chromeos-gfx@google.com",
			"oka@chromium.org", // Tast port
		},
		SoftwareDeps: []string{"no_qemu"},
		Vars:         []string{"keepState", "ui.gaiaPoolDefault"},
		Params: []testing.Param{
			// Parameters generated by glbench_test.go. DO NOT EDIT.
			{
				Name:      "",
				Val:       config{config: &glbench.CrosConfig{}},
				Timeout:   3 * time.Hour,
				ExtraAttr: []string{"group:graphics", "graphics_nightly"},
				Fixture:   "graphicsNoChrome",
			}, {
				Name:      "hasty",
				Val:       config{config: &glbench.CrosConfig{Hasty: true}},
				ExtraAttr: []string{"group:mainline", "informational"},
				Timeout:   5 * time.Minute,
				Fixture:   "graphicsNoChrome",
			}, {
				Name:              "crostini",
				ExtraAttr:         []string{"group:graphics", "graphics_nightly"},
				ExtraData:         []string{crostini.GetContainerMetadataArtifact("buster", false), crostini.GetContainerRootfsArtifact("buster", false)},
				ExtraSoftwareDeps: []string{"chrome", "crosvm_gpu", "vm_host", "dlc"},
				Pre:               crostini.StartedByDlcBuster(),
				Timeout:           1 * time.Hour,
				Val:               config{config: &glbench.CrostiniConfig{}},
			}, {
				Name:              "crostini_hasty_stable",
				ExtraAttr:         []string{"group:graphics", "graphics_perbuild", "group:mainline", "informational"},
				ExtraData:         []string{crostini.GetContainerMetadataArtifact("buster", false), crostini.GetContainerRootfsArtifact("buster", false)},
				ExtraSoftwareDeps: []string{"chrome", "crosvm_gpu", "vm_host", "dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Pre:               crostini.StartedByDlcBuster(),
				Timeout:           5 * time.Minute,
				Val:               config{config: &glbench.CrostiniConfig{Hasty: true}},
			}, {
				Name:              "crostini_hasty_unstable",
				ExtraAttr:         []string{"group:graphics", "graphics_perbuild", "group:mainline", "informational"},
				ExtraData:         []string{crostini.GetContainerMetadataArtifact("buster", false), crostini.GetContainerRootfsArtifact("buster", false)},
				ExtraSoftwareDeps: []string{"chrome", "crosvm_gpu", "vm_host", "dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Pre:               crostini.StartedByDlcBuster(),
				Timeout:           5 * time.Minute,
				Val:               config{config: &glbench.CrostiniConfig{Hasty: true}},
			},
		},
	})
}
// GLBench extracts the per-variant benchmark configuration, resolves the
// precondition or fixture value, and runs glbench, failing the test on error.
func GLBench(ctx context.Context, s *testing.State) {
	// Name the local cfg so it does not shadow the config type.
	cfg := s.Param().(config).config
	val := s.PreValue()
	if val == nil {
		// Variants without a precondition supply their value via a fixture.
		val = s.FixtValue()
	}
	if err := glbench.Run(ctx, s.OutDir(), val, cfg); err != nil {
		s.Fatal("GLBench fails: ", err)
	}
}
|
package main
import (
"fmt"
"os"
"github.com/unixpickle/gocube"
)
// main reads a sticker-cube description from stdin, validates it, and
// streams progressively better solutions from the solver.
func main() {
	sc, err := gocube.InputStickerCube()
	if err != nil {
		fmt.Println("Failed to read stickers:", err)
		os.Exit(1)
	}
	cc, err := sc.CubieCube()
	if err != nil {
		fmt.Println("Invalid stickers:", err)
		os.Exit(1)
	}
	fmt.Println("Solving...")
	// 30 is the maximum solution depth the solver will search.
	solver := gocube.NewSolver(*cc, 30)
	for solution := range solver.Solutions() {
		fmt.Println("Solution:", solution, "-", len(solution), "moves")
	}
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// getLuckyNum simulates a slow fortune-telling: it sleeps a random amount
// of time (up to 3 seconds) and then sends a lucky number in [0, 10) on c.
func getLuckyNum(c chan<- int) {
	fmt.Println("...")
	// FIX: seed with nanosecond precision. The previous time.Now().Unix()
	// seed has one-second resolution, so calls within the same second
	// produced the identical "random" number.
	rand.Seed(time.Now().UnixNano())
	// 占いにかかる時間はランダム (the fortune-telling takes a random time)
	time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond)
	c <- rand.Intn(10)
}
// main asks for today's lucky number in a goroutine and blocks until the
// answer arrives on the channel.
func main() {
	fmt.Println("what is today's lucky number?")
	result := make(chan int)
	go getLuckyNum(result)
	fmt.Printf("Today's your lucky number is %d!\n", <-result)
}
|
package main
import (
"bytes"
"fmt"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/bchadwic/gh-graph/pkg/color"
"github.com/bchadwic/gh-graph/pkg/stats"
lg "github.com/charmbracelet/lipgloss"
"github.com/cli/cli/git"
"github.com/spf13/cobra"
)
// main builds the graph command and runs it, printing any error and
// exiting non-zero on failure.
func main() {
	if err := NewCmdGraph().Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
const (
	// Dimensions of the GitHub contribution calendar grid.
	WeeksInAYear = 52
	DaysInAWeek  = 7
)

// GraphOptions holds the command-line options for rendering the graph.
type GraphOptions struct {
	Username string // GitHub user to render; defaults to the local git user.name
	Matrix   bool   // render cells as matrix digital rain
	Solid    bool   // render cells as solid blocks
}
// NewCmdGraph builds the cobra "graph" command, binding the username and
// rendering-style flags to a GraphOptions passed to runGraph.
func NewCmdGraph() *cobra.Command {
	opts := &GraphOptions{}
	cmd := &cobra.Command{
		Use:   "graph",
		Short: "Display your GitHub contribution graph",
		Long:  "Display your GitHub contribution graph in the terminal",
		RunE: func(cmd *cobra.Command, args []string) error {
			return runGraph(opts)
		},
		// Errors are reported by main; suppress cobra's own output.
		SilenceErrors: true,
		SilenceUsage:  true,
	}
	cmd.Flags().StringVarP(&opts.Username, "username", "u", "", "Specify a user")
	cmd.Flags().BoolVarP(&opts.Matrix, "matrix", "m", false, "Set cells to matrix digital rain")
	cmd.Flags().BoolVarP(&opts.Solid, "solid", "s", false, "Set cells to solid blocks")
	return cmd
}
// runGraph resolves the target username (falling back to the local git
// user.name), fetches the user's GitHub profile page, and renders the
// contribution graph scraped from it.
func runGraph(opts *GraphOptions) error {
	if opts.Username == "" {
		// Ensure git is available before shelling out for the configured name.
		if _, err := git.GitCommand(); err != nil {
			return err
		}
		output, err := exec.Command("git", "config", "user.name").Output()
		if err != nil {
			return err
		}
		opts.Username = firstLine(output)
	}
	resp, err := http.Get("https://github.com/" + opts.Username)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		if resp.StatusCode == http.StatusNotFound {
			// FIX: the flag is registered as --username (-u); the message
			// previously told users to pass the nonexistent --user flag.
			return fmt.Errorf("user %s was not found on GitHub, choose a new user with -u / --username", opts.Username)
		}
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return err
	}
	graph, stats := getGraph(doc)
	printGraph(opts, graph, stats)
	return nil
}
// printGraph renders the 7x53 contribution grid to stdout inside a rounded
// border, coloring each non-zero cell by contribution intensity and
// appending summary statistics.
func printGraph(opts *GraphOptions, graph [][]int, stats *stats.Stats) {
	cp := &color.ColorPalette{}
	cp.Initialize(stats)
	// Row labels: only Mon/Wed/Fri are printed, matching GitHub's layout.
	DaysOfTheWeek := []string{"    ", "Mon ", "    ", "Wed ", "    ", "Fri ", "    "}
	b := strings.Builder{}
	for i, x := range graph {
		b.WriteString(DaysOfTheWeek[i])
		for _, y := range x {
			s := lg.Style{}
			if y != 0 {
				// Cell glyph colored by how many contributions the day had.
				s = lg.NewStyle().SetString("#").Foreground(lg.Color(cp.GetColor(y)))
			} else {
				s = lg.NewStyle().SetString(" ")
			}
			b.WriteString(s.String())
		}
		b.WriteString("\n")
	}
	b.WriteString(
		fmt.Sprintf("%s\ncontributions in the last year: %d\nlongest streak: %d, average: %.3f/day, best day: %d",
			"github.com/"+opts.Username, stats.TotalContributions, stats.LongestStreak, stats.AveragePerDay, stats.BestDay))
	dialogBoxStyle := lg.NewStyle().SetString(b.String()).
		Border(lg.RoundedBorder()).
		Margin(1).
		BorderTop(true).
		BorderLeft(true).
		BorderRight(true).
		BorderBottom(true)
	fmt.Println(dialogBoxStyle)
	return
}
// getGraph scrapes the contribution-calendar cells from a GitHub profile
// page into a 7x53 grid (day-of-week x week) and accumulates summary
// statistics (total, best day, longest streak, daily average).
func getGraph(doc *goquery.Document) ([][]int, *stats.Stats) {
	graph := make([][]int, DaysInAWeek)
	for i := 0; i < DaysInAWeek; i++ {
		graph[i] = make([]int, WeeksInAYear+1)
	}
	stats := &stats.Stats{
		TotalContributions: 0,
		LongestStreak:      0,
		AveragePerDay:      0,
		BestDay:            0,
	}
	k := -1    // current week column; incremented every DaysInAWeek cells
	count := 0 // number of parsed calendar cells
	curr := 0  // length of the streak currently in progress
	doc.Find(".js-calendar-graph rect[data-count]").Each(func(i int, s *goquery.Selection) {
		cell, exists := s.Attr("data-count")
		j := i % DaysInAWeek
		if j == 0 {
			k++
		}
		if exists {
			contribution, _ := strconv.Atoi(cell)
			count++
			if contribution > 0 {
				stats.TotalContributions += contribution
				curr++
				if contribution > stats.BestDay {
					stats.BestDay = contribution
				}
			} else {
				if curr > stats.LongestStreak {
					stats.LongestStreak = curr
				}
				curr = 0
			}
			graph[j][k] = contribution
		}
	})
	// FIX: a streak still running on the final calendar day was never
	// compared against LongestStreak; flush it after the walk.
	if curr > stats.LongestStreak {
		stats.LongestStreak = curr
	}
	// FIX: guard against division by zero (NaN average) when no cells parsed.
	if count > 0 {
		stats.AveragePerDay = float32(stats.TotalContributions) / float32(count)
	}
	return graph, stats
}
// firstLine returns output up to (but excluding) the first newline, or the
// whole slice rendered as a string when it contains none.
func firstLine(output []byte) string {
	if i := bytes.IndexByte(output, '\n'); i >= 0 {
		output = output[:i]
	}
	return string(output)
}
|
package service
import (
"github.com/bitschain/panicgo/model"
"github.com/manveru/faker"
"github.com/stretchr/testify/assert"
"testing"
)
// TestPanicService_CreateNewUser registers a user built from faker data and
// verifies the returned record echoes the requested name and email.
func TestPanicService_CreateNewUser(t *testing.T) {
	s := GetTestService()
	fakeUser, _ := faker.New("en")
	data := model.UserRegister{Name: fakeUser.Name(), Email: fakeUser.Email(), Password: "123456"}
	user, err := s.RegisterUser(data)
	assert.Nil(t, err)
	assert.Equal(t, data.Name, user.Name)
	assert.Equal(t, data.Email, user.Email)
}
|
package models
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
// Delivery is the data format of a delivery (shipping) method.
type Delivery struct {
	DeliveryId     int64  `json:"delivery_id" bson:"delivery_id"`       // delivery method id, generated by the frontend
	ComId          int64  `json:"com_id" bson:"comid"`                  // company id
	DeliveryCom    string `json:"delivery_com" bson:"deliverycom"`      // delivery company
	DeliveryPerson string `json:"delivery_person" bson:"deliveryperson"` // delivery person
	Phone          string `json:"phone" bson:"phone"`                   // delivery person's phone number
	Config         string `json:"config" bson:"config"`                 // configuration parameters
	IsUsing        bool   `json:"is_using" bson:"is_using"`             // whether this method is enabled
}

// DeliveryResult wraps a list of delivery methods for JSON responses.
type DeliveryResult struct {
	Delivery []Delivery `json:"delivery"`
}
// getDeliveryCollection returns the MongoDB "delivery" collection from the
// package-level client.
func getDeliveryCollection() *mongo.Collection {
	return Client.Collection("delivery")
}
// SelectDeliveryByComID returns every delivery method configured for the
// company identified by comID.
func SelectDeliveryByComID(comID int64) (*DeliveryResult, error) {
	cur, err := getDeliveryCollection().Find(context.TODO(), bson.M{"comid": comID})
	if err != nil {
		return nil, err
	}
	// FIX: the cursor was previously never closed (resource leak).
	defer cur.Close(context.TODO())
	rs := new(DeliveryResult)
	for cur.Next(context.TODO()) {
		var delivery Delivery
		// FIX: a Decode failure previously appended a zero-value Delivery
		// and silently continued; surface the error instead.
		if err = cur.Decode(&delivery); err != nil {
			return nil, err
		}
		rs.Delivery = append(rs.Delivery, delivery)
	}
	// FIX: report iteration errors instead of returning a partial result.
	if err = cur.Err(); err != nil {
		return nil, err
	}
	return rs, nil
}
func UpdateDeliveryIsUsingFlag(comID int64, Delivery []int64, isUsing bool) error {
_, err := getDeliveryCollection().UpdateMany(context.TODO(),
bson.M{"delivery_id": bson.M{"$in": Delivery}, "comid": comID},
bson.M{"$set": bson.M{"is_using": isUsing}})
return err
}
|
// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package chrome
import (
"context"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome/internal/extension"
)
// ComputeExtensionID computes the 32-character ID that Chrome will use for an unpacked
// extension in dir. The extension's manifest file must contain the "key" field.
// Use the following command to generate a new key:
// openssl genrsa 2048 | openssl rsa -pubout -outform der | openssl base64 -A
func ComputeExtensionID(dir string) (string, error) {
	// Thin re-export of the internal extension package implementation.
	return extension.ComputeExtensionID(dir)
}

// AddTastLibrary introduces tast library into the page for the given conn.
// This introduces a variable named "tast" to its scope, and it is the
// caller's responsibility to avoid the conflict.
func AddTastLibrary(ctx context.Context, conn *Conn) error {
	// Ensure the page is loaded so the tast library will be added properly.
	if err := conn.WaitForExpr(ctx, `document.readyState === "complete"`); err != nil {
		return errors.Wrap(err, "failed waiting for page to load")
	}
	return conn.Eval(ctx, extension.TastLibraryJS, nil)
}

// ExtensionBackgroundPageURL returns the URL to the background page for
// the extension with the supplied ID.
func ExtensionBackgroundPageURL(extID string) string {
	return extension.BackgroundPageURL(extID)
}
|
package events
// Alert simple alert struct
// carrying the fields of a cloud security alert event as received in JSON.
type Alert struct {
	ResourceID       string `json:"resourceId"`
	AlertRuleName    string `json:"alertRuleName"`
	AccountName      string `json:"accountName"`
	ResourceRegionID string `json:"resourceRegionId"`
	CloudType        string `json:"cloudType"`
	AlertID          string `json:"alertId"`
	Severity         string `json:"severity"`
	PolicyName       string `json:"policyName"`
	ResourceName     string `json:"resourceName"`
	ResourceRegion   string `json:"resourceRegion"`
	AccountID        string `json:"accountId"`
	PolicyID         string `json:"policyId"`
}
|
package main
import (
"fmt"
"sort"
)
// main is a map-practice playground: basic map operations, nested maps,
// slices of maps, sorted key iteration, and a key/value inversion exercise.
func main() {
	m := map[int]string{1: "ok"}
	m[2] = "no"
	fmt.Println(m)
	fmt.Println(m[1])
	delete(m, 2)
	fmt.Println(m[2])

	mm := map[int]map[int]string{}
	mm[1] = map[int]string{}
	a, ok := mm[2][1]
	// Inner maps must be created (make/literal) before writing, otherwise
	// assigning through a nil map is a runtime panic.
	if !ok {
		mm[2] = map[int]string{}
	}
	mm[2][1] = "good ok"
	a, ok = mm[2][1]
	fmt.Println(a, ok)

	sm := make([]map[int]string, 5)
	for i := range sm {
		sm[i] = map[int]string{1: "ok"}
		// Indexing into the slice mutates the stored map; a range value
		// would only be a copy of the map header.
		fmt.Println(sm[i])
	}
	fmt.Println(sm)

	// Maps are unordered, but keys can be sorted via a slice.
	mmap := map[int]string{1: "a", 2: "b", 3: "c", 4: "d"}
	keys := make([]int, 0, len(mmap))
	for k := range mmap {
		keys = append(keys, k)
	}
	sort.Ints(keys)
	fmt.Println(keys)

	// Homework: swap keys and values of a map.
	src := map[int]string{1: "a", 2: "b", 3: "c"}
	inverted := make(map[string]int, len(src))
	for k, v := range src {
		inverted[v] = k
	}
	fmt.Println(src)
	fmt.Println(inverted)
}
|
package opsgenie
import (
"flag"
"testing"
"github.com/codegangsta/cli"
)
// TestMandatoryFlagsNotProvided expects the mandatory-flags error when
// neither apiKey nor name is supplied.
func TestMandatoryFlagsNotProvided(t *testing.T) {
	flagsTestHelper(t, mandatoryFlags, createCli("", "", ""))
}

// TestMandatoryFlagsNameNotProvided expects the mandatory-flags error when
// only apiKey is supplied.
func TestMandatoryFlagsNameNotProvided(t *testing.T) {
	flagsTestHelper(t, mandatoryFlags, createCli("key", "", ""))
}

// TestMandatoryFlagsApiKeyNotProvided expects the mandatory-flags error when
// only name is supplied.
func TestMandatoryFlagsApiKeyNotProvided(t *testing.T) {
	flagsTestHelper(t, mandatoryFlags, createCli("", "name", ""))
}

// TestFlagWrongValue expects the interval-validation error for a
// non-parsable intervalUnit value.
func TestFlagWrongValue(t *testing.T) {
	flagsTestHelper(t, intervalWrong, createCli("key", "name", "fake"))
}
// TestAllKeysProvided verifies that every CLI flag value is copied into the
// OpsArgs struct returned by extractArgs.
func TestAllKeysProvided(t *testing.T) {
	ops := extractArgs(createCliAll("apiKey", "name", "intervalUnit", "description", 11, true))
	// FIX: the comparisons were previously joined with &&, so the test could
	// only fail when *every* field was wrong; any single mismatch must fail.
	if ops.ApiKey != "apiKey" || ops.Name != "name" || ops.Description != "description" ||
		ops.Interval != 11 || ops.IntervalUnit != "intervalUnit" || !ops.Delete {
		t.Errorf("OpsArgs struct not correct [%+v]", ops)
	}
}
// flagsTestHelper runs extractArgs with the package-level logAndExit hook
// stubbed out, and verifies the expected validation message msg is produced.
func flagsTestHelper(t *testing.T, msg string, c *cli.Context) {
	// FIX: restore the global logAndExit hook after this test; previously the
	// stub leaked into every later test in the package.
	orig := logAndExit
	defer func() { logAndExit = orig }()
	var incomingMsg string
	logAndExit = func(msg string) {
		incomingMsg = msg
	}
	extractArgs(c)
	if incomingMsg != msg {
		t.Errorf("Wrong error message [%s]", incomingMsg)
	}
}
// createCli builds a cli context with only the three string flags set;
// the remaining flags take neutral defaults.
func createCli(apiKey string, name string, intervalUnit string) *cli.Context {
	return createCliAll(apiKey, name, intervalUnit, "", 0, true)
}

// createCliAll builds a cli context with apiKey and name in the global flag
// set and the remaining options in the command-local flag set, mirroring how
// the real CLI separates global and per-command flags.
func createCliAll(apiKey string, name string, intervalUnit string, description string, interval int, delete bool) *cli.Context {
	globalSet := flag.NewFlagSet("testGlobal", 0)
	globalSet.String("apiKey", apiKey, "")
	globalSet.String("name", name, "")
	set := flag.NewFlagSet("test", 0)
	set.String("description", description, "")
	set.Int("interval", interval, "")
	set.String("intervalUnit", intervalUnit, "")
	set.Bool("delete", delete, "")
	return cli.NewContext(nil, set, globalSet)
}
|
package vault_test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"testing"
"github.com/ecadlabs/signatory/pkg/config"
"github.com/ecadlabs/signatory/pkg/vault"
)
type HandleFunc func(req *http.Request) (*http.Response, error)
type MockClient struct {
DoFunc HandleFunc
}
func (m *MockClient) Do(req *http.Request) (*http.Response, error) {
if m.DoFunc != nil {
return m.DoFunc(req)
}
return &http.Response{}, nil
}
// mockLogin returns a handler that answers every request with a 200
// response carrying a fixed access-token JSON payload.
func mockLogin() func(req *http.Request) (*http.Response, error) {
	return func(req *http.Request) (*http.Response, error) {
		resp := &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewBufferString(`{ "access_token" : "test"}`)),
			Header:     make(http.Header),
		}
		return resp, nil
	}
}
// mockSign returns a handler that answers every request with the given
// status code and the given body.
func mockSign(body string, status int) func(req *http.Request) (*http.Response, error) {
	return func(req *http.Request) (*http.Response, error) {
		resp := &http.Response{
			StatusCode: status,
			Body:       ioutil.NopCloser(bytes.NewBufferString(body)),
			Header:     make(http.Header),
		}
		return resp, nil
	}
}
// mockRequest returns a dispatcher that routes requests for the Azure AD
// login host to loginFunc and every other request to signFunc.
func mockRequest(loginFunc HandleFunc, signFunc HandleFunc) func(req *http.Request) (*http.Response, error) {
	return func(req *http.Request) (*http.Response, error) {
		if req.Host != "login.microsoftonline.com" {
			return signFunc(req)
		}
		return loginFunc(req)
	}
}
// TestAzureSign signs a payload against a mocked Azure Key Vault (login plus
// sign endpoints) and checks the decoded signature bytes.
func TestAzureSign(t *testing.T) {
	do := mockRequest(mockLogin(), mockSign(`{ "kid" : "test", "value": "123455"}`, 200))
	az := vault.NewAzureVault(config.AzureConfig{}, &MockClient{do})
	bytesToSign := []byte{0x03, 0xff, 0x33}
	key := &vault.AzureKey{
		Key: vault.AzureKeyDetail{
			ID: "test",
		},
	}
	sig, err := az.Sign(context.TODO(), bytesToSign, key)
	if err != nil {
		fmt.Printf("Unexpected error was thrown: %s\n", err.Error())
		t.Fail()
	}
	// Expected bytes are the base64url-decoded form of the mocked "value".
	expected := []byte{215, 109, 248, 231}
	if string(sig) != string(expected) {
		fmt.Printf("Expected %s got %s\n", string(expected), string(sig))
		t.Fail()
	}
}
// TestAzureSignError verifies that a non-200 sign response from the mocked
// vault surfaces as an error from Sign.
func TestAzureSignError(t *testing.T) {
	do := mockRequest(mockLogin(), mockSign(`Key not found`, 404))
	az := vault.NewAzureVault(config.AzureConfig{}, &MockClient{do})
	bytesToSign := []byte{0x03, 0xff, 0x33}
	_, err := az.Sign(context.TODO(), bytesToSign, nil)
	if err == nil {
		fmt.Printf("Expected error got nothing\n")
		t.Fail()
	}
}
|
package main
//1822. 数组元素积的符号
//已知函数signFunc(x) 将会根据 x 的正负返回特定值:
//
//如果 x 是正数,返回 1 。
//如果 x 是负数,返回 -1 。
//如果 x 是等于 0 ,返回 0 。
//给你一个整数数组 nums 。令 product 为数组 nums 中所有元素值的乘积。
//
//返回 signFunc(product) 。
//
//
//
//示例 1:
//
//输入:nums = [-1,-2,-3,-4,3,2,1]
//输出:1
//解释:数组中所有值的乘积是 144 ,且 signFunc(144) = 1
//示例 2:
//
//输入:nums = [1,5,0,2,-3]
//输出:0
//解释:数组中所有值的乘积是 0 ,且 signFunc(0) = 0
//示例 3:
//
//输入:nums = [-1,1,-1,1,-1]
//输出:-1
//解释:数组中所有值的乘积是 -1 ,且 signFunc(-1) = -1
//
//
//提示:
//
//1 <= nums.length <= 1000
//-100 <= nums[i] <= 100
// arraySign returns the sign of the product of nums: 1 for a positive
// product, -1 for a negative product, 0 when any element is zero
// (LeetCode 1822).
func arraySign(nums []int) int {
	// FIX: sign must start at 1 (empty product is positive); it was
	// previously zero-initialized, so the function always returned 0.
	sign := 1
	for _, v := range nums {
		if v == 0 {
			// A single zero factor makes the whole product zero.
			return 0
		}
		// FIX: flip the sign on *negative* factors; the original flipped
		// on positive values, producing the wrong answer.
		if v < 0 {
			sign = -sign
		}
	}
	return sign
}
|
package tracer
import "github.com/nkbai/tgo/log"
// tracingPoints tracks the trace start/end addresses and which go routines
// are currently between them.
type tracingPoints struct {
	startAddressList []uint64 // addresses at which tracing begins
	endAddressList   []uint64 // addresses at which tracing ends
	goRoutinesInside []int64  // ids of go routines currently being traced
}
// IsStartAddress reports whether addr is one of the registered start
// addresses.
func (p *tracingPoints) IsStartAddress(addr uint64) bool {
	for i := range p.startAddressList {
		if p.startAddressList[i] == addr {
			return true
		}
	}
	return false
}
// IsEndAddress reports whether addr is one of the registered end addresses.
func (p *tracingPoints) IsEndAddress(addr uint64) bool {
	for i := range p.endAddressList {
		if p.endAddressList[i] == addr {
			return true
		}
	}
	return false
}
// Enter records goRoutineID as being inside the tracing point. A go routine
// that has already entered is left untouched.
func (p *tracingPoints) Enter(goRoutineID int64) {
	for _, id := range p.goRoutinesInside {
		if id == goRoutineID {
			return
		}
	}
	log.Debugf("Start tracing of go routine #%d", goRoutineID)
	p.goRoutinesInside = append(p.goRoutinesInside, goRoutineID)
}
// Exit removes goRoutineID from the inside list; it is a no-op when the go
// routine is not currently tracked.
func (p *tracingPoints) Exit(goRoutineID int64) {
	log.Debugf("End tracing of go routine #%d", goRoutineID)
	for i, id := range p.goRoutinesInside {
		if id != goRoutineID {
			continue
		}
		p.goRoutinesInside = append(p.goRoutinesInside[:i], p.goRoutinesInside[i+1:]...)
		return
	}
}
// Inside reports whether the go routine is currently inside the tracing
// point.
func (p *tracingPoints) Inside(goRoutineID int64) bool {
	for i := range p.goRoutinesInside {
		if p.goRoutinesInside[i] == goRoutineID {
			return true
		}
	}
	return false
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"fmt"
"strings"
"testing"
"github.com/cockroachdb/datadriven"
)
// TestPretty runs the data-driven pretty-printing cases found under
// testdata, delegating each file to prettyTest.
func TestPretty(t *testing.T) {
	datadriven.Walk(t, "testdata", func(t *testing.T, path string) {
		datadriven.RunTest(t, path, prettyTest)
	})
}
// prettyTest handles a single datadriven directive. For the "pretty"
// command it pretty-prints d.Input at a target width (arg "n", default
// defaultWidth; arg "expr" switches to exprgen parsing), verifies the
// output round-trips token-for-token, checks line lengths, and returns
// the pretty-printed text.
func prettyTest(t *testing.T, d *datadriven.TestData) string {
	switch d.Cmd {
	case "pretty":
		n := defaultWidth
		if d.HasArg("n") {
			d.ScanArgs(t, "n", &n)
		}
		exprgen := d.HasArg("expr")
		s, err := prettyify(strings.NewReader(d.Input), n, exprgen)
		if err != nil {
			return fmt.Sprintf("ERROR: %s", err)
		}
		// Verify we round trip correctly by ensuring non-whitespace
		// scanner tokens are encountered in the same order.
		{
			origToks := toTokens(d.Input)
			prettyToks := toTokens(s)
			for i, tok := range origToks {
				if i >= len(prettyToks) {
					t.Fatalf("pretty ended early after %d tokens", i+1)
				}
				if prettyToks[i] != tok {
					t.Log(s)
					t.Logf("expected %q", tok)
					t.Logf("got %q", prettyToks[i])
					t.Fatalf("token %d didn't match", i+1)
				}
			}
			if len(prettyToks) > len(origToks) {
				t.Fatalf("orig ended early after %d tokens", len(origToks))
			}
		}
		// Verify lines aren't too long. Comment lines ('#') are exempt.
		// NOTE(review): this checks against defaultWidth even when the "n"
		// argument overrides the target width — confirm that is intentional.
		{
			for i, line := range strings.Split(s, "\n") {
				if strings.HasPrefix(line, "#") {
					continue
				}
				if len(line) > defaultWidth {
					t.Errorf("line %d is %d chars, expected <= %d:\n%s", i+1, len(line), defaultWidth, line)
				}
			}
		}
		return s
	default:
		t.Fatal("unknown command")
		return ""
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
)
// main reads the parenthesis instructions named by os.Args[1] and prints
// the final floor and the 1-based position of the instruction that first
// enters the basement (floor -1). '(' goes up one floor, ')' goes down.
func main() {
	raw, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	var (
		floor    = 0
		position = 0     // 1-based index of the first basement-entering instruction
		entered  = false // true once the basement has been reached
	)
	for _, r := range string(raw) {
		// Puzzle input files commonly end with a trailing newline; skip
		// whitespace instead of aborting with "invalid input".
		if r == '\n' || r == '\r' || r == ' ' || r == '\t' {
			continue
		}
		// Stop advancing the position once the basement was entered.
		if !entered {
			position++
		}
		switch r {
		case '(':
			floor++
		case ')':
			floor--
		default:
			fmt.Println("invalid input")
			os.Exit(1)
		}
		if floor < 0 {
			entered = true
		}
	}
	fmt.Println(floor, position)
}
|
package gosi
import (
"encoding/json"
"fmt"
"net"
"time"
"github.com/shirou/gopsutil/v3/host"
psnet "github.com/shirou/gopsutil/v3/net"
)
//const timeformat = "2006/01/02 15:04:05"
// timeformat is the reference layout used for every timestamp emitted by
// this package (nanosecond precision).
const timeformat = "2006/01/02 15:04:05.000000000"

// IpAddr pairs a network interface name with one of its IPv4 addresses.
type IpAddr struct {
	Name string `json:"name"`
	IpAddr string `json:"ipaddr"`
}

// InfoStat is the JSON-serializable host summary produced by Info.
// All values are pre-formatted strings so the struct can be marshalled
// directly for display.
type InfoStat struct {
	Hostname string `json:"hostname"`
	OS string `json:"os"`
	Platform string `json:"platform"`
	PlatformFamily string `json:"platformFamily"`
	PlatformVersion string `json:"platformVersion"`
	KernelArch string `json:"kernelArch"`
	Uptime string `json:"uptime"`
	BootTime string `json:"bootTime"`
	ServerTime string `json:"serverTime"`
	CpuTemperature string `json:"cpuTemperature"`
	IpAddres []IpAddr `json:"ipaddr"`
}
// Json returns the JSON encoding of the stat.
func (s InfoStat) Json() []byte {
	// Marshalling a struct made of plain strings and a string-field slice
	// cannot fail, so the error is deliberately discarded.
	b, _ := json.Marshal(s)
	return b
}
// Info collects host information (OS/platform details, uptime, boot
// time, server time, and the list of routable IPv4 addresses) into an
// InfoStat. Errors from the gopsutil calls are ignored best-effort.
func Info() *InfoStat {
	ret := &InfoStat{}
	i, _ := host.Info()
	ret.Hostname = i.Hostname
	ret.OS = i.OS
	ret.Platform = i.Platform
	ret.PlatformFamily = i.PlatformFamily
	ret.PlatformVersion = i.PlatformVersion
	ret.KernelArch = i.KernelArch
	ret.Uptime = uptime2string(i.Uptime)
	// BootTime is whole seconds since the epoch; there is no sub-second
	// component to recover, so the nanosecond argument must be zero.
	// (Passing BootTime%1e9 as nanoseconds fabricated a bogus offset.)
	ret.BootTime = time.Unix(int64(i.BootTime), 0).Format(timeformat)
	ret.ServerTime = time.Now().Format(timeformat)
	n, _ := psnet.Interfaces()
	ipaddres := make([]IpAddr, 0)
	for _, v := range n {
		for _, a := range v.Addrs {
			ipaddr, ipnet, err := net.ParseCIDR(a.Addr)
			if err != nil {
				fmt.Println(err)
				// ipnet is nil on error; dereferencing it below would panic.
				continue
			}
			// Keep only non-loopback, non-link-local IPv4 addresses.
			if ipnet.IP.To4() != nil && !ipnet.IP.IsLoopback() && !ipnet.IP.IsLinkLocalUnicast() {
				ipaddres = append(ipaddres, IpAddr{v.Name, ipaddr.String()})
			}
		}
	}
	ret.IpAddres = ipaddres
	// CPU temperature is unavailable in virtual environments, and Windows
	// does not report VirtualizationSystem, so this stays disabled for now.
	//ret.CpuTemperature, _ = getTemperatures()
	return ret
}
// uptime2string formats an uptime given in seconds like the uptime(1)
// command: "D days, H:MM" once at least one full day has elapsed,
// otherwise "H:MM". The value is rounded to the nearest minute before
// being split into days/hours/minutes, so rounding can never produce an
// inconsistent "24:00" (the old code printed "24:00" for exactly one day
// and for values that rounded up to a full day).
func uptime2string(uptime uint64) string {
	const minutesPerDay = 24 * 60
	// Round first, then decompose, so all fields stay consistent.
	totalMinutes := int((time.Duration(uptime) * time.Second).Round(time.Minute) / time.Minute)
	days := totalMinutes / minutesPerDay
	rest := totalMinutes % minutesPerDay
	h := rest / 60
	m := rest % 60
	if days > 0 {
		return fmt.Sprintf("%d days, %d:%02d", days, h, m)
	}
	return fmt.Sprintf("%d:%02d", h, m)
}
|
package model
import (
"go_crud/database"
entity "go_crud/entities"
"html"
"strings"
)
type User entity.User
// FindAll returns every user joined against the roles table.
// On error a pointer to an empty slice is returned together with the error.
func (user *User) FindAll() (*[]User, error) {
	var users []User
	// NOTE(review): gorm's Preload normally takes the struct association
	// name (e.g. "Role"), not the table name "roles" — confirm this
	// preload actually populates the association.
	err := database.Connector.Joins("JOIN roles ON roles.id=users.role_id").Preload("roles").Find(&users).Error
	if err != nil {
		return &[]User{}, err
	}
	return &users, err
}
// FindById loads the user with the given primary key into the receiver
// and returns it; on error a fresh empty User is returned instead.
func (user *User) FindById(id int32) (*User, error) {
	if err := database.Connector.Model(User{}).First(&user, id).Error; err != nil {
		return &User{}, err
	}
	return user, nil
}
// Delete removes the user row with the given id.
// The gorm error (nil on success) is propagated directly; the previous
// if err != nil { return err } / return nil dance was redundant.
func (user *User) Delete(id int32) error {
	return database.Connector.Model(&User{}).Where("id=?", id).Delete(&User{}).Error
}
// Create sanitizes Name and Surname (trim whitespace + HTML-escape) and
// inserts newUser, returning the created record.
func (user *User) Create(newUser *User) (*User, error) {
	newUser.Name = html.EscapeString(strings.TrimSpace(newUser.Name))
	newUser.Surname = html.EscapeString(strings.TrimSpace(newUser.Surname))
	err := database.Connector.Model(newUser).Create(newUser).Error
	if err != nil {
		return &User{}, err
	}
	// Bug fix: return the record that was just created (with any
	// DB-assigned fields), not the method receiver.
	return newUser, nil
}
// Update writes the receiver's Name and Surname to the row with the
// given id, then re-reads and returns the updated record.
func (user *User) Update(id int32) (*User, error) {
	// UpdateColumns writes only the listed columns (per gorm docs it also
	// skips hooks/timestamp updates).
	result := database.Connector.Model(&user).Where("id = ?", id).UpdateColumns(
		map[string]interface{}{
			"name": user.Name,
			"surname": user.Surname,
		},
	)
	if result.Error != nil {
		return &User{}, result.Error
	}
	// Re-fetch so the caller sees the persisted state of the row.
	err := database.Connector.Model(&user).First(&user, id).Error
	if err != nil {
		return &User{}, err
	}
	return user, err
}
|
package api
import (
"github.com/mickaelmagniez/elastic-alert/api/router"
)
// Run starts the API by delegating to the router package.
// NOTE(review): presumably router.Load blocks serving HTTP — confirm.
func Run() {
	router.Load()
}
|
package main
import (
"fmt"
"log"
"sync"
"time"
"zk"
)
var (
	// TESTIP lists the ZooKeeper host(s) the test program connects to.
	TESTIP = []string{
		//"172.19.32.16",
		"192.168.56.101",
	}
)
// main connects to the test ZooKeeper ensemble, runs ten concurrent
// traversals of /ymb/loggers, and logs the total elapsed time.
func main() {
	log.Println("testing")
	conn := zk.New()
	if err := conn.Connect(TESTIP); err != nil {
		// Without a connection every traversal below would fail anyway,
		// so abort instead of continuing with a dead client.
		log.Fatal(err)
	}
	start := time.Now()
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			traverse(conn, "/ymb/loggers")
		}()
	}
	wg.Wait()
	log.Println(time.Since(start))
}
// traverse recursively walks every znode below path, printing each leaf
// znode (one with no children) together with its existence flag.
func traverse(conn *zk.ZkCli, path string) {
	children, err := conn.Children(path)
	if err != nil {
		return
	}
	if len(children) == 0 {
		if flag, err := conn.Exists(path); err == nil {
			fmt.Println(path, flag)
		}
	}
	// Avoid a double slash when recursing from the root node.
	prefix := path
	if path == "/" {
		prefix = ""
	}
	for _, znode := range children {
		traverse(conn, prefix+"/"+znode)
	}
}
|
package datagrid
import (
"fmt" // used for outputting to the terminal
//"time" // used for pausing, measuring duration, etc
//"math/rand" // random number generator
//"math"
//"sync"
)
// Import internal packages
import (
cfg "flood_go/config"
)
// Import external packages
import (
"github.com/veandco/go-sdl2/sdl"
)
const (
COLS = cfg.COLS
ROWS = cfg.ROWS
)
// DataGrid holds one player's simulation state: per-cell amounts and
// smell values plus a precomputed neighbour lookup table.
type DataGrid struct {
	Cells [ROWS][COLS][5] int // amount, smell, _smell, _amount, unused
	//Smell [ROWS][COLS][2]float64 // smell, _smell [todo]
	UserId int
	BaseColor *sdl.Color // base render colour for this player's cells
	Enemy *DataGrid // the opposing player's grid
	Pixels *[]byte
	NeighbourLUT [ROWS][COLS][4][3]int // row, col, exists
	// temp
	Amount *[]byte
}
// Init precomputes the neighbour lookup table for the grid.
func (dg *DataGrid) Init() {
	dg.CalculateNeighbourLUT()
	// Emit a blank line, preserved from the original initialisation output.
	fmt.Println("")
}
/* Clear resets every slot of every cell to zero. */
func (dg *DataGrid) Clear() {
	for row := range dg.Cells {
		for col := range dg.Cells[row] {
			for k := 0; k < 5; k++ {
				dg.Cells[row][col][k] = 0
			}
		}
	}
}
/* SetCell writes amount into both the live and the intermediate amount
   slots (cfg.KEY_AMOUNT and cfg.KEY_I_AMOUNT) of the cell. */
func (dg *DataGrid) SetCell(row, col int, amount int) {
	cell := &dg.Cells[row][col]
	cell[cfg.KEY_AMOUNT] = amount
	cell[cfg.KEY_I_AMOUNT] = amount
}
/* Kill is an alias for SetCell(row, col, 0). */
func (this *DataGrid) Kill(row, col int) {
	this.SetCell(row, col, 0)
}
/* CalculateNeighbourLUT fills the lookup table that records, for every
   cell, the coordinates of its top/bottom/left/right neighbours and a
   flag saying whether each neighbour exists.
   Slot order matches the original layout: 0=top, 1=bottom, 2=left, 3=right. */
func (dg *DataGrid) CalculateNeighbourLUT() {
	// Row/col offsets in slot order: top, bottom, left, right.
	offsets := [4][2]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}}
	for row := 0; row < ROWS; row++ {
		for col := 0; col < COLS; col++ {
			for dir, off := range offsets {
				nbRow, nbCol := row+off[0], col+off[1]
				if nbRow < 0 || nbRow >= ROWS || nbCol < 0 || nbCol >= COLS {
					continue // neighbour is off the grid; leave exists = 0
				}
				dg.NeighbourLUT[row][col][dir][0] = nbRow
				dg.NeighbourLUT[row][col][dir][1] = nbCol
				dg.NeighbourLUT[row][col][dir][2] = 1 // exists
			}
		}
	}
}
/* ClearSmell zeroes the smell slot of every cell. Not used at the moment. */
func (dg *DataGrid) ClearSmell() {
	for row := range dg.Cells {
		for col := range dg.Cells[row] {
			dg.Cells[row][col][1] = 0
		}
	}
}
/* GetAvgSmell returns the cell's own amount plus the average over its
   existing neighbours of (smell + amount). With depth > 0 each
   neighbour reports its own recursive average instead of its raw
   values. */
func (dg *DataGrid) GetAvgSmell(row, col, depth int) int {
	numberOfNbs := 0
	totalSum := 0
	amount := dg.Cells[row][col][cfg.KEY_AMOUNT]
	// Collect data from all (existing) neighbours.
	for i := 0; i < 4; i++ {
		nb := &dg.NeighbourLUT[row][col][i]
		if nb[cfg.LUTKEY_EXISTS] != 1 {
			continue
		}
		// Keep a tally of existing neighbours to get a correct average.
		numberOfNbs++
		nbRow := nb[cfg.LUTKEY_ROW]
		nbCol := nb[cfg.LUTKEY_COL]
		if depth > 0 {
			// Recurse: the neighbour contributes its own average smell.
			totalSum += dg.GetAvgSmell(nbRow, nbCol, depth-1)
		} else {
			totalSum += dg.Cells[nbRow][nbCol][cfg.KEY_SMELL] + dg.Cells[nbRow][nbCol][cfg.KEY_AMOUNT]
		}
	}
	// Guard against division by zero on a degenerate 1x1 grid where a
	// cell has no neighbours at all.
	if numberOfNbs == 0 {
		return amount
	}
	return amount + totalSum/numberOfNbs
}
/* UpdateSmell recomputes every cell's smell for one simulation tick.
   The intermediate smell is computed concurrently by two workers (top
   and bottom half of the grid) and then copied into the live smell
   slot. f drives dissipation — presumably a random value in [0,1);
   confirm at the call site. */
func (this *DataGrid) UpdateSmell(f float64) {
	// References
	var Cells = &(this.Cells)
	// calc intermediate smell
	/*
	for row := 0; row < cfg.ROWS; row++ {
		for col := 0; col < cfg.COLS; col++ {
			// Get Smell Average for current cell
			_smell = this.GetAvgSmell(row, col, 0)
			// Adjust smell to allow for dissipation
			if _smell > 200 {
				_smell = int(float64(_smell) * 0.999)
			}
			if _smell > 0 && f > float64(_smell)/200 {
				_smell -= 1
			}
			// update intermediate smell
			(*Cells)[row][col][cfg.KEY_I_SMELL] = _smell
		}
	}
	*/
	// Compute the two grid halves in parallel and wait for both workers
	// before publishing the result.
	done_top := make(chan bool)
	done_bottom := make(chan bool)
	go this.UpdateIntermediateSmell(done_top, 0, cfg.ROWS/2, f)
	go this.UpdateIntermediateSmell(done_bottom, cfg.ROWS/2, cfg.ROWS, f)
	<- done_top
	<- done_bottom
	// Publish: copy the intermediate slot (index 2) into the live smell slot.
	for row := 0; row < cfg.ROWS; row++ {
		for col := 0; col < cfg.COLS; col++ {
			(*Cells)[row][col][cfg.KEY_SMELL] = (*Cells)[row][col][2]
		}
	}
}
/* UpdateIntermediateSmell computes the intermediate smell slot
   (cfg.KEY_I_SMELL) for rows [row_start, row_end) and signals on done
   when finished. Runs as a worker goroutine from UpdateSmell; the two
   workers write disjoint row ranges. */
func (this *DataGrid) UpdateIntermediateSmell(done chan bool, row_start, row_end int, f float64) {
	var _smell int
	// References
	var Cells = &(this.Cells)
	// calc intermediate smell
	for row := row_start; row < row_end; row++ {
		for col := 0; col < cfg.COLS; col++ {
			// Get Smell Average for current cell
			_smell = this.GetAvgSmell(row, col, 0)
			// Adjust smell to allow for dissipation
			if _smell > 200 {
				_smell = int(float64(_smell) * 0.999)
			}
			// Probabilistic decay: the weaker the smell, the more likely it
			// loses a point this tick (assumes f is in [0,1) — confirm).
			if _smell > 0 && f > float64(_smell)/200 {
				_smell -= 1
			}
			// update intermediate smell
			(*Cells)[row][col][cfg.KEY_I_SMELL] = _smell
		}
	}
	// signal that we're done
	done <- true
}
func (this *DataGrid) KillAll() {
for row := 0; row < cfg.ROWS; row++ {
for col := 0; col < cfg.COLS; col++ {
this.SetCell(row, col, 0)
}
}
} |
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package lacros
import (
"context"
"time"
lacroscommon "chromiumos/tast/common/cros/lacros"
"chromiumos/tast/ctxutil"
"chromiumos/tast/remote/bundles/cros/lacros/provision"
"chromiumos/tast/remote/bundles/cros/lacros/update"
"chromiumos/tast/remote/bundles/cros/lacros/version"
"chromiumos/tast/rpc"
lacrosservice "chromiumos/tast/services/cros/lacros"
"chromiumos/tast/testing"
)
// updatePath describes one provisioning step of a parameterized test:
// the Stateful Lacros channel (component) to install into and the
// version skew to apply relative to rootfs-lacros.
type updatePath struct {
	channel string
	skew *version.Version // version skew from rootfs-lacros
}
// Test scenarios that represent different update paths to be tested in the parameterized tests.
var (
	// 1. Updates on the same channel that go with +1 minor version bump to +1 major, and to +2 major from rootfs-lacros.
	pathUpdateOnSameChannel = []updatePath{
		{
			channel: lacroscommon.LacrosDevComponent,
			skew: version.New(0, 1, 0, 0), // +0 major +1 minor from rootfs-lacros
		},
		{
			channel: lacroscommon.LacrosDevComponent,
			skew: version.New(1, 0, 0, 0), // +1 major +0 minor
		},
		{
			channel: lacroscommon.LacrosDevComponent,
			skew: version.New(2, 0, 0, 0), // +2 major
		},
	}
	// 2. Upgrade to a channel of a newer milestone (eg, dev to canary) assuming that canary is one milestone ahead of dev.
	pathUpgradeChannel = []updatePath{
		{
			channel: lacroscommon.LacrosDevComponent,
			skew: version.New(0, 1, 0, 0), // +0 major +1 minor on dev-channel
		},
		{
			channel: lacroscommon.LacrosCanaryComponent,
			skew: version.New(1, 0, 0, 0), // +1 major +0 minor on canary-channel
		},
	}
	// 3. Downgrade to a channel of an older milestone (eg, canary to dev)
	pathDowngradeChannel = []updatePath{
		{
			channel: lacroscommon.LacrosCanaryComponent,
			skew: version.New(1, 0, 0, 0), // +1 major +0 minor on canary-channel
		},
		{
			channel: lacroscommon.LacrosDevComponent,
			skew: version.New(0, 1, 0, 0), // +0 major +1 minor on dev-channel
		},
	}
)
// init registers the parameterized test variants: same-channel updates
// (stable and unstable), a channel upgrade, and a channel downgrade.
func init() {
	testing.AddTest(&testing.Test{
		Func: UpdateStatefulToStateful,
		LacrosStatus: testing.LacrosVariantExists,
		Desc: "Tests that the newest Stateful Lacros is selected when there are more than one Stateful Lacros installed. This can also test version skew policy in Ash by provisioning any major version skews",
		Contacts: []string{"hyungtaekim@chromium.org", "lacros-team@google.com", "chromeos-sw-engprod@google.com"},
		Attr: []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "lacros"},
		ServiceDeps: []string{"tast.cros.lacros.UpdateTestService"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"lacros_stable"},
			Val: pathUpdateOnSameChannel,
		}, {
			Name: "unstable",
			ExtraSoftwareDeps: []string{"lacros_unstable"},
			Val: pathUpdateOnSameChannel,
		}, {
			Name: "channel_upgrade",
			ExtraSoftwareDeps: []string{"lacros_stable"},
			Val: pathUpgradeChannel,
		}, {
			Name: "channel_downgrade",
			ExtraSoftwareDeps: []string{"lacros_stable"},
			Val: pathDowngradeChannel,
		}},
		Timeout: 5 * time.Minute,
	})
}
// UpdateStatefulToStateful provisions a sequence of Stateful Lacros
// builds according to the test's []updatePath parameter, and verifies
// after each step that the expected version/component is selected.
func UpdateStatefulToStateful(ctx context.Context, s *testing.State) {
	// Create a UpdateTestService client.
	conn, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to DUT: ", err)
	}
	defer conn.Close(ctx)
	utsClient := lacrosservice.NewUpdateTestServiceClient(conn.Conn)
	// The versions of Stateful Lacros.
	// Used to verify the update path of Stateful => Stateful (1) on the same channel and (2) when switching channels.
	// Each version should be newer than Rootfs Lacros, but not over the maximum version skew of (Ash + 2 major).
	rootfsLacrosVersion, err := update.GetRootfsLacrosVersion(ctx, s.DUT(), utsClient)
	if err != nil {
		s.Fatal("Failed to get the Rootfs Lacros version: ", err)
	}
	ashVersion, err := update.GetAshVersion(ctx, s.DUT(), utsClient)
	if err != nil {
		s.Fatal("Failed to get the Ash version: ", err)
	}
	baseVersion := rootfsLacrosVersion
	// Deferred cleanup to always reset to the previous state with no provisioned files.
	// Shorten the main context so the cleanup below has time to run.
	ctxForCleanup := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 1*time.Minute)
	defer cancel()
	defer func(ctx context.Context) {
		update.SaveLogsFromDut(ctx, s.DUT(), s.OutDir())
		if err := update.ClearLacrosUpdate(ctx, utsClient); err != nil {
			s.Log("Failed to clean up provisioned Lacros: ", err)
		}
	}(ctxForCleanup)
	// Verify the updates from Stateful => Stateful.
	for _, updateInfo := range s.Param().([]updatePath) {
		// Derive the target version by applying the configured skew.
		statefulLacrosVersion := baseVersion.Increment(updateInfo.skew)
		overrideComponent := updateInfo.channel
		// TODO(hyungtaekim): Consider a helper function to check versions for all tests.
		if !statefulLacrosVersion.IsValid() {
			s.Fatal("Invalid Stateful Lacros version: ", statefulLacrosVersion)
		} else if !statefulLacrosVersion.IsNewerThan(rootfsLacrosVersion) {
			s.Fatalf("Invalid Stateful Lacros version: %v, should not be older than Rootfs: %v", statefulLacrosVersion, rootfsLacrosVersion)
		} else if !statefulLacrosVersion.IsSkewValid(ashVersion) {
			s.Fatalf("Invalid Stateful Lacros version: %v, should be compatible with Ash: %v", statefulLacrosVersion, ashVersion)
		}
		// Provision Stateful Lacros from the Rootfs Lacros image file with the simulated version and component.
		if err := update.ProvisionLacrosFromRootfsLacrosImagePath(ctx, provision.TLSAddrVar.Value(), s.DUT(), statefulLacrosVersion.GetString(), overrideComponent); err != nil {
			s.Fatal("Failed to provision Stateful Lacros from Rootfs image source: ", err)
		}
		// Verify that the expected Stateful Lacros version/component is selected.
		if err := update.VerifyLacrosUpdate(ctx, lacrosservice.BrowserType_LACROS_STATEFUL, statefulLacrosVersion.GetString(), overrideComponent, utsClient); err != nil {
			s.Fatal("Failed to verify provisioned Lacros version: ", err)
		}
	}
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"testing"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/assert"
)
// TestReplicatedCmdBuf verifies the replicatedCmdBuf behavior:
// allocation updates len, iteration returns entries in allocation
// order, and clear resets the buffer to its zero value.
func TestReplicatedCmdBuf(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	var buf replicatedCmdBuf
	// numStates is chosen arbitrarily; several nodes plus one forces the
	// buffer to span multiple chunks.
	const numStates = 5*replicatedCmdBufNodeSize + 1
	// Test that the len field is properly updated.
	var states []*replicatedCmd
	for i := 0; i < numStates; i++ {
		assert.Equal(t, i, int(buf.len))
		states = append(states, buf.allocate())
		assert.Equal(t, i+1, int(buf.len))
	}
	// Test the iterator: entries must come back in allocation order.
	var it replicatedCmdBufSlice
	i := 0
	for it.init(&buf); it.Valid(); it.Next() {
		assert.Equal(t, states[i], it.cur())
		i++
	}
	assert.Equal(t, i, numStates) // make sure we saw them all
	// Test clear.
	buf.clear()
	assert.EqualValues(t, buf, replicatedCmdBuf{})
	assert.Equal(t, 0, int(buf.len))
	it.init(&buf)
	assert.False(t, it.Valid())
	// Test clear on an empty buffer.
	buf.clear()
	assert.EqualValues(t, buf, replicatedCmdBuf{})
}
|
package tree
import (
"github.com/jagandecapri/vision/utils"
)
// Grid stores the units of a 2D grid keyed by their coordinate Range and
// keeps the bookkeeping needed for incremental density-based clustering.
type Grid struct{
	Store map[Range]*Unit
	ClusterContainer
	point_unit_map map[int]Range // point id -> range of the unit holding it
	minDensePoints int
	minClusterPoints int
	cluster_id_counter int // last id handed out by GetNextClusterID
	listDenseUnits map[Range]*Unit // units currently considered dense
	tmpUnitToCluster map[Range]*Unit // work queue consumed via GetUnitsToCluster
}
// NewGrid constructs an empty Grid with all internal maps initialised.
func NewGrid() Grid {
	return Grid{
		Store:            make(map[Range]*Unit),
		point_unit_map:   make(map[int]Range),
		listDenseUnits:   make(map[Range]*Unit),
		tmpUnitToCluster: make(map[Range]*Unit),
		ClusterContainer: ClusterContainer{ListOfClusters: make(map[int]Cluster)},
	}
}
// GetUnits returns the full unit store keyed by range.
func (us *Grid) GetUnits() map[Range]*Unit{
	return us.Store
}
// GetUnitsToCluster returns the queue of units pending clustering,
// falling back to the whole store when the queue is empty.
func (us *Grid) GetUnitsToCluster() map[Range]*Unit {
	if len(us.tmpUnitToCluster) > 0 {
		return us.tmpUnitToCluster
	}
	return us.Store
}
// GetMinDensePoints returns the point threshold for a unit to count as dense.
func (us *Grid) GetMinDensePoints() int{
	return us.minDensePoints
}
// GetMinClusterPoints returns the minimum number of points a cluster must hold.
func (us *Grid) GetMinClusterPoints() int{
	return us.minClusterPoints
}
// GetNextClusterID hands out a fresh, monotonically increasing cluster id.
func (us *Grid) GetNextClusterID() int {
	us.cluster_id_counter++
	return us.cluster_id_counter
}
// RemovePoint deletes point from the unit covering rg and drops its id
// from the point->unit index. Unknown ranges are ignored.
func (us *Grid) RemovePoint(point Point, rg Range) {
	if unit, ok := us.Store[rg]; ok {
		unit.RemovePoint(point)
		delete(us.point_unit_map, point.GetID())
	}
}
// AddPoint inserts point into the unit covering rg and records the
// point's unit in the point->unit index. Unknown ranges are ignored.
func (us *Grid) AddPoint(point Point, rg Range) {
	if unit, ok := us.Store[rg]; ok {
		unit.AddPoint(point)
		us.point_unit_map[point.GetID()] = rg
	}
}
// UpdatePoint moves the point into the unit at new_range if it differs
// from the unit currently holding it.
func (us *Grid) UpdatePoint(point Point, new_range Range) {
	id := point.GetID()
	if cur := us.point_unit_map[id]; cur != new_range {
		us.RemovePoint(point, cur)
		us.AddPoint(point, new_range)
	}
}
// GetPointRange returns the range of the unit currently holding the
// point with the given id (the zero Range when the id is unknown).
func (us *Grid) GetPointRange(id int) Range{
	return us.point_unit_map[id]
}
// AddUnit registers the unit in the store under its own Range key.
func (us *Grid) AddUnit(unit *Unit){
	us.Store[unit.Range] = unit
}
// SetupGrid wires every unit to its neighbouring units; interval_l is
// the edge length of a grid cell.
func (us *Grid) SetupGrid(interval_l float64) {
	for rg, unit := range us.Store {
		// unit is the pointer already held in the map, so mutating it in
		// place suffices; re-storing it (us.Store[rg] = unit) was a no-op.
		unit.SetNeighbouringUnits(us.GetNeighbouringUnits(rg, interval_l))
	}
}
// RecomputeDenseUnits re-evaluates density for every unit, updating the
// internal dense-unit set, and returns (units that just became dense,
// units that just stopped being dense).
func (us *Grid) RecomputeDenseUnits(min_dense_points int) (map[Range]*Unit, map[Range]*Unit) {
	newlyDense := make(map[Range]*Unit)
	noLongerDense := make(map[Range]*Unit)
	for rg, unit := range us.Store {
		_, wasDense := us.listDenseUnits[rg]
		if isDenseUnit(unit, min_dense_points) {
			if !wasDense {
				us.listDenseUnits[rg] = unit
				newlyDense[rg] = unit
			}
		} else if wasDense {
			delete(us.listDenseUnits, rg)
			noLongerDense[rg] = unit
		}
	}
	return newlyDense, noLongerDense
}
// ProcessOldDenseUnits handles units that stopped being dense: each one
// is unclassified, and if at least two of its neighbours still belong to
// its former cluster, that whole cluster is removed and its units are
// returned (unclassified) for re-clustering.
// NOTE(review): presumably the cluster is dissolved because losing this
// unit may split it — confirm against the IGDCA algorithm.
func (us *Grid) ProcessOldDenseUnits(listOldDenseUnits map[Range]*Unit) map[Range]*Unit {
	listUnitToRep := make(map[Range]*Unit)
	for _, unit := range listOldDenseUnits{
		cluster_id := unit.Cluster_id
		unit.Cluster_id = UNCLASSIFIED
		count_neighbour_same_cluster := 0
		for _, neighbour_unit := range unit.GetNeighbouringUnits() {
			if neighbour_unit.Cluster_id == cluster_id{
				count_neighbour_same_cluster++
			}
			// Two matching neighbours are enough to decide; stop counting.
			if count_neighbour_same_cluster >= 2{
				break
			}
		}
		if count_neighbour_same_cluster >= 2 {
			// Dissolve the old cluster and queue its units for re-clustering.
			src := us.RemoveCluster(cluster_id)
			for rg, unit := range src.ListOfUnits{
				unit.Cluster_id = UNCLASSIFIED
				listUnitToRep[rg] = unit
			}
		}
	}
	return listUnitToRep
}
// Cluster runs one incremental clustering pass: newly dense units are
// clustered first, then units released from dissolved clusters are
// re-clustered. IGDCA consumes the queue set via tmpUnitToCluster
// (see GetUnitsToCluster).
func (us *Grid) Cluster(min_dense_points int, min_cluster_points int){
	listNewDenseUnits, listOldDenseUnits := us.RecomputeDenseUnits(min_dense_points)
	us.tmpUnitToCluster = listNewDenseUnits
	_ = IGDCA(us, min_dense_points, min_cluster_points)
	listUnitToRep := us.ProcessOldDenseUnits(listOldDenseUnits)
	us.tmpUnitToCluster = listUnitToRep
	_ = IGDCA(us, min_dense_points, min_cluster_points)
}
// GetOutliers collects every point living in a unit that is not
// assigned to any cluster.
func (us *Grid) GetOutliers() []Point {
	outliers := []Point{}
	for _, unit := range us.Store {
		if unit.Cluster_id != UNCLASSIFIED {
			continue
		}
		for _, point := range unit.GetPoints() {
			outliers = append(outliers, point)
		}
	}
	return outliers
}
// isDenseUnit reports whether the unit holds at least min_dense_points
// points. NOTE(review): the receiver is unused and RecomputeDenseUnits
// calls a package-level isDenseUnit instead — this method may be dead
// code; confirm before removing.
func (us *Grid) isDenseUnit(unit *Unit, min_dense_points int) bool{
	return unit.GetNumberOfPoints() >= min_dense_points
}
// GetNeighbouringUnits returns the up to eight grid units surrounding
// rg, where interval_l is the edge length of a unit. Candidate ranges
// are computed arithmetically; only those present in the store are
// returned.
func (us *Grid) GetNeighbouringUnits(rg Range, interval_l float64) map[Range]*Unit {
	/**
	U = unit; n{x} => neighbouring units
	|n3|n5|n8|
	|n2|U |n7|
	|n1|n4|n6|
	*/
	// NOTE(review): Round(..., 0.1) implies coordinates are aligned to a
	// 0.1 grid — confirm this matches interval_l at the call sites.
	tmp1 := utils.Round(rg.Low[0] - interval_l, 0.1)
	tmp2 := utils.Round(rg.Low[1] - interval_l, 0.1)
	tmp3 := utils.Round(rg.High[0] + interval_l, 0.1)
	tmp4 := utils.Round(rg.High[1] + interval_l, 0.1)
	n1 := Range{Low:[2]float64{tmp1, tmp2},
		High: [2]float64{rg.Low[0], rg.Low[1]}}
	n2 := Range{Low:[2]float64{tmp1, rg.Low[1]},
		High: [2]float64{rg.Low[0], rg.High[1]}}
	n3 := Range{Low:[2]float64{tmp1, rg.High[1]},
		High: [2]float64{rg.Low[0], tmp4}}
	n4 := Range{Low:[2]float64{rg.Low[0], tmp2},
		High: [2]float64{rg.High[0], rg.Low[1]}}
	n5 := Range{Low:[2]float64{rg.Low[0], rg.High[1]},
		High: [2]float64{rg.High[0], tmp4}}
	n6 := Range{Low:[2]float64{rg.High[0], tmp2},
		High: [2]float64{tmp3, rg.Low[1]}}
	n7 := Range{Low:[2]float64{rg.High[0], rg.Low[1]},
		High: [2]float64{tmp3, rg.High[1]}}
	n8 := Range{Low:[2]float64{rg.High[0], rg.High[1]},
		High: [2]float64{tmp3, tmp4}}
	tmp := [8]Range{n1,n2,n3,n4,n5,n6,n7,n8}
	neighbour_units := make(map[Range]*Unit)
	// Keep only candidate ranges that exist in the store.
	for _, rg_search := range tmp{
		unit, ok := us.Store[rg_search]
		if ok{
			neighbour_units[rg_search] = unit
		}
	}
	return neighbour_units
}
package ksql
import (
"fmt"
"log"
"testing"
"github.com/Mongey/terraform-provider-kafka/kafka"
r "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestBasicStream creates the backing "vault" Kafka topic (best effort:
// failure is only logged, e.g. when the topic already exists — confirm),
// then applies testKSQLStreamQuery and validates the resulting state
// with testResourceStream_Check.
func TestBasicStream(t *testing.T) {
	err := createTopic("vault")
	if err != nil {
		log.Printf("[DEBUG] Could not create topic %v", err)
	}
	r.Test(t, r.TestCase{
		Providers: testAccProviders,
		PreCheck: func() { testAccPreCheck(t) },
		Steps: []r.TestStep{
			{
				Config: testKSQLStreamQuery,
				Check: testResourceStream_Check,
			},
		},
	})
}
// testResourceStream_Check validates the ksql_stream.example resource in
// the terraform state: it must exist, have a primary instance, and its
// ID must equal both its "name" attribute and "vault_logs".
func testResourceStream_Check(s *terraform.State) error {
	res := s.Modules[0].Resources["ksql_stream.example"]
	if res == nil {
		return fmt.Errorf("resource not found in state")
	}
	primary := res.Primary
	if primary == nil {
		return fmt.Errorf("resource has no primary instance")
	}
	name := primary.ID
	if name != primary.Attributes["name"] {
		return fmt.Errorf("id doesn't match name")
	}
	if name != "vault_logs" {
		return fmt.Errorf("unexpected stream name %s", name)
	}
	return nil
}
// testKSQLStreamQuery is the terraform configuration under test: the
// ksql provider pointed at a local server plus one stream over the
// "vault" topic.
const testKSQLStreamQuery = `
provider "ksql" {
url = "http://localhost:8088"
}
resource "ksql_stream" "example" {
name = "vault_logs"
query = "(time VARCHAR, type VARCHAR, auth STRUCT<client_token VARCHAR, accessor VARCHAR, display_name VARCHAR, policies ARRAY<STRING>, token_policies ARRAY<STRING>, entity_id VARCHAR, token_type VARCHAR>, request STRUCT<id VARCHAR, operation VARCHAR, path VARCHAR, remote_address VARCHAR>, response STRUCT<data STRUCT<error VARCHAR>>, error VARCHAR) WITH (KAFKA_TOPIC='vault', VALUE_FORMAT='JSON', TIMESTAMP='time', TIMESTAMP_FORMAT='yyyy-MM-dd''T''HH:mm:ss[.SSSSSS][.SSSSS][.SSSS][.SSS][.SS][.S]''Z''');"
}
`
// createTopic creates a single-partition, replication-factor-1 Kafka
// topic with the given name on localhost:9092, returning any client or
// creation error (errors are also logged).
func createTopic(name string) error {
	kafkaConfig := &kafka.Config{
		BootstrapServers: &[]string{"localhost:9092"},
		Timeout:          900,
	}
	kAdmin, err := kafka.NewClient(kafkaConfig)
	if err != nil {
		log.Printf("[ERROR] Unable to create client: %s", err)
		return err
	}
	topic := kafka.Topic{
		Name:              name,
		Partitions:        1,
		ReplicationFactor: 1,
	}
	// Early-return guard style replaces the original inverted if/else.
	if err := kAdmin.CreateTopic(topic); err != nil {
		log.Printf("[ERROR] Creating Topic: %v", err)
		return err
	}
	return nil
}
|
package wsServer
import (
"log"
"net/http"
"sync"
)
const (
	// sendChanBufferSize is the buffer capacity intended for each peer's
	// outbound send channel. NOTE(review): not referenced in the visible
	// part of this file — confirm the peer implementation uses it.
	sendChanBufferSize = 64
)
var (
	// globPeers is a concurrency-safe registry; sync.Map avoids explicit
	// locking from per-connection goroutines. NOTE(review): not
	// referenced in the visible part of this file.
	globPeers sync.Map = sync.Map{}
)
// Upgrade upgrades the HTTP request to a websocket peer, runs the read
// pump on a new goroutine and the write pump on the current one; when
// the write pump returns the peer is torn down.
func Upgrade(w http.ResponseWriter, r *http.Request) {
	peer, err := CreatePeer(w, r)
	if err != nil {
		log.Printf("[ERROR] upgrade: %v\n", err)
		return
	}
	go peer.readPump()
	peer.writePump()
	// writePump returned, so the connection is finished.
	// NOTE(review): yeet's exact cleanup semantics are defined elsewhere.
	peer.yeet()
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sqltelemetry
import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
)
var (
	// CreateMultiRegionDatabaseCounter is to be incremented when a multi-region
	// database is created.
	CreateMultiRegionDatabaseCounter = telemetry.GetCounterOnce(
		"sql.multiregion.create_database",
	)
	// SetInitialPrimaryRegionCounter is to be incremented when
	// a multi-region database is created using ALTER DATABASE ... PRIMARY REGION.
	SetInitialPrimaryRegionCounter = telemetry.GetCounterOnce(
		"sql.multiregion.alter_database.set_primary_region.initial_multiregion",
	)
	// SwitchPrimaryRegionCounter is to be incremented when
	// a multi-region database has its primary region changed.
	SwitchPrimaryRegionCounter = telemetry.GetCounterOnce(
		"sql.multiregion.alter_database.set_primary_region.switch_primary_region",
	)
	// AlterDatabaseAddRegionCounter is to be incremented when a region is
	// added to a database.
	AlterDatabaseAddRegionCounter = telemetry.GetCounterOnce(
		"sql.multiregion.add_region",
	)
	// AlterDatabaseDropRegionCounter is to be incremented when a non-primary
	// region is dropped from a database.
	AlterDatabaseDropRegionCounter = telemetry.GetCounterOnce(
		"sql.multiregion.drop_region",
	)
	// AlterDatabaseDropPrimaryRegionCounter is to be incremented when a primary
	// region is dropped from a database.
	AlterDatabaseDropPrimaryRegionCounter = telemetry.GetCounterOnce(
		"sql.multiregion.drop_primary_region",
	)
	// ImportIntoMultiRegionDatabaseCounter is to be incremented when an import
	// statement is run against a multi-region database.
	ImportIntoMultiRegionDatabaseCounter = telemetry.GetCounterOnce(
		"sql.multiregion.import",
	)
)
// CreateDatabaseSurvivalGoalCounter is to be incremented when the survival goal
// on a multi-region database is being set.
func CreateDatabaseSurvivalGoalCounter(goal string) telemetry.Counter {
	name := fmt.Sprintf("sql.multiregion.create_database.survival_goal.%s", goal)
	return telemetry.GetCounter(name)
}
// AlterDatabaseSurvivalGoalCounter is to be incremented when the survival goal
// on a multi-region database is being altered.
func AlterDatabaseSurvivalGoalCounter(goal string) telemetry.Counter {
	name := fmt.Sprintf("sql.multiregion.alter_database.survival_goal.%s", goal)
	return telemetry.GetCounter(name)
}
// CreateTableLocalityCounter is to be incremented every time a locality
// is set on a table.
func CreateTableLocalityCounter(locality string) telemetry.Counter {
	name := fmt.Sprintf("sql.multiregion.create_table.locality.%s", locality)
	return telemetry.GetCounter(name)
}
// AlterTableLocalityCounter is to be incremented every time a locality
// is changed on a table.
func AlterTableLocalityCounter(from, to string) telemetry.Counter {
	name := fmt.Sprintf("sql.multiregion.alter_table.locality.from.%s.to.%s", from, to)
	return telemetry.GetCounter(name)
}
|
package domain
import (
"context"
"fmt"
"sync"
categoryDomain "flamingo.me/flamingo-commerce/v3/category/domain"
product "flamingo.me/flamingo-commerce/v3/product/domain"
"flamingo.me/flamingo/v3/framework/flamingo"
"flamingo.me/flamingo/v3/framework/opencensus"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
)
type (
	// IndexProcess responsible to call the injected loader to index products into the passed repository
	IndexProcess struct {
		indexUpdater IndexUpdater
		indexer *Indexer
		logger flamingo.Logger
		enableIndexing bool // config gate; Run is a no-op when false
	}
	// Indexer provides useful features to work with the Repositories for indexing purposes
	Indexer struct {
		productRepository ProductRepository
		categoryRepository CategoryRepository // optional; may be nil
		logger flamingo.Logger
		batchProductQueue []product.BasicProduct // products pending commit
		batchCatQueue []product.CategoryTeaser // category teasers pending commit
	}
	// CategoryTreeBuilder helper to build category tree
	CategoryTreeBuilder struct {
		// rootCategory - this is the link into the tree that is going to be built
		rootCategory *categoryDomain.TreeData
		// categoryTreeIndex - the link into the treenode - is built
		categoryTreeIndex map[string]*categoryDomain.TreeData
		// child -> parent
		nodeLinkRawData map[string]string
	}
	// categoryRawNode is the raw (code, name, parent-code) triple
	// collected before the category tree is assembled.
	categoryRawNode struct {
		code string
		name string
		parent string
	}
	// IndexUpdater - interface to update the index with the help of the Indexer
	IndexUpdater interface {
		Index(ctx context.Context, rep *Indexer) error
	}
)
var (
	// mutex serialises IndexProcess.Run so only one indexing run is
	// active at a time.
	mutex sync.Mutex
	// docCount gauges the number of product documents currently indexed.
	docCount = stats.Int64("flamingo-commerce-adapter-standalone/commercesearch/products/doc_count", "Number of product documents in the index", stats.UnitDimensionless)
)
// init registers the document-count metric view; failure here is a
// programming error, hence the panic.
func init() {
	err := opencensus.View(docCount.Name(), docCount, view.LastValue())
	if err != nil {
		panic(err)
	}
}
// Inject wires the Indexer's dependencies (flamingo DI). The category
// repository is optional; config may be nil or carry a nil repository.
func (i *Indexer) Inject(logger flamingo.Logger, productRepository ProductRepository,
	config *struct {
		CategoryRepository CategoryRepository `inject:",optional"`
	}) *Indexer {
	i.logger = logger
	i.productRepository = productRepository
	if config != nil {
		i.categoryRepository = config.CategoryRepository
	}
	return i
}
// PrepareIndex readies the product repository and, when one is
// configured, the category repository for a fresh indexing run.
func (i *Indexer) PrepareIndex(ctx context.Context) error {
	if err := i.productRepository.PrepareIndex(ctx); err != nil {
		return err
	}
	if i.categoryRepository == nil {
		return nil
	}
	return i.categoryRepository.PrepareIndex(ctx)
}
// ProductRepository exposes the underlying product repository.
func (i *Indexer) ProductRepository() ProductRepository {
	return i.productRepository
}
// commit flushes the queued product and category batches to their
// repositories. Each queue is cleared only after its update succeeds,
// so a failed batch is retried on the next commit.
func (i *Indexer) commit(ctx context.Context) error {
	err := i.productRepository.UpdateProducts(ctx, i.batchProductQueue)
	if err != nil {
		return err
	}
	i.batchProductQueue = nil
	if i.categoryRepository != nil {
		err = i.categoryRepository.UpdateByCategoryTeasers(ctx, i.batchCatQueue)
		if err != nil {
			return err
		}
		i.batchCatQueue = nil
	}
	return nil
}
// UpdateProductAndCategory queues the given product together with its
// assigned category teasers, commits both queues and records the resulting
// document count metric.
//
// The parameter no longer shadows the imported product package, and
// BaseData() is evaluated once instead of three times.
func (i *Indexer) UpdateProductAndCategory(ctx context.Context, p product.BasicProduct) error {
	i.batchProductQueue = append(i.batchProductQueue, p)
	baseData := p.BaseData()
	if baseData.Categories != nil {
		i.batchCatQueue = append(i.batchCatQueue, baseData.Categories...)
	}
	if baseData.MainCategory.Code != "" {
		i.batchCatQueue = append(i.batchCatQueue, baseData.MainCategory)
	}
	err := i.commit(ctx)
	stats.Record(ctx, docCount.M(i.productRepository.DocumentsCount()))
	return err
}
// Inject sets the IndexProcess dependencies; indexing is toggled via the
// flamingoCommerceAdapterStandalone.commercesearch.enableIndexing config flag.
func (p *IndexProcess) Inject(indexUpdater IndexUpdater, logger flamingo.Logger, indexer *Indexer, config *struct {
	EnableIndexing bool `inject:"config:flamingoCommerceAdapterStandalone.commercesearch.enableIndexing,optional"`
}) {
	p.indexUpdater = indexUpdater
	p.indexer = indexer
	p.enableIndexing = config.EnableIndexing
	p.logger = logger.
		WithField(flamingo.LogKeyModule, "flamingo-commerce-adapter-standalone").
		WithField(flamingo.LogKeyCategory, "indexer")
}
// Run executes the index process with the registered IndexUpdater, using the
// Indexer as helper for repository access. Runs are serialized through the
// package-level mutex; when indexing is disabled by config the run is a no-op.
//
// Fixes the "Prepareing" typo in the preparation log message.
func (p *IndexProcess) Run(ctx context.Context) error {
	if !p.enableIndexing {
		p.logger.Info("Skipping Indexing..")
		return nil
	}
	mutex.Lock()
	defer mutex.Unlock()
	p.logger.Info("Preparing Indexes..")
	err := p.indexer.PrepareIndex(ctx)
	if err != nil {
		return err
	}
	p.logger.Info("Start registered Indexer..")
	err = p.indexUpdater.Index(ctx, p.indexer)
	if err != nil {
		return err
	}
	p.logger.Info("Indexing finished..")
	return nil
}
// AddCategoryData registers one category node; call it as often as needed
// before BuildTree. A node whose code equals its parent code is treated as
// the tree root.
func (h *CategoryTreeBuilder) AddCategoryData(code string, name string, parentCode string) {
	if h.categoryTreeIndex == nil {
		h.categoryTreeIndex = make(map[string]*categoryDomain.TreeData)
	}
	if h.rootCategory == nil {
		h.rootCategory = &categoryDomain.TreeData{}
	}
	node := &categoryDomain.TreeData{
		CategoryCode: code,
		CategoryName: name,
	}
	h.categoryTreeIndex[code] = node
	// A self-referencing node marks the root; it gets no parent link entry.
	if code == parentCode {
		h.rootCategory = node
		return
	}
	if h.nodeLinkRawData == nil {
		h.nodeLinkRawData = make(map[string]string)
	}
	h.nodeLinkRawData[code] = parentCode
}
// BuildTree links all nodes added via AddCategoryData into a tree, computes
// the category paths and returns the root. An error is returned when a link
// references an unknown child or parent code.
//
// Fixed: the child-not-found error used to print the nil node pointer
// instead of the missing code.
func (h *CategoryTreeBuilder) BuildTree() (*categoryDomain.TreeData, error) {
	// Build the tree links (child -> parent).
	for childCode, parentCode := range h.nodeLinkRawData {
		childNode, ok := h.categoryTreeIndex[childCode]
		if !ok {
			return nil, fmt.Errorf("ChildNode %v not found", childCode)
		}
		var parentNode *categoryDomain.TreeData
		if parentCode == "" {
			// An empty parent code attaches the node directly to the root.
			parentNode = h.rootCategory
		} else {
			parentNode, ok = h.categoryTreeIndex[parentCode]
			if !ok {
				return nil, fmt.Errorf("ParentCode %v not found", parentCode)
			}
		}
		parentNode.SubTreesData = append(parentNode.SubTreesData, childNode)
	}
	buildPathString(h.rootCategory)
	return h.rootCategory, nil
}
// buildPathString recursively derives CategoryPath for every node below
// parent by appending each child's code to its parent's path.
func buildPathString(parent *categoryDomain.TreeData) {
	for _, child := range parent.SubTreesData {
		child.CategoryPath = parent.CategoryPath + "/" + child.CategoryCode
		buildPathString(child)
	}
}
// CategoryTreeToCategoryTeaser converts the category tree into a
// product.CategoryTeaser chain for the node with the searched code, or nil
// when no node matches.
func CategoryTreeToCategoryTeaser(searchedCategoryCode string, tree categoryDomain.Tree) *product.CategoryTeaser {
	return categoryTreeToCategoryTeaser(searchedCategoryCode, tree, nil)
}
// categoryTreeToCategoryTeaser walks the tree depth-first, building a teaser
// whose Parent chain mirrors the path from the root, and returns the teaser
// of the node matching searchedCategoryCode (nil when absent).
func categoryTreeToCategoryTeaser(searchedCategoryCode string, searchPosition categoryDomain.Tree, parentCategory *product.CategoryTeaser) *product.CategoryTeaser {
	current := &product.CategoryTeaser{
		Code:   searchPosition.Code(),
		Path:   searchPosition.Path(),
		Name:   searchPosition.Name(),
		Parent: parentCategory,
	}
	// Recursion stops once the searched category is found.
	if current.Code == searchedCategoryCode {
		return current
	}
	for _, subTree := range searchPosition.SubTrees() {
		if match := categoryTreeToCategoryTeaser(searchedCategoryCode, subTree, current); match != nil {
			return match
		}
	}
	return nil
}
|
package rsscombine
import "fmt"
import "github.com/mmcdole/gofeed"
import "github.com/gorilla/feeds"
import "sort"
import "time"
import "net/http"
import "log"
import "github.com/spf13/viper"
import "io/ioutil"
import "mvdan.cc/xurls"
import "strings"
// getUrlsFromFeedsUrl downloads the document at feeds_url and returns every
// strict URL found in it; prefixes listed under feed_exclude_prefixes are
// stripped from the document first. Any network or read error aborts the
// program via log.Fatal.
//
// Since log.Fatal never returns, the previous else-nesting and the
// unreachable trailing `return nil` were dead code; the function is now
// flattened into the idiomatic straight-line form.
func getUrlsFromFeedsUrl(feeds_url string) []string {
	log.Printf("Loading feed URLs from: %v", feeds_url)
	client := &http.Client{
		Timeout: time.Duration(viper.GetInt("client_timeout_seconds")) * time.Second,
	}
	response, err := client.Get(feeds_url)
	if err != nil {
		log.Fatal(err)
	}
	defer response.Body.Close()
	contents, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Fatal(err)
	}
	stringContents := string(contents)
	// TODO: this is a hack
	for _, exclude := range viper.GetStringSlice("feed_exclude_prefixes") {
		stringContents = strings.Replace(stringContents, exclude, "", -1)
	}
	return xurls.Strict().FindAllString(stringContents, -1)
}
// getUrls resolves the feed URL list: from the remote feed_urls document
// when configured, otherwise from the static "feeds" config entry.
func getUrls() []string {
	if feedsURL := viper.GetString("feed_urls"); feedsURL != "" {
		return getUrlsFromFeedsUrl(feedsURL)
	}
	return viper.GetStringSlice("feeds")
}
// fetchUrl parses the feed at url and sends the result on ch; on failure the
// error is logged and nil is sent so the collecting side can still proceed.
func fetchUrl(url string, ch chan<- *gofeed.Feed) {
	log.Printf("Fetching URL: %v\n", url)
	parser := gofeed.NewParser()
	parser.Client = &http.Client{
		Timeout: time.Duration(viper.GetInt("client_timeout_seconds")) * time.Second,
	}
	feed, err := parser.ParseURL(url)
	if err != nil {
		log.Printf("Error on URL: %v (%v)", url, err)
		ch <- nil
		return
	}
	ch <- feed
}
// fetchUrls fetches all URLs concurrently and returns the feeds that parsed
// successfully; failed fetches are dropped.
func fetchUrls(urls []string) []*gofeed.Feed {
	ch := make(chan *gofeed.Feed)
	for _, u := range urls {
		go fetchUrl(u, ch)
	}
	collected := make([]*gofeed.Feed, 0, len(urls))
	for range urls {
		if feed := <-ch; feed != nil {
			collected = append(collected, feed)
		}
	}
	return collected
}
// byPublished sorts feeds by the publication (or update) time of their first
// item. TODO: there must be a shorter syntax for this.
type byPublished []*gofeed.Feed

// Len reports the number of feeds.
func (s byPublished) Len() int { return len(s) }

// Swap exchanges two feeds.
func (s byPublished) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Less orders feeds by their first item's published time, falling back to
// the updated time. Feeds whose first item carries neither date sort first
// instead of dereferencing a nil *time.Time (which previously panicked).
func (s byPublished) Less(i, j int) bool {
	date1 := s[i].Items[0].PublishedParsed
	if date1 == nil {
		date1 = s[i].Items[0].UpdatedParsed
	}
	date2 := s[j].Items[0].PublishedParsed
	if date2 == nil {
		date2 = s[j].Items[0].UpdatedParsed
	}
	if date1 == nil {
		return date2 != nil
	}
	if date2 == nil {
		return false
	}
	return date1.Before(*date2)
}
// getAuthor resolves an author name for a feed: the feed-level author first,
// then the first item's author, then the configured default_author_name.
//
// Fixed: the item fallback indexed Items[0] unconditionally and panicked for
// feeds without items; it is now guarded.
func getAuthor(feed *gofeed.Feed) string {
	if feed.Author != nil {
		return feed.Author.Name
	}
	if len(feed.Items) > 0 && feed.Items[0].Author != nil {
		return feed.Items[0].Author.Name
	}
	log.Printf("Could not determine author for %v", feed.Link)
	return viper.GetString("default_author_name")
}
// combineallFeeds merges the items of all source feeds (feeds with the
// newest item first) into one output feed, de-duplicating by item link and
// taking at most feed_limit_per_feed items per source.
//
// Fixed: slicing Items[:limit] panicked for feeds with fewer items than the
// configured limit (now clamped), and an item with neither a published nor
// an updated date caused a nil dereference (now skipped).
func combineallFeeds(allFeeds []*gofeed.Feed) *feeds.Feed {
	feed := &feeds.Feed{
		Title:       viper.GetString("title"),
		Link:        &feeds.Link{Href: viper.GetString("link")},
		Description: viper.GetString("description"),
		Author: &feeds.Author{
			Name:  viper.GetString("author_name"),
			Email: viper.GetString("author_email"),
		},
		Created: time.Now(),
	}
	sort.Sort(sort.Reverse(byPublished(allFeeds)))
	limit_per_feed := viper.GetInt("feed_limit_per_feed")
	seen := make(map[string]bool)
	for _, sourceFeed := range allFeeds {
		limit := limit_per_feed
		if limit > len(sourceFeed.Items) {
			limit = len(sourceFeed.Items)
		}
		for _, item := range sourceFeed.Items[:limit] {
			if seen[item.Link] {
				continue
			}
			created := item.PublishedParsed
			if created == nil {
				created = item.UpdatedParsed
			}
			if created == nil {
				// No usable date: skip rather than dereference nil.
				continue
			}
			feed.Items = append(feed.Items, &feeds.Item{
				Title:       item.Title,
				Link:        &feeds.Link{Href: item.Link},
				Description: item.Description,
				Author:      &feeds.Author{Name: getAuthor(sourceFeed)},
				Created:     *created,
				Content:     item.Content,
			})
			seen[item.Link] = true
		}
	}
	return feed
}
// GetAtomFeed fetches all configured source feeds and combines them into a
// single feed.
func GetAtomFeed() *feeds.Feed {
	return combineallFeeds(fetchUrls(getUrls()))
}
// LoadConfig reads the rsscombine config file from the working directory,
// enables RSSCOMBINE_* environment overrides and installs defaults. It
// panics when no config file can be read.
func LoadConfig() {
	viper.SetConfigName("rsscombine")
	viper.AddConfigPath(".")
	viper.SetEnvPrefix("RSSCOMBINE")
	viper.AutomaticEnv()
	defaults := map[string]string{
		"port":                   "8080",
		"default_author_name":    "Unknown Author",
		"server_timeout_seconds": "60",
		"client_timeout_seconds": "60",
		"feed_limit_per_feed":    "20",
	}
	for key, value := range defaults {
		viper.SetDefault(key, value)
	}
	if err := viper.ReadInConfig(); err != nil {
		panic(fmt.Errorf("Fatal error config file: %s \n", err))
	}
}
|
package wechat
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"math/rand"
"sort"
"strconv"
"strings"
"text/template"
"time"
)
// 设置时间戳
func StampString() string {
ts64 := time.Now().Unix()
return strconv.FormatInt(ts64, 10)
}
// CreateNonceStr returns a random string of the given length drawn from
// [0-9a-zA-Z], seeded from the current time.
func CreateNonceStr(length int) string {
	const alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := []byte{}
	for i := 0; i < length; i++ {
		out = append(out, alphabet[rng.Intn(len(alphabet))])
	}
	return string(out)
}
// NonceStringGenerator returns a random alphanumeric string of the given
// length, seeded from the current time. (Duplicate of CreateNonceStr.)
func NonceStringGenerator(length int) string {
	charset := []byte("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	var out []byte
	for i := 0; i < length; i++ {
		out = append(out, charset[rng.Intn(len(charset))])
	}
	return string(out)
}
// //转字符串
// func ToString(value interface{}) string {
// switch value.(type){
// case string:
// return value.(string)
// case int32:
// v := value.(int)
// return strconv.Itoa(v)
// case int64:
// v := value.(int64)
// return strconv.FormatInt(v,10)
// case float32:
// v := value.(float32)
// return strconv.FormatFloat(v, 'f', -1, 32)
// case float64:
// v := value.(float64)
// return strconv.FormatFloat(v, 'f', -1, 64)
// }
// return ""
// }
// StringSign joins the map as lexicographically sorted "key=value" pairs
// separated by "&", producing the canonical string to sign.
func StringSign(data map[string]string) string {
	pairs := make([]string, 0, len(data))
	for key, value := range data {
		pairs = append(pairs, key+"="+value)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, "&")
}
// Md5 returns the lowercase hex MD5 digest of the given string.
func Md5(singString string) string {
	sum := md5.Sum([]byte(singString))
	return hex.EncodeToString(sum[:])
}
// Sha1Sign returns the lowercase hex SHA-1 digest of the signing string.
func Sha1Sign(stringSign string) string {
	h := sha1.New()
	h.Write([]byte(stringSign))
	return hex.EncodeToString(h.Sum(nil))
}
// MapToXml serializes the map as a flat XML document:
// <xml><key>value</key>...</xml>. Map iteration order is random, so element
// order is unspecified. Keys and values are not escaped.
//
// Fixed: the closing tag was previously emitted as <key> instead of </key>,
// producing invalid XML.
func MapToXml(data map[string]string) string {
	var b strings.Builder
	b.WriteString(`<xml>`)
	for k, v := range data {
		b.WriteString(`<` + k + `>` + v + `</` + k + `>`)
	}
	b.WriteString(`</xml>`)
	return b.String()
}
// JsonPare renders a text/template (Input) with string data (Data) into
// Output. It implements io.Writer so the template engine can write straight
// into Output.
type JsonPare struct {
	Input  string
	Output string
	Data   map[string]string
}

// Write appends b to the accumulated Output; it never fails.
func (js *JsonPare) Write(b []byte) (n int, err error) {
	js.Output += string(b)
	return len(b), nil
}

// Pare parses Input as a text/template and executes it with Data, appending
// the rendered text to Output. Parse or execution errors panic.
func (js *JsonPare) Pare() {
	tmpl := template.Must(template.New("json").Parse(js.Input))
	if err := tmpl.Execute(js, js.Data); err != nil {
		panic(err)
	}
}

// PareToJson renders josnTmpl with data and returns the result.
func PareToJson(josnTmpl string, data map[string]string) string {
	js := &JsonPare{
		Data:  data,
		Input: josnTmpl,
	}
	js.Pare()
	return js.Output
}
|
package node
// Resp is a keyed response carrying a label and an opaque value payload.
type Resp struct {
	Key   Key
	Label ID
	Value []byte
}

// equals reports whether both responses agree on key, label and value bytes.
func (r *Resp) equals(o *Resp) bool {
	if r.Key.Key != o.Key.Key {
		return false
	}
	if !r.Label.eq(o.Label) {
		return false
	}
	return string(r.Value) == string(o.Value)
}
|
package repository
import (
"database/sql"
"ehsan_esmaeili/model"
"fmt"
)
// TransactionRepository persists transactions.
// NOTE(review): the implementation below exposes this operation as
// InsertTransaction on UserRepositorySqlServer, so nothing in this file
// actually satisfies this interface — confirm which method name is intended.
type TransactionRepository interface {
	Insert(user *model.Transaction) (use *model.GetaUser, err error )
}
// TransactionRepositorySqlServer is a SQL-Server-backed transaction store.
// NOTE(review): the constructor and method below operate on
// UserRepositorySqlServer instead of this type — presumably a copy/paste
// leftover; confirm.
type TransactionRepositorySqlServer struct {
	db *sql.DB //64b
	table string //4b
	//68
}
// NewTransactionRepositorySqlServer builds a repository bound to the given
// table and database handle.
// NOTE(review): despite its name it returns a *UserRepositorySqlServer
// (declared elsewhere in this package) rather than a
// *TransactionRepositorySqlServer — confirm this is intended before use.
func NewTransactionRepositorySqlServer(table string, db *sql.DB) *UserRepositorySqlServer {
	return &UserRepositorySqlServer{
		db: db,
		table: table,
	}
}
// InsertTransaction executes the transaction_insert stored procedure with
// the transaction's fields and returns the resulting id wrapped in a
// model.GetaUser. Only dead commented-out debugging code was removed; the
// query and scan behavior are unchanged.
func (r *UserRepositorySqlServer) InsertTransaction(user *model.Transaction) (use *model.GetaUser, err error ) {
	var a model.GetaUser
	err = r.db.QueryRow(" Exec transaction_insert ?,?,?,?,?,?",
		user.Transaction_Id,
		user.Transaction_Time,
		user.Transaction_Price,
		user.Transaction_User_Id,
		user.Tracking_Number,
		user.Transaction_IsSuccessful,
	).Scan(&a.Id)
	// Debug print retained so the file's fmt import stays used; remove both together.
	fmt.Println(a.Id)
	if err != nil {
		return nil, err
	}
	return &a, nil
}
|
package svrcfg
import (
"fmt"
"os"
"github.com/go-ini/ini"
)
const (
	// configFile is the ini file loaded at package init, relative to the working directory.
	configFile = "config.ini"
)
// config mirrors the [DEFAULT] and [server] sections of config.ini.
type config struct {
	AppMode string
	// [server]
	PidFile string
	UseTLS bool
	CertPath string
	CertKeyPath string
	ServerAddr string
	ServerPort int
	HostOverride string
}
// Config is the exported configuration
var Config = &config{}
// init loads config.ini into the exported Config so importing packages see a
// populated configuration. The process exits when the file cannot be read,
// since the server cannot run unconfigured.
func init() {
	cfg, err := ini.Load(configFile)
	if err != nil {
		fmt.Printf("Fail to read file: %v", err)
		os.Exit(1)
	}
	// [DEFAULT]
	Config.AppMode = cfg.Section("").Key("app_mode").String()
	// [server]
	server := cfg.Section("server")
	Config.PidFile = server.Key("pidfile").String()
	Config.UseTLS = server.Key("use_tls").MustBool(false)
	Config.CertPath = server.Key("cert_path").String()
	Config.CertKeyPath = server.Key("cert_key_path").String()
	Config.ServerAddr = server.Key("server_addr").String()
	Config.ServerPort = server.Key("server_port").MustInt(10000)
	Config.HostOverride = server.Key("host_override").String()
	fmt.Printf("%v\n", Config)
}
|
package gitlabClient
import (
"fmt"
"github.com/xanzy/go-gitlab"
)
// GetMilestoneByGroupName looks up a group milestone by title using the
// server-side milestone search and returns the first hit, or nil (with a
// nil error) when the group has no matching milestone.
//
// NOTE(review): a result is only returned after the final page has been
// reached, and only the first element of that final response is used —
// presumably fine because the search narrows results; confirm.
func (git *GitLab) GetMilestoneByGroupName(pid gitlab.Group, milestoune string) (*gitlab.GroupMilestone, error) {
	opt := gitlab.ListGroupMilestonesOptions{
		Search: milestoune,
		ListOptions: gitlab.ListOptions{
			PerPage: 100,
			Page: 1,
		},
	}
	for {
		// Get the first page with projects.
		milesounes, resp, err := git.Client.GroupMilestones.ListGroupMilestones(pid.ID, &opt)
		if err != nil {
			return nil, err
		}
		// List all the projects we've found so far.
		//for _, p := range milesounes {
		//	fmt.Printf("Found milesoune: %s\n", p.Title)
		//}
		// Exit the loop when we've seen all pages.
		if resp.CurrentPage >= resp.TotalPages {
			if len(milesounes) != 0 {
				return milesounes[0], err
			}
			return nil, err
		}
		// Update the page number to get the next page.
		opt.Page = resp.NextPage
	}
}
// GetGroupMilestoneIssues collects every issue attached to the milestone
// with the given title (quoted for an exact search match) across all groups
// visible to the client. Groups that lack the milestone — or whose lookup
// fails — are skipped silently.
func (git *GitLab) GetGroupMilestoneIssues(name string) ([]*gitlab.Issue, error) {
	var issues []*gitlab.Issue
	nameMilestoune := fmt.Sprintf("\"%s\"", name)
	projects, err := git.ListGroup()
	if err != nil {
		return nil, err
	}
	for _, group := range projects {
		m, err := git.GetMilestoneByGroupName(*group, nameMilestoune)
		if err != nil || m == nil {
			// TODO: Check status code 403/404/etc
			//return nil, err
			continue
		}
		opt := gitlab.GetGroupMilestoneIssuesOptions{
			PerPage: 100,
			Page: 1,
		}
		// Page through all issues of this group's milestone.
		for {
			// Get the first page with projects.
			iss, resp, err := git.Client.GroupMilestones.GetGroupMilestoneIssues(group.ID, m.ID, &opt)
			if err != nil {
				return issues, err
			}
			// Exit the loop when we've seen all pages.
			if resp.CurrentPage >= resp.TotalPages {
				issues = append(issues, iss...)
				break
			}
			// Update the page number to get the next page.
			opt.Page = resp.NextPage
		}
	}
	return issues, nil
}
|
package log
import (
"io"
"os"
"sync"
)
// Handler consumes log records; implementations decide where and how a
// record is written and drop records below their level threshold.
type Handler interface {
	// Handle writes a single record (no-op below the level threshold).
	Handle(lr LogRecord)
	// SetLogLevel sets the minimum level a record needs to be handled.
	SetLogLevel(l LogLevel)
	// LogLevel returns the current minimum level.
	LogLevel() LogLevel
	// Formatter returns the formatter used to render records.
	Formatter() LogFormatter
	// SetFormatter replaces the formatter.
	SetFormatter(lf LogFormatter)
	io.Closer
}
// BaseHandler carries the level threshold and formatter shared by concrete
// handlers; embed it to inherit the accessors below.
type BaseHandler struct {
	minLevel LogLevel
	// fmt holds the record formatter (note: the field shadows the fmt
	// package inside methods of this type).
	fmt LogFormatter
}
// LogLevel returns the minimum level a record needs to be handled.
func (h *BaseHandler) LogLevel() LogLevel {
	return h.minLevel
}
// SetLogLevel sets the minimum handled level.
func (h *BaseHandler) SetLogLevel(lvl LogLevel) {
	h.minLevel = lvl
}
// Formatter returns the formatter used to render records.
func (h *BaseHandler) Formatter() LogFormatter {
	return h.fmt
}
// SetFormatter replaces the formatter.
func (h *BaseHandler) SetFormatter(lf LogFormatter) {
	h.fmt = lf
}
// StreamHandler writes formatted records to an arbitrary io.Writer; writes
// and Close are serialized through lock, and closed makes Close idempotent.
type StreamHandler struct {
	BaseHandler
	stream io.Writer
	lock sync.Mutex
	closed bool
}
// Handle formats the record onto the stream followed by a newline; records
// below the handler's minimum level are dropped. Writes are serialized via
// the mutex. Any formatter or write errors are discarded, since the Handler
// interface offers no error return.
func (sh *StreamHandler) Handle(lr LogRecord) {
	if lr.Level() < sh.minLevel {
		return
	}
	sh.lock.Lock()
	defer sh.lock.Unlock()
	sh.fmt.Format(sh.stream, lr)
	io.WriteString(sh.stream, "\n")
}
// Close finalizes the formatter and, when the underlying stream is an
// io.Closer, closes it as well. Close is idempotent; repeated calls return
// nil.
func (sh *StreamHandler) Close() error {
	sh.lock.Lock()
	defer sh.lock.Unlock()
	if sh.closed {
		return nil
	}
	if err := sh.fmt.Close(sh.stream); err != nil {
		return err
	}
	sh.closed = true
	if closer, ok := sh.stream.(io.Closer); ok {
		return closer.Close()
	}
	return nil
}
// NewStdoutHandler returns a handler writing color-formatted records to
// standard output, accepting all levels.
func NewStdoutHandler() *StreamHandler {
	h := &StreamHandler{stream: os.Stdout}
	h.minLevel = LevelNotSet
	h.fmt = &ColorFormatter{}
	return h
}
// NewStderrHandler returns a handler writing color-formatted records to
// standard error, accepting all levels.
func NewStderrHandler() *StreamHandler {
	h := &StreamHandler{stream: os.Stderr}
	h.minLevel = LevelNotSet
	h.fmt = &ColorFormatter{}
	return h
}
// NewFileHandler returns a handler writing plain-formatted records to the
// named file, which is created or truncated. Close the handler to release
// the file.
func NewFileHandler(filename string) (*StreamHandler, error) {
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return nil, err
	}
	h := &StreamHandler{stream: file}
	h.minLevel = LevelNotSet
	h.fmt = &DefaultFormatter{}
	return h, nil
}
|
package online
import (
"bufio"
"hash/fnv"
"os"
"strconv"
"strings"
)
// Hash maps s to a bucket in [0, dim) using FNV-1a. dim must be positive.
//
// The modulo is taken in uint32 before converting to int: on 32-bit
// platforms int(h.Sum32()) can be negative, which previously produced
// negative bucket indices.
func Hash(s string, dim int) int {
	h := fnv.New32a()
	h.Write([]byte(s))
	return int(h.Sum32() % uint32(dim))
}
// FileToHash converts a comma-separated input file into a hashed
// libsvm-style output file.
//
// Each line holds `length` feature columns followed by label columns;
// columns at the indexes in ignore are removed first. For every label
// column one output line is written: "<label> <labelIndex>:1 <bucket>:1 ...",
// where each feature is hashed into dim buckets via Hash and offset by
// ads+1 so label indexes and feature buckets do not collide.
//
// Fixed: scanner, write and flush errors were previously ignored.
func FileToHash(in string, out string, length int, ads int, ignore []int, dim int) error {
	fpIn, err := os.Open(in)
	if err != nil {
		return err
	}
	defer fpIn.Close()
	fpOut, err := os.Create(out)
	if err != nil {
		return err
	}
	defer fpOut.Close()
	length = length - len(ignore)
	scanner := bufio.NewScanner(fpIn)
	writer := bufio.NewWriter(fpOut)
	for scanner.Scan() {
		fields := strings.Split(scanner.Text(), ",")
		fields = removeFields(fields, ignore)
		for i, label := range fields[length:] {
			l, err := strconv.Atoi(label)
			if err != nil {
				return err
			}
			text := strconv.Itoa(l) + " " + strconv.Itoa(i+1) + ":1"
			for _, f := range fields[0:length] {
				text += " " + strconv.Itoa(Hash(f, dim)+ads+1) + ":1"
			}
			if _, err := writer.WriteString(text + "\n"); err != nil {
				return err
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	return writer.Flush()
}
|
package connect
import (
"context"
"fmt"
"io/ioutil"
"os"
"github.com/fatih/color"
"github.com/porter-dev/porter/cli/cmd/api"
"github.com/porter-dev/porter/cli/cmd/utils"
)
// GCR creates a GCR integration: it prompts for a service account key file,
// creates a GCP integration from its contents, then prompts for a registry
// URL and name, creates the registry and returns its id.
//
// Fixed: a non-NotExist os.Stat error (e.g. permission denied) left info nil
// and panicked on info.IsDir(); any stat failure or a directory path now
// yields the "could not read" error instead.
func GCR(
	client *api.Client,
	projectID uint,
) (uint, error) {
	// if project ID is 0, ask the user to set the project ID or create a project
	if projectID == 0 {
		return 0, fmt.Errorf("no project set, please run porter project set [id]")
	}
	keyFileLocation, err := utils.PromptPlaintext(fmt.Sprintf(`Please provide the full path to a service account key file.
Key file location: `))
	if err != nil {
		return 0, err
	}
	// the key file must exist and must not be a directory
	info, statErr := os.Stat(keyFileLocation)
	if statErr != nil || info.IsDir() {
		return 0, fmt.Errorf("could not read service account key file")
	}
	// read the file
	bytes, err := ioutil.ReadFile(keyFileLocation)
	if err != nil {
		return 0, err
	}
	// create the gcp integration
	integration, err := client.CreateGCPIntegration(
		context.Background(),
		projectID,
		&api.CreateGCPIntegrationRequest{
			GCPKeyData: string(bytes),
		},
	)
	if err != nil {
		return 0, err
	}
	color.New(color.FgGreen).Printf("created gcp integration with id %d\n", integration.ID)
	regURL, err := utils.PromptPlaintext(fmt.Sprintf(`Please provide the registry URL, in the form [GCP_DOMAIN]/[GCP_PROJECT_ID]. For example, gcr.io/my-project-123456.
Registry URL: `))
	if err != nil {
		return 0, err
	}
	// create the registry
	// query for registry name
	regName, err := utils.PromptPlaintext(fmt.Sprintf(`Give this registry a name: `))
	if err != nil {
		return 0, err
	}
	reg, err := client.CreateGCR(
		context.Background(),
		projectID,
		&api.CreateGCRRequest{
			Name:             regName,
			GCPIntegrationID: integration.ID,
			URL:              regURL,
		},
	)
	if err != nil {
		return 0, err
	}
	color.New(color.FgGreen).Printf("created registry with id %d and name %s\n", reg.ID, reg.Name)
	return reg.ID, nil
}
|
package notify
import (
pb "github.com/LILILIhuahuahua/ustc_tencent_game/api/proto"
"github.com/LILILIhuahuahua/ustc_tencent_game/framework"
"github.com/LILILIhuahuahua/ustc_tencent_game/framework/event"
"github.com/LILILIhuahuahua/ustc_tencent_game/internal/event/info"
)
// EnterGameNotify is the event emitted when a player enters the game,
// carrying the player id and the client's connection info.
type EnterGameNotify struct {
	framework.BaseEvent
	PlayerID int32
	Connect info.ConnectInfo
}
// FromMessage populates the event from a *pb.EnterGameNotify protobuf
// message, setting the event code and copying player id and connection info.
//
// The local variable no longer shadows the imported info package.
func (e *EnterGameNotify) FromMessage(obj interface{}) {
	pbMsg := obj.(*pb.EnterGameNotify)
	e.SetCode(int32(pb.GAME_MSG_CODE_ENTER_GAME_NOTIFY))
	e.PlayerID = pbMsg.GetPlayerId()
	connectInfo := info.ConnectInfo{}
	connectInfo.FromMessage(pbMsg.GetClientConnectMsg())
	e.Connect = connectInfo
}
// CopyFromMessage builds a fresh EnterGameNotify event from a *pb.Notify
// wrapper message and returns it as an event.Event.
//
// The local variable no longer shadows the imported info package.
func (e *EnterGameNotify) CopyFromMessage(obj interface{}) event.Event {
	pbMsg := obj.(*pb.Notify).EnterGameNotify
	connectInfo := info.ConnectInfo{}
	connectInfo.FromMessage(pbMsg.GetClientConnectMsg())
	notify := &EnterGameNotify{
		PlayerID: pbMsg.GetPlayerId(),
		Connect:  connectInfo,
	}
	notify.SetCode(int32(pb.GAME_MSG_CODE_ENTER_GAME_NOTIFY))
	return notify
}
// ToMessage converts the event back into a *pb.EnterGameNotify message.
func (e *EnterGameNotify) ToMessage() interface{} {
	return &pb.EnterGameNotify{
		PlayerId: e.PlayerID,
		ClientConnectMsg: &pb.ConnectMsg{
			Ip:   e.Connect.Ip,
			Port: e.Connect.Port,
		},
	}
}
|
package controllers
import (
"testing"
"todo-app/models/connection"
)
// TestCreateTask currently only verifies that a database connection can be
// established and closed; no controller logic is exercised yet.
// (Renamed from the misspelled TestCretaeTask.)
func TestCreateTask(t *testing.T) {
	connection.InitConnection()
	defer connection.DB.Close()
}
// TestGetTasks currently only verifies that a database connection can be
// established and closed; no controller logic is exercised yet.
func TestGetTasks(t *testing.T) {
	connection.InitConnection()
	defer connection.DB.Close()
}
|
package main
import (
"fmt"
"math/rand"
"time"
)
// main fills an array with random values and demonstrates basic array
// operations: reversal, averaging, finding the maximum with its index, and
// a linear membership search.
func main() {
	var a [10]int
	var b [10]int
	rand.Seed(time.Now().UnixNano())
	for i := range a {
		a[i] = rand.Intn(100)
	}
	fmt.Println(a)
	// Show the values in reverse order.
	for i := range a {
		b[i] = a[len(a)-i-1]
	}
	fmt.Println(b)
	// Average value.
	sum := 0.0
	for _, v := range a {
		sum += float64(v)
	}
	fmt.Println("avg=", sum/float64(len(a)))
	// Maximum value and its index.
	max := a[0]
	index := 0
	for i, v := range a {
		if v > max {
			max = v
			index = i
		}
	}
	fmt.Printf("最大值为%d,索引为%d\n", max, index)
	// Linear search: does the array contain 55?
	for k, v := range a {
		if v == 55 {
			fmt.Println("找到了")
			break
		} else if k == len(a)-1 {
			fmt.Println("没找到")
		}
	}
}
|
package main
import (
"fmt"
"strings"
)
// Company maps a department name to the people working in it.
type Company map[string][]string

// addPersonToDepartment records person under department, creating the
// department's slice on first use.
func addPersonToDepartment(company map[string][]string, department string, person string) {
	people := company[department]
	company[department] = append(people, person)
}
// showPeopleInDeparment prints the members of the given department framed by
// separator rules. (The name's typo is kept: main calls it by this name.)
func showPeopleInDeparment(company map[string][]string, department string) {
	const rule = "-------------------------------------"
	fmt.Println(rule)
	fmt.Println("Department:", department, "has:")
	for _, member := range company[department] {
		fmt.Println("\t", member)
	}
	fmt.Println(rule)
}
// main runs an interactive menu loop over an in-memory company directory:
// choice 1 adds a person, 2 lists a department, 3 is not implemented, and
// anything else (including 4) exits.
func main() {
	var company = Company{}
	for {
		fmt.Println("===================================================")
		fmt.Println("1. Add person to department")
		fmt.Println("2. Show people in department")
		fmt.Println("3. Show all people in company")
		fmt.Println("4. Quit")
		fmt.Print("Enter your choice:\t")
		var choice string
		// Retrieve choice from user and switch...
		fmt.Scanln(&choice)
		fmt.Println("===================================================")
		choice = strings.Trim(choice, "\n\t")
		switch choice {
		case "1":
			// Prompt user for person's name & department and then store it
			// NOTE(review): Scanf only succeeds when the input literally
			// matches "Add <name> to <department>" — confirm the prompt
			// makes that expectation clear to the user.
			fmt.Print("Enter the name and department:\t")
			var person, department string
			fmt.Scanf("Add %s to %s", &person, &department) // Add <person-name> to <department>
			addPersonToDepartment(company, department, person)
		case "2":
			var department string
			// Prompt user for department and then display people in a department
			fmt.Print("Enter the department:\t")
			fmt.Scanf("%s", &department) // Add <person-name> to <department>
			showPeopleInDeparment(company, department)
		case "3":
			// Display all the people sorted across departments
			// showPeopleInCompany()
		default:
			return
		}
	}
}
|
package main
import (
"errors"
"fmt"
"reflect"
)
// 填充不同struct的相同字段
type Person struct {
Name string `json:name`
Age int `json:age`
}
// Student shares the Name/Age fields with Person and is used to show that
// fill works across distinct struct types.
type Student struct {
	Name string
	Age int
	Class string
}
// fill assigns values from setting to same-named, same-typed fields of the
// struct that x points to. x must be a non-nil pointer to a struct; keys
// without a matching field, or whose value type differs from the field's
// type, are skipped. It returns an error for a non-struct-pointer x or a
// nil setting map.
//
// Fixed: the original nested check only rejected x when it was not a
// pointer AND its element was not a struct — an impossible combination,
// and calling Elem() on a non-pointer type panics before the check runs.
// Both conditions are now tested short-circuit in the correct order.
func fill(x interface{}, setting map[string]interface{}) error {
	t := reflect.TypeOf(x)
	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
		return errors.New("type not struct")
	}
	if setting == nil {
		return errors.New("setting is nil")
	}
	v := reflect.ValueOf(x).Elem()
	for name, value := range setting {
		// Skip keys that do not correspond to a struct field.
		field, ok := v.Type().FieldByName(name)
		if !ok {
			continue
		}
		// Only assign when the value's dynamic type matches the field type.
		if field.Type == reflect.TypeOf(value) {
			v.FieldByName(name).Set(reflect.ValueOf(value))
		}
	}
	return nil
}
// main demonstrates fill on two struct types sharing the same fields.
func main() {
	settings := map[string]interface{}{
		"Name": "tom",
		"Age":  18,
	}
	person := &Person{}
	fill(person, settings)
	fmt.Println(person)
	student := &Student{}
	fill(student, settings)
	fmt.Println(student)
}
|
package sqlite
import (
"context"
"database/sql"
"embed"
"log"
"path"
"path/filepath"
"sync"
"time"
"github.com/twcclan/goback/backup"
"github.com/twcclan/goback/proto"
"contrib.go.opencensus.io/integrations/ocsql"
_ "github.com/mattn/go-sqlite3"
)
// tracedSQLiteDriver holds the name of the trace-wrapped sqlite3 driver
// registered in init; sql.Open must use this name instead of "sqlite3".
var tracedSQLiteDriver = ""
// init wraps the sqlite3 driver with opencensus tracing. Registration
// failure is fatal because every connection depends on the wrapped driver.
func init() {
	var err error
	// register openconsensus database wrapper
	tracedSQLiteDriver, err = ocsql.Register("sqlite3", ocsql.WithAllTraceOptions())
	if err != nil {
		log.Fatalf("failed to register ocsql driver: %s", err)
	}
}
// NewIndex returns an Index rooted at base for the given backup set, backed
// by the provided object store. Call Open before using the index.
func NewIndex(base, backupSet string, store backup.ObjectStore) *Index {
	return &Index{
		base:        base,
		backupSet:   backupSet,
		txMtx:       new(sync.Mutex),
		ObjectStore: store,
	}
}
// Compile-time check that *Index satisfies backup.Index.
var _ backup.Index = (*Index)(nil)
// Index is a SQLite-backed file/commit index layered on top of an
// ObjectStore; object reads and writes are delegated to the embedded store.
type Index struct {
	backup.ObjectStore
	base string
	backupSet string
	db *sql.DB
	// txMtx serializes transactional work on the database.
	txMtx *sync.Mutex
}
// ReachableCommits is not yet implemented and panics when called.
func (s *Index) ReachableCommits(ctx context.Context, f func(commit *proto.Commit) error) error {
	panic("implement me")
}
//go:embed sql/*.sql
var files embed.FS
// Open connects to <base>/index.db through the traced sqlite driver (with a
// 1s busy timeout), verifies the connection and executes every embedded
// sql/*.sql script before exposing the handle on s.db.
func (s *Index) Open() error {
	db, err := sql.Open(tracedSQLiteDriver, path.Join(s.base, "index.db")+"?busy_timeout=1000")
	if err != nil {
		return err
	}
	err = db.Ping()
	if err != nil {
		return err
	}
	sqlFiles, err := files.ReadDir("sql")
	if err != nil {
		return err
	}
	// Run each embedded script; presumably these are idempotent schema
	// statements — confirm against the files under sql/.
	for _, file := range sqlFiles {
		data, err := files.ReadFile(path.Join("sql", file.Name()))
		if err != nil {
			return err
		}
		_, err = db.Exec(string(data))
		if err != nil {
			return err
		}
	}
	s.db = db
	return nil
}
// FindMissing scans every indexed object ref, checks each against the
// object store, and deletes the index rows whose objects no longer exist.
//
// Fixed: the deletion transaction was committed via defer even when a
// delete failed (and the commit error was discarded), and rows.Err() was
// never checked. The transaction is now committed explicitly and rolled
// back on error.
func (s *Index) FindMissing(ctx context.Context) error {
	refs := make([][]byte, 0)
	rows, err := s.db.QueryContext(ctx, "SELECT ref FROM objects;")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		ref := make([]byte, 0)
		err = rows.Scan(&ref)
		if err != nil {
			return err
		}
		obj, err := s.ObjectStore.Get(ctx, &proto.Ref{Sha1: ref})
		if err != nil {
			return err
		}
		if obj == nil {
			log.Printf("Found missing object %x", ref)
			refs = append(refs, ref)
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	// Release the read cursor before starting the write transaction.
	rows.Close()
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}
	log.Printf("Deleting %d missing objects", len(refs))
	for _, ref := range refs {
		if _, err := tx.Exec("DELETE FROM objects where ref = ?;", ref); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}
// Close closes the underlying index database.
func (s *Index) Close() error {
	return s.db.Close()
}
// ReIndex walks every commit in the object store and (re-)indexes those
// belonging to this Index's backup set; commits of other sets are skipped.
func (s *Index) ReIndex(ctx context.Context) error {
	return s.ObjectStore.Walk(ctx, true, proto.ObjectType_COMMIT, func(obj *proto.Object) error {
		if obj.GetCommit().GetBackupSet() != s.backupSet {
			return nil
		}
		return s.index(ctx, obj.GetCommit())
	})
}
// index records a commit in the database: it traverses the commit's backup
// tree, inserting one row per file into the files table, then records the
// commit itself. A commit whose root tree cannot be retrieved is skipped;
// a failed traversal rolls the transaction back.
//
// Fixed: a statement-preparation error used to be swallowed (`return nil`)
// while leaking the open transaction; it now rolls back and returns the
// error, and the prepared statement is closed.
func (s *Index) index(ctx context.Context, commit *proto.Commit) error {
	log.Printf("Indexing commit: %v", time.Unix(commit.Timestamp, 0))
	// whenever we get to index a commit
	// we'll traverse the complete backup tree
	// to create our filesystem path index
	treeObj, err := s.ObjectStore.Get(ctx, commit.Tree)
	if err != nil {
		return err
	}
	if treeObj == nil {
		log.Printf("Root tree %x could not be retrieved", commit.Tree.Sha1)
		return nil
	}
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}
	stmt, err := tx.PrepareContext(ctx, "INSERT OR IGNORE INTO files(path, timestamp, size, mode, ref) VALUES(?,?,?,?,?)")
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt.Close()
	err = backup.TraverseTree(ctx, s.ObjectStore, treeObj, 64, func(filepath string, node *proto.TreeNode) error {
		info := node.Stat
		if info.Tree {
			return nil
		}
		// store the relative path to this file in the index
		_, sqlErr := stmt.ExecContext(ctx, filepath, info.Timestamp, info.Size, info.Mode, node.Ref.Sha1)
		return sqlErr
	})
	if err != nil {
		log.Printf("Failed building tree for commit, skipping: %v", err)
		return tx.Rollback()
	}
	_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO commits (timestamp, tree) VALUES (?, ?)", commit.Timestamp, commit.Tree.Sha1)
	if err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
// Put stores the object in the underlying store and, when it is a commit,
// indexes it immediately.
func (s *Index) Put(ctx context.Context, object *proto.Object) error {
	if err := s.ObjectStore.Put(ctx, object); err != nil {
		return err
	}
	if object.Type() == proto.ObjectType_COMMIT {
		return s.index(ctx, object.GetCommit())
	}
	return nil
}
// FileInfo returns up to count versions of the named file, newest first,
// whose timestamps are no newer than notAfter. Only the base name of the
// stored path is kept on the returned nodes.
func (s *Index) FileInfo(ctx context.Context, set string, name string, notAfter time.Time, count int) ([]*proto.TreeNode, error) {
	rows, err := s.db.QueryContext(ctx, "SELECT path, timestamp, size, mode, ref FROM files WHERE path = ? AND timestamp <= ? ORDER BY timestamp DESC LIMIT ?;", name, notAfter.Unix(), count)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	nodes := make([]*proto.TreeNode, 0, count)
	for rows.Next() {
		ref := &proto.Ref{}
		info := &proto.FileInfo{}
		if err := rows.Scan(&info.Name, &info.Timestamp, &info.Size, &info.Mode, &ref.Sha1); err != nil {
			return nil, err
		}
		info.Name = filepath.Base(info.Name)
		nodes = append(nodes, &proto.TreeNode{
			Stat: info,
			Ref:  ref,
		})
	}
	return nodes, rows.Err()
}
// CommitInfo returns up to count commits no newer than notAfter, newest
// first. The set parameter is currently not used by the query.
func (s *Index) CommitInfo(ctx context.Context, set string, notAfter time.Time, count int) ([]*proto.Commit, error) {
	rows, err := s.db.QueryContext(ctx, "SELECT timestamp, tree FROM commits WHERE timestamp <= ? ORDER BY timestamp DESC LIMIT ?;", notAfter.UTC().Unix(), count)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	commits := make([]*proto.Commit, 0, count)
	for rows.Next() {
		commit := &proto.Commit{}
		tree := proto.Ref{}
		if err := rows.Scan(&commit.Timestamp, &tree.Sha1); err != nil {
			return nil, err
		}
		commit.Tree = &tree
		commits = append(commits, commit)
	}
	return commits, rows.Err()
}
|
package store
import "sync"
// threadSafeStore wraps Go's native map with an embedded RWMutex so callers
// can guard concurrent access to the nested map.
type threadSafeStore struct {
	store map[string]map[string]interface{}
	sync.RWMutex
}

// newThreadSafeStore returns a ready-to-use empty threadSafeStore.
func newThreadSafeStore() *threadSafeStore {
	s := &threadSafeStore{}
	s.store = make(map[string]map[string]interface{})
	return s
}
|
package objectstorage
import (
"sync"
)
// PartitionsManager tracks retain counts for hierarchically keyed
// partitions; a child manager is created per key segment on demand.
type PartitionsManager struct {
	childPartitions map[string]*PartitionsManager
	retainCounter   int
	mutex           sync.RWMutex
}

// NewPartitionsManager returns an empty manager with no retained partitions.
func NewPartitionsManager() *PartitionsManager {
	manager := &PartitionsManager{}
	manager.childPartitions = make(map[string]*PartitionsManager)
	return manager
}
// IsEmpty reports, under a read lock, whether the manager holds no retains
// and no child partitions.
func (partitionsManager *PartitionsManager) IsEmpty() bool {
	partitionsManager.mutex.RLock()
	defer partitionsManager.mutex.RUnlock()
	return partitionsManager.isEmpty()
}

// isEmpty is the lock-free variant; callers must hold the mutex.
func (partitionsManager *PartitionsManager) isEmpty() bool {
	if partitionsManager.retainCounter != 0 {
		return false
	}
	return len(partitionsManager.childPartitions) == 0
}
// IsRetained reports whether the partition addressed by keys — or, when
// keys is exhausted, this manager or any of its descendants — is retained.
func (partitionsManager *PartitionsManager) IsRetained(keys []string) bool {
	partitionsManager.mutex.RLock()
	defer partitionsManager.mutex.RUnlock()
	if len(keys) == 0 {
		return !partitionsManager.isEmpty()
	}
	child, ok := partitionsManager.childPartitions[keys[0]]
	if !ok {
		return false
	}
	return child.IsRetained(keys[1:])
}
// Release decrements the retain counter addressed by keysToRelease and
// prunes child partitions that became empty, reporting whether this manager
// itself is now empty.
//
// NOTE(review): the child lookup assumes a matching earlier Retain — a
// Release for a never-retained key dereferences a nil child. The final
// isEmpty() read happens outside the mutex; confirm callers tolerate that
// race.
func (partitionsManager *PartitionsManager) Release(keysToRelease []string) bool {
	if len(keysToRelease) == 0 {
		// Leaf reached: drop one retain under the write lock.
		partitionsManager.mutex.Lock()
		partitionsManager.retainCounter--
		partitionsManager.mutex.Unlock()
	} else {
		childPartitionKey := keysToRelease[0]
		partitionsManager.mutex.RLock()
		childPartition := partitionsManager.childPartitions[childPartitionKey]
		partitionsManager.mutex.RUnlock()
		if childPartition.Release(keysToRelease[1:]) {
			partitionsManager.mutex.Lock()
			// Re-check the child's emptiness while holding our write lock,
			// since a concurrent Retain may have repopulated it.
			if childPartition.IsEmpty() {
				delete(partitionsManager.childPartitions, childPartitionKey)
			}
			partitionsManager.mutex.Unlock()
		}
	}
	return partitionsManager.isEmpty()
}
func (partitionsManager *PartitionsManager) Retain(keysToRetain []string) {
if len(keysToRetain) == 0 {
partitionsManager.mutex.Lock()
partitionsManager.retainCounter++
partitionsManager.mutex.Unlock()
return
}
partitionKey := keysToRetain[0]
partitionsManager.mutex.RLock()
if childPartition, childPartitionExists := partitionsManager.childPartitions[partitionKey]; childPartitionExists {
childPartition.Retain(keysToRetain[1:])
partitionsManager.mutex.RUnlock()
return
}
partitionsManager.mutex.RUnlock()
partitionsManager.mutex.Lock()
defer partitionsManager.mutex.Unlock()
if childPartition, childPartitionExists := partitionsManager.childPartitions[partitionKey]; childPartitionExists {
childPartition.Retain(keysToRetain[1:])
return
}
newChildPartition := NewPartitionsManager()
partitionsManager.childPartitions[partitionKey] = newChildPartition
newChildPartition.Retain(keysToRetain[1:])
}
// FreeMemory copies the content of the internal maps to newly created maps.
// This is necessary, otherwise the GC is not able to free the memory used by the old maps.
// "delete" doesn't shrink the maximum memory used by the map, since it only marks the entry as deleted.
func (partitionsManager *PartitionsManager) FreeMemory() {
partitionsManager.mutex.Lock()
defer partitionsManager.mutex.Unlock()
childPartitions := make(map[string]*PartitionsManager)
for key, childPartition := range partitionsManager.childPartitions {
childPartitions[key] = childPartition
if childPartition != nil {
childPartition.FreeMemory()
}
}
partitionsManager.childPartitions = childPartitions
}
|
package schemastore
import (
parser "github.com/daischio/daischeme/codemodel/schemaparser"
//"fmt"
"github.com/daischio/daischeme/codemodel/util"
)
// SchemaStore holds a parsed scheme together with the model name it was
// loaded for.
type SchemaStore struct {
	ModelName string         // name of the model the schema belongs to
	schema *parser.Scheme // root schema, set via New
}
// NamedSchema pairs a schema with a display name; the embedded
// *parser.Scheme exposes the parser's fields directly on the wrapper.
type NamedSchema struct {
	Name string // schema name
	*parser.Scheme
}
// New creates a SchemaStore for modelName backed by the given scheme.
func New(modelName string, s *parser.Scheme) *SchemaStore {
	return &SchemaStore{ModelName: modelName, schema: s}
}
// GetSchema returns the root schema wrapped with the store's model name.
func (s *SchemaStore) GetSchema() *NamedSchema {
	return &NamedSchema{Name: s.ModelName, Scheme: s.schema}
}
// GetSchemas returns the root schema followed by every nested "object"
// schema found anywhere below it, each named after its (capitalized)
// property key.
func (s *SchemaStore) GetSchemas() []*NamedSchema {
	schemas := make([]*NamedSchema, 0)
	// The root schema always comes first, under the model name.
	schemas = append(schemas, &NamedSchema{s.ModelName, s.schema})
	// walk recursively collects nested object schemas. The parameter is
	// deliberately not named "s" so it no longer shadows the receiver.
	var walk func(scheme *parser.Scheme)
	walk = func(scheme *parser.Scheme) {
		for k, v := range scheme.Properties {
			if v.Type == "object" && v.Properties != nil {
				// Pin a per-iteration copy: taking &v of the shared loop
				// variable made every collected schema alias the value of
				// the final iteration (pre-Go 1.22 loop semantics).
				v := v
				schemas = append(schemas, &NamedSchema{util.Capitalize(k), &v})
				walk(&v)
			}
		}
	}
	walk(s.schema)
	return schemas
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree
import (
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeofday"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestCompareTimestamps checks comparisons across the time-like datum
// types (DTime, DTimeTZ, DTimestamp, DTimestampTZ, DDate), with and
// without a session location, and verifies antisymmetry by comparing
// each pair in both directions.
func TestCompareTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	// NOTE(review): these offsets appear to use a west-positive sign
	// convention (Sydney, east of UTC, is negative), which is why
	// FixedZone negates them below — confirm against
	// NewDTimeTZFromOffset's contract.
	pacificTimeZone := int32(7 * 60 * 60)
	sydneyTimeZone := int32(-10 * 60 * 60)
	sydneyFixedZone := time.FixedZone("otan@sydney", -int(sydneyTimeZone))
	// kiwiFixedZone is 2 hours ahead of Sydney.
	kiwiFixedZone := time.FixedZone("otan@auckland", -int(sydneyTimeZone)+2*60*60)
	// No daylight savings in Hawaii!
	ddate, err := NewDDateFromTime(time.Date(2019, time.November, 22, 0, 0, 0, 0, time.UTC))
	require.NoError(t, err)
	testCases := []struct {
		desc string
		left Datum
		right Datum
		location *time.Location // session time zone; nil means the default
		expected int            // expected sign of compareTimestamps(left, right)
	}{
		{
			desc: "same DTime are equal",
			left: MakeDTime(timeofday.New(12, 0, 0, 0)),
			right: MakeDTime(timeofday.New(12, 0, 0, 0)),
			expected: 0,
		},
		{
			desc: "same DTimeTZ are equal",
			left: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
			right: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
			expected: 0,
		},
		{
			desc: "DTime and DTimeTZ both UTC, and so are equal",
			left: MakeDTime(timeofday.New(12, 0, 0, 0)),
			right: NewDTimeTZFromOffset(timeofday.New(12, 0, 0, 0), 0),
			expected: 0,
		},
		{
			desc: "DTime and DTimeTZ both Sydney time, and so are equal",
			left: MakeDTime(timeofday.New(12, 0, 0, 0)),
			right: NewDTimeTZFromOffset(timeofday.New(12, 0, 0, 0), sydneyTimeZone),
			location: sydneyFixedZone,
			expected: 0,
		},
		{
			desc: "DTimestamp and DTimestampTZ (Sydney) equal in Sydney zone",
			left: MustMakeDTimestamp(time.Date(2019, time.November, 22, 10, 0, 0, 0, time.UTC), time.Microsecond),
			right: MustMakeDTimestampTZ(time.Date(2019, time.November, 22, 10, 0, 0, 0, sydneyFixedZone), time.Microsecond),
			location: sydneyFixedZone,
			expected: 0,
		},
		{
			desc: "DTimestamp and DTimestampTZ (Sydney) equal in Sydney+2 zone",
			left: MustMakeDTimestamp(time.Date(2019, time.November, 22, 12, 0, 0, 0, time.UTC), time.Microsecond),
			right: MustMakeDTimestampTZ(time.Date(2019, time.November, 22, 10, 0, 0, 0, sydneyFixedZone), time.Microsecond),
			location: kiwiFixedZone,
			expected: 0,
		},
		{
			desc: "Date and DTimestampTZ (Sydney) equal in Sydney zone",
			left: ddate,
			right: MustMakeDTimestampTZ(time.Date(2019, time.November, 22, 0, 0, 0, 0, sydneyFixedZone), time.Microsecond),
			location: sydneyFixedZone,
			expected: 0,
		},
		{
			desc: "Date and DTimestampTZ (Sydney) equal in Sydney+2 zone",
			left: ddate,
			right: MustMakeDTimestampTZ(time.Date(2019, time.November, 21, 22, 0, 0, 0, sydneyFixedZone), time.Microsecond),
			location: kiwiFixedZone,
			expected: 0,
		},
		{
			desc: "equal wall clock time for DTime and DTimeTZ, with TimeTZ ahead",
			left: MakeDTime(timeofday.New(12, 0, 0, 0)),
			right: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
			expected: 1,
		},
		{
			desc: "equal wall clock time for DTime and DTimeTZ, with TimeTZ behind",
			left: MakeDTime(timeofday.New(12, 0, 0, 0)),
			right: NewDTimeTZFromOffset(timeofday.New(5, 0, 0, 0), pacificTimeZone),
			expected: -1,
		},
		{
			desc: "equal wall clock time for DTime and DTimeTZ, with TimeTZ ahead",
			left: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
			right: NewDTimeTZFromOffset(timeofday.New(5, 0, 0, 0), pacificTimeZone),
			expected: -1,
		},
		{
			desc: "wall clock time different for DTimeTZ and DTimeTZ",
			left: NewDTimeTZFromOffset(timeofday.New(23, 0, 0, 0), sydneyTimeZone),
			right: NewDTimeTZFromOffset(timeofday.New(5, 0, 0, 0), pacificTimeZone),
			expected: 1,
		},
	}
	for _, tc := range testCases {
		t.Run(
			tc.desc,
			func(t *testing.T) {
				ctx := &EvalContext{
					SessionData: &sessiondata.SessionData{
						Location: tc.location,
					},
				}
				// Swapping the operands must flip the sign of the result.
				assert.Equal(t, tc.expected, compareTimestamps(ctx, tc.left, tc.right))
				assert.Equal(t, -tc.expected, compareTimestamps(ctx, tc.right, tc.left))
			},
		)
	}
}
// TestCastStringToRegClassTableName checks parsing of regclass-style
// strings into TableNames: quoted identifiers keep their case, unquoted
// ones are normalized (bB -> bb below), and malformed input errors.
func TestCastStringToRegClassTableName(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		in string
		expected TableName
	}{
		{"a", MakeUnqualifiedTableName("a")},
		{`a"`, MakeUnqualifiedTableName(`a"`)},
		{`"a""".bB."cD" `, MakeTableNameWithSchema(`a"`, "bb", "cD")},
	}
	for _, tc := range testCases {
		t.Run(tc.in, func(t *testing.T) {
			out, err := castStringToRegClassTableName(tc.in)
			require.NoError(t, err)
			require.Equal(t, tc.expected, out)
		})
	}
	// Inputs that must be rejected, with the exact error text.
	errorTestCases := []struct {
		in string
		expectedError string
	}{
		{"a.b.c.d", "too many components: a.b.c.d"},
		{"", `invalid table name: `},
	}
	for _, tc := range errorTestCases {
		t.Run(tc.in, func(t *testing.T) {
			_, err := castStringToRegClassTableName(tc.in)
			require.EqualError(t, err, tc.expectedError)
		})
	}
}
// TestSplitIdentifierList checks splitting of dot-separated identifier
// lists: unquoted parts are lowercased and trimmed, quoted parts keep
// case/whitespace and support doubled-quote escapes ("" -> ").
func TestSplitIdentifierList(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		in string
		expected []string
	}{
		{`abc`, []string{"abc"}},
		{`abc.dEf `, []string{"abc", "def"}},
		{` "aBc" . d ."HeLLo"""`, []string{"aBc", "d", `HeLLo"`}},
	}
	for _, tc := range testCases {
		t.Run(tc.in, func(t *testing.T) {
			out, err := splitIdentifierList(tc.in)
			require.NoError(t, err)
			require.Equal(t, tc.expected, out)
		})
	}
	// Malformed lists must fail with the exact error text.
	errorTestCases := []struct {
		in string
		expectedError string
	}{
		{`"unclosed`, `invalid name: unclosed ": "unclosed`},
		{`"unclosed""`, `invalid name: unclosed ": "unclosed""`},
		{`hello !`, `invalid name: expected separator .: hello !`},
	}
	for _, tc := range errorTestCases {
		t.Run(tc.in, func(t *testing.T) {
			_, err := splitIdentifierList(tc.in)
			require.EqualError(t, err, tc.expectedError)
		})
	}
}
|
package memio
import (
"io"
"unicode/utf8"
)
// Buffer grants a byte slice very straightforward IO methods.
// Read-style methods consume bytes from the front of the slice;
// write-style methods append (or, for WriteAt, patch) bytes.
type Buffer []byte

// Read satisfies the io.Reader interface, consuming up to len(p) bytes
// from the front of the buffer. It returns io.EOF once the buffer is
// empty, unless p is itself empty.
func (s *Buffer) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if len(*s) == 0 {
		return 0, io.EOF
	}
	n := copy(p, *s)
	*s = (*s)[n:]
	return n, nil
}

// ReadAt satisfies the io.ReaderAt interface, reading from the given
// offset without consuming the buffer. It returns io.EOF when fewer
// than len(p) bytes are available. An offset at or beyond the end of
// the buffer now returns (0, io.EOF); previously it panicked with an
// out-of-range slice index. Negative offsets still panic, matching
// slice semantics.
//
// Care should be taken when used in conjunction with any other Read*
// calls as they will alter the start point of the buffer.
func (s *Buffer) ReadAt(p []byte, off int64) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	if off >= int64(len(*s)) {
		return 0, io.EOF
	}
	n := copy(p, (*s)[off:])
	if n < len(p) {
		return n, io.EOF
	}
	return n, nil
}

// WriteTo satisfies the io.WriterTo interface, writing the whole buffer
// to w and consuming whatever was written.
// NOTE(review): returning io.EOF from an empty buffer is unusual for an
// io.WriterTo (the convention is (0, nil)); kept for backward
// compatibility with existing callers.
func (s *Buffer) WriteTo(w io.Writer) (int64, error) {
	if len(*s) == 0 {
		return 0, io.EOF
	}
	n, err := w.Write(*s)
	*s = (*s)[n:]
	return int64(n), err
}

// Write satisfies the io.Writer interface, appending p to the buffer.
func (s *Buffer) Write(p []byte) (int, error) {
	*s = append(*s, p...)
	return len(p), nil
}

// WriteAt satisfies the io.WriterAt interface, patching p into the
// buffer starting at off and growing the capacity when needed.
// NOTE(review): the buffer's length is never extended, so bytes written
// at or past len(*s) land in the len..cap region and are invisible to
// Read until the length grows — confirm this is the intended contract
// before relying on it.
func (s *Buffer) WriteAt(p []byte, off int64) (int, error) {
	l := int64(len(p)) + off
	if int64(cap(*s)) < l {
		t := make([]byte, len(*s), l)
		copy(t, (*s)[:cap(*s)])
		*s = t
	}
	return copy((*s)[off:cap(*s)], p), nil
}

// WriteString writes a string to the buffer without first converting it
// to a byte slice.
func (s *Buffer) WriteString(str string) (int, error) {
	*s = append(*s, str...)
	return len(str), nil
}

// ReadFrom satisfies the io.ReaderFrom interface, appending everything
// r produces until it reports io.EOF (which is swallowed) or another
// error (which is returned).
func (s *Buffer) ReadFrom(r io.Reader) (int64, error) {
	var n int64
	for {
		// Guarantee spare capacity to read into: append-then-reslice
		// grows cap without changing the visible length.
		if len(*s) == cap(*s) {
			*s = append(*s, 0)[:len(*s)]
		}
		m, err := r.Read((*s)[len(*s):cap(*s)])
		*s = (*s)[:len(*s)+m]
		n += int64(m)
		if err != nil {
			if err == io.EOF {
				return n, nil
			}
			return n, err
		}
	}
}

// ReadByte satisfies the io.ByteReader interface, consuming one byte
// from the front of the buffer.
func (s *Buffer) ReadByte() (byte, error) {
	if len(*s) == 0 {
		return 0, io.EOF
	}
	b := (*s)[0]
	*s = (*s)[1:]
	return b, nil
}

// ReadRune satisfies the io.RuneReader interface, consuming one UTF-8
// rune from the front of the buffer.
func (s *Buffer) ReadRune() (rune, int, error) {
	if len(*s) == 0 {
		return 0, 0, io.EOF
	}
	r, n := utf8.DecodeRune(*s)
	*s = (*s)[n:]
	return r, n, nil
}

// WriteByte satisfies the io.ByteWriter interface, appending one byte.
func (s *Buffer) WriteByte(b byte) error {
	*s = append(*s, b)
	return nil
}
// Peek reads the next n bytes without advancing the position.
// A closed (nil) buffer yields ErrClosed; when fewer than n bytes
// remain, the whole remaining buffer is returned together with io.EOF.
func (s *Buffer) Peek(n int) ([]byte, error) {
	if *s == nil {
		return nil, ErrClosed
	} else if n > len(*s) {
		return *s, io.EOF
	}
	return (*s)[:n], nil
}
// Close satisfies the io.Closer interface, releasing the buffer.
// After Close, Peek reports ErrClosed, while the Read methods report
// io.EOF (a nil Buffer has zero length).
func (s *Buffer) Close() error {
	*s = nil
	return nil
}
|
// Copyright (C) 2018 Satoshi Konno. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package echonet
import (
"bytes"
"fmt"
"github.com/cybergarage/uecho-go/net/echonet/encoding"
"github.com/cybergarage/uecho-go/net/echonet/protocol"
)
const (
	// PropertyCodeMin and PropertyCodeMax bound the valid property code
	// range.
	PropertyCodeMin = 0x80
	PropertyCodeMax = 0xFF
	// Sizes of the two property-map formats; format 1 lists up to 15
	// codes, format 2 is a fixed 18 bytes — TODO confirm against the
	// ECHONET Lite specification.
	PropertyMapFormat1MaxSize = 15
	PropertyMapFormat2Size = 18
	PropertyMapFormatMaxSize = PropertyMapFormat2Size
	// Property attribute bit flags; combined with bitwise OR below.
	PropertyAttributeNone = 0x00
	PropertyAttributeRead = 0x01
	PropertyAttributeWrite = 0x02
	PropertyAttributeAnno = 0x10
	PropertyAttributeReadWrite = PropertyAttributeRead | PropertyAttributeWrite
	PropertyAttributeReadAnno = PropertyAttributeRead | PropertyAttributeAnno
)
// Error message texts used by Property's getters and announce.
const (
	errorPropertyNoParentNode = "Property has no parent node"
	errorPropertyNoData = "Property has no data"
)
// PropertyCode is a type for property code.
type PropertyCode = protocol.PropertyCode
// PropertyAttribute is a type for property attribute.
type PropertyAttribute uint
// Property is an instance for Echonet property.
type Property struct {
	Code PropertyCode // identifying code of the property
	Attr PropertyAttribute // bit flags (see PropertyAttribute* constants)
	Data []byte // raw property value bytes
	ParentObject *Object // owning object; nil while detached
}
// NewProperty returns a property in its zero configuration: code 0, no
// attributes, empty data and no parent object.
func NewProperty() *Property {
	return &Property{
		Code:         0,
		Attr:         PropertyAttributeNone,
		Data:         make([]byte, 0),
		ParentObject: nil,
	}
}

// NewPropertyWithCode returns a new property carrying the given code.
func NewPropertyWithCode(code PropertyCode) *Property {
	p := NewProperty()
	p.SetCode(code)
	return p
}

// NewPropertiesWithCodes returns one new property per supplied code, in
// the same order.
func NewPropertiesWithCodes(codes []PropertyCode) []*Property {
	props := make([]*Property, 0, len(codes))
	for _, code := range codes {
		props = append(props, NewPropertyWithCode(code))
	}
	return props
}
// SetParentObject attaches obj as the property's parent object.
func (prop *Property) SetParentObject(obj *Object) {
	prop.ParentObject = obj
}

// GetParentObject returns the object this property belongs to, if any.
func (prop *Property) GetParentObject() *Object {
	return prop.ParentObject
}

// GetNode returns the node owning the parent object, or nil when the
// property is not attached to any object.
func (prop *Property) GetNode() Node {
	if parentObj := prop.GetParentObject(); parentObj != nil {
		return parentObj.GetParentNode()
	}
	return nil
}
// SetCode sets a specified code to the property.
func (prop *Property) SetCode(code PropertyCode) {
	prop.Code = code
}
// GetCode returns the property code.
func (prop *Property) GetCode() PropertyCode {
	return prop.Code
}
// ClearData clears the property data. A fresh slice is allocated, so
// slices previously returned by GetData keep their old contents.
func (prop *Property) ClearData() {
	prop.Data = make([]byte, 0)
}
// Size return the property data size in bytes.
func (prop *Property) Size() int {
	return len(prop.Data)
}
// SetAttribute sets an attribute to the property.
func (prop *Property) SetAttribute(attr PropertyAttribute) {
	prop.Attr = attr
}
// GetAttribute returns the property attribute.
func (prop *Property) GetAttribute() PropertyAttribute {
	return prop.Attr
}
// IsReadable reports whether the read attribute bit is set.
func (prop *Property) IsReadable() bool {
	return (prop.Attr & PropertyAttributeRead) != 0
}

// IsWritable reports whether the write attribute bit is set.
func (prop *Property) IsWritable() bool {
	return (prop.Attr & PropertyAttributeWrite) != 0
}

// IsReadOnly reports whether the property is readable but not writable.
func (prop *Property) IsReadOnly() bool {
	return (prop.Attr&PropertyAttributeRead) != 0 && (prop.Attr&PropertyAttributeWrite) == 0
}

// IsWriteOnly reports whether the property is writable but not readable.
func (prop *Property) IsWriteOnly() bool {
	return (prop.Attr&PropertyAttributeWrite) != 0 && (prop.Attr&PropertyAttributeRead) == 0
}

// IsAnnouncement reports whether the announcement attribute bit is set.
func (prop *Property) IsAnnouncement() bool {
	return (prop.Attr & PropertyAttributeAnno) != 0
}
// IsAvailableService reports whether the property can serve the given
// ESV: write services require the write attribute, read services the
// read attribute, notification services the announcement attribute, and
// a write-read request requires both write and read.
func (prop *Property) IsAvailableService(esv protocol.ESV) bool {
	switch esv {
	case protocol.ESVWriteRequest, protocol.ESVWriteRequestResponseRequired:
		return prop.IsWritable()
	case protocol.ESVReadRequest:
		return prop.IsReadable()
	case protocol.ESVNotificationRequest, protocol.ESVNotificationResponseRequired:
		return prop.IsAnnouncement()
	case protocol.ESVWriteReadRequest:
		return prop.IsWritable() && prop.IsReadable()
	default:
		return false
	}
}
// SetData stores a private copy of data in the property. When the
// property carries the announcement attribute, setting data also
// triggers an autonomous notification ((D) basic sequence); any error
// from the notification is ignored here, as before.
func (prop *Property) SetData(data []byte) {
	prop.Data = append(make([]byte, 0, len(data)), data...)
	if prop.IsAnnouncement() {
		prop.announce()
	}
}

// SetByteData is an alias of SetData.
func (prop *Property) SetByteData(data []byte) {
	prop.SetData(data)
}

// SetIntegerData encodes data into size bytes (via encoding's integer
// conversion) and stores the result as the property data.
func (prop *Property) SetIntegerData(data uint, size uint) {
	raw := make([]byte, size)
	encoding.IntegerToByte(data, raw)
	prop.SetData(raw)
}
// GetData returns the property data. The returned slice is the
// property's internal buffer; callers must not modify it.
func (prop *Property) GetData() []byte {
	return prop.Data
}
// GetByteData returns the first byte of the property data, or an error
// when the property holds no data.
func (prop *Property) GetByteData() (byte, error) {
	if len(prop.Data) <= 0 {
		// Constant format string: fmt.Errorf(errorPropertyNoData)
		// trips go vet's printf check (non-constant format string).
		return 0, fmt.Errorf("%s", errorPropertyNoData)
	}
	return prop.Data[0], nil
}
// GetStringData returns the property data as a string; the error is
// always nil.
func (prop *Property) GetStringData() (string, error) {
	return string(prop.Data), nil
}
// GetIntegerData decodes the property data as an unsigned integer, or
// returns an error when the property holds no data.
func (prop *Property) GetIntegerData() (uint, error) {
	if len(prop.Data) <= 0 {
		return 0, fmt.Errorf("%s", errorPropertyNoData)
	}
	return encoding.ByteToInteger(prop.GetData()), nil
}
// announce sends an autonomous notification for the property through
// its parent local node. It fails when the property has no local parent
// node, and is a silent no-op when that node is not running.
func (prop *Property) announce() error {
	parentNode, ok := prop.GetNode().(*LocalNode)
	if !ok || parentNode == nil {
		// Constant format string keeps go vet's printf check quiet.
		return fmt.Errorf("%s", errorPropertyNoParentNode)
	}
	if !parentNode.IsRunning() {
		return nil
	}
	return parentNode.AnnounceProperty(prop)
}
// toProtocolProperty returns the protocol-level representation of the
// property (code and data only; the attribute is not carried over).
func (prop *Property) toProtocolProperty() *protocol.Property {
	newProp := protocol.NewProperty()
	newProp.SetCode(prop.GetCode())
	newProp.SetData(prop.GetData())
	return newProp
}
// Equals reports whether the other property has the same code,
// attribute and data.
func (prop *Property) Equals(otherProp *Property) bool {
	if prop.GetCode() != otherProp.GetCode() {
		return false
	}
	if prop.GetAttribute() != otherProp.GetAttribute() {
		return false
	}
	// bytes.Equal is the idiomatic and cheaper form of
	// bytes.Compare(a, b) == 0 (staticcheck S1004).
	return bytes.Equal(prop.GetData(), otherProp.GetData())
}
|
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"bytes"
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
)
/*Type1 type1

swagger:model Type1
*/
type Type1 struct {
	/* characters
	 */
	Characters string `json:"characters,omitempty"`
}

// Type returns the fixed polymorphic discriminator of this model.
func (m *Type1) Type() string {
	return "Type1"
}

// SetType is intentionally a no-op: the discriminator of Type1 is fixed.
func (m *Type1) SetType(val string) {
}

// UnmarshalJSON unmarshals this polymorphic type from a JSON structure,
// discarding the "type" discriminator field.
func (m *Type1) UnmarshalJSON(raw []byte) error {
	var payload struct {
		Type       string `json:"type,omitempty"`
		Characters string `json:"characters,omitempty"`
	}
	dec := json.NewDecoder(bytes.NewBuffer(raw))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	if err := dec.Decode(&payload); err != nil {
		return err
	}
	m.Characters = payload.Characters
	return nil
}

// MarshalJSON marshals this polymorphic type to a JSON structure with
// its fixed "type" discriminator included.
func (m Type1) MarshalJSON() ([]byte, error) {
	var payload struct {
		Type       string `json:"type,omitempty"`
		Characters string `json:"characters,omitempty"`
	}
	payload.Type = "Type1"
	payload.Characters = m.Characters
	return json.Marshal(payload)
}
// Validate validates this type1.
//
// Generated stub: no validations are registered for Type1, so res stays
// empty and nil is always returned; the scaffolding is kept for parity
// with other generated models.
func (m *Type1) Validate(formats strfmt.Registry) error {
	var res []error
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
|
package models
import (
"github.com/labstack/echo"
"gopkg.in/mgo.v2/bson"
"exons/utils"
)
// exonsunity is the persisted document model, stored with a Mongo
// ObjectId and exposed over JSON with title/description fields.
type exonsunity struct {
	ID bson.ObjectId `json:"_id" bson:"_id,omitempty"`
	Title string `json:"title"`
	Description string `json:"description"`
}
// BindWithContext fills the receiver from the request carried by ctx.
// NOTE(review): the bind error is handed to utils.CheckErr instead of
// being returned — confirm CheckErr's failure behaviour is what the
// callers expect.
func (c *exonsunity) BindWithContext(ctx echo.Context) {
	err := ctx.Bind(c)
	utils.CheckErr(err)
}
|
package xhtml5_test
import (
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Exercises XHTML5 rendering of "curly" (smart) quote constructs:
// single '`...`' and double "`...`" quoted strings, alone and nested in
// bold, italics, monospace, titles, lists, labeled lists, links, images
// and icons.
var _ = Describe("quoted strings", func() {
	Context("quoted strings", func() {
		It("bold content alone", func() {
			source := "*bold content*"
			expected := `<div class="paragraph">
<p><strong>bold content</strong></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("simple single quoted string", func() {
			source := "'`curly was single`'"
			expected := `<div class="paragraph">
<p>&#8216;curly was single&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("spaces with single quoted string", func() {
			source := "'` curly was single `' or so they say"
			expected := "<div class=\"paragraph\">\n" +
				"<p>&#8216; curly was single &#8217; or so they say</p>\n" +
				"</div>\n"
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("bold in single quoted string", func() {
			source := "'`curly *was* single`'"
			expected := `<div class="paragraph">
<p>&#8216;curly <strong>was</strong> single&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("italics in single quoted string", func() {
			source := "'`curly _was_ single`'"
			expected := `<div class="paragraph">
<p>&#8216;curly <em>was</em> single&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("span in single quoted string", func() {
			source := "'`curly [.strikeout]#was#_is_ single`'"
			expected := `<div class="paragraph">
<p>&#8216;curly <span class="strikeout">was</span><em>is</em> single&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in monospace string", func() {
			source := "'`curly `is` single`'"
			expected := `<div class="paragraph">
<p>&#8216;curly <code>is</code> single&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly as monospace string", func() {
			source := "'``curly``'"
			expected := `<div class="paragraph">
<p>&#8216;<code>curly</code>&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly with nested double curly", func() {
			source := "'`single\"`double`\"`'"
			expected := `<div class="paragraph">
<p>&#8216;single&#8220;double&#8221;&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		// NOTE(review): duplicate spec name — "curly in monospace
		// string" already appears above; consider renaming one of them.
		It("curly in monospace string", func() {
			source := "`'`curly`'`"
			expected := `<div class="paragraph">
<p><code>&#8216;curly&#8217;</code></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in italics", func() {
			source := "_'`curly`'_"
			expected := `<div class="paragraph">
<p><em>&#8216;curly&#8217;</em></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in bold", func() {
			source := "*'`curly`'*"
			expected := `<div class="paragraph">
<p><strong>&#8216;curly&#8217;</strong></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in title", func() {
			source := "== a '`curly`' episode"
			expected := `<div class="sect1">
<h2 id="_a_curly_episode">a &#8216;curly&#8217; episode</h2>
<div class="sectionbody">
</div>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in list element", func() {
			source := "* a '`curly`' episode"
			expected := `<div class="ulist">
<ul>
<li>
<p>a &#8216;curly&#8217; episode</p>
</li>
</ul>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in labeled list", func() {
			source := "'`term`':: something '`quoted`'"
			expected := `<div class="dlist">
<dl>
<dt class="hdlist1">&#8216;term&#8217;</dt>
<dd>
<p>something &#8216;quoted&#8217;</p>
</dd>
</dl>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in link", func() {
			source := "https://www.example.com/a['`example`']"
			expected := `<div class="paragraph">
<p><a href="https://www.example.com/a">&#8216;example&#8217;</a></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("curly in quoted link", func() {
			source := "https://www.example.com/a[\"an '`example`'\"]"
			expected := `<div class="paragraph">
<p><a href="https://www.example.com/a">an &#8216;example&#8217;</a></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("image in curly", func() {
			source := "'`a image:foo.png[]`'"
			expected := `<div class="paragraph">
<p>&#8216;a <span class="image"><img src="foo.png" alt="foo"/></span>&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("icon in curly", func() {
			source := ":icons: font\n\n'`a icon:note[]`'"
			expected := `<div class="paragraph">
<p>&#8216;a <span class="icon"><i class="fa fa-note"></i></span>&#8217;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		// Double-quoted ("`...`") variants of the cases above.
		// NOTE(review): the next spec's name says "single" but the
		// source under test is the double-quoted form.
		It("simple single quoted string", func() {
			source := "\"`curly was single`\""
			expected := `<div class="paragraph">
<p>&#8220;curly was single&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("spaces with double quoted string", func() {
			source := "\"` curly was single `\""
			expected := "<div class=\"paragraph\">\n" +
				"<p>&#8220; curly was single &#8221;</p>\n" +
				"</div>\n"
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("bold in double quoted string", func() {
			source := "\"`curly *was* single`\""
			expected := `<div class="paragraph">
<p>&#8220;curly <strong>was</strong> single&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("italics in double quoted string", func() {
			source := "\"`curly _was_ single`\""
			expected := `<div class="paragraph">
<p>&#8220;curly <em>was</em> single&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("span in double quoted string", func() {
			source := "\"`curly [.strikeout]#was#_is_ single`\""
			expected := `<div class="paragraph">
<p>&#8220;curly <span class="strikeout">was</span><em>is</em> single&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly in monospace string", func() {
			source := "\"`curly `is` single`\""
			expected := `<div class="paragraph">
<p>&#8220;curly <code>is</code> single&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly as monospace string", func() {
			source := "\"``curly``\""
			expected := `<div class="paragraph">
<p>&#8220;<code>curly</code>&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly with nested single curly", func() {
			source := "\"`double'`single`'`\""
			expected := `<div class="paragraph">
<p>&#8220;double&#8216;single&#8217;&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly in monospace string", func() {
			source := "`\"`curly`\"`"
			expected := `<div class="paragraph">
<p><code>&#8220;curly&#8221;</code></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly in italics", func() {
			source := "_\"`curly`\"_"
			expected := `<div class="paragraph">
<p><em>&#8220;curly&#8221;</em></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly in bold", func() {
			source := "*\"`curly`\"*"
			expected := `<div class="paragraph">
<p><strong>&#8220;curly&#8221;</strong></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly in title", func() {
			source := "== a \"`curly`\" episode"
			expected := `<div class="sect1">
<h2 id="_a_curly_episode">a &#8220;curly&#8221; episode</h2>
<div class="sectionbody">
</div>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double in list element", func() {
			source := "* a \"`curly`\" episode"
			expected := `<div class="ulist">
<ul>
<li>
<p>a &#8220;curly&#8221; episode</p>
</li>
</ul>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("double curly in labeled list", func() {
			source := "\"`term`\":: something \"`quoted`\""
			expected := `<div class="dlist">
<dl>
<dt class="hdlist1">&#8220;term&#8221;</dt>
<dd>
<p>something &#8220;quoted&#8221;</p>
</dd>
</dl>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		// In a link, the quotes are ambiguous, and we default to assuming they are for enclosing
		// the link text. Nest them explicitly if this is needed.
		It("double curly in link (becomes mono)", func() {
			source := "https://www.example.com/a[\"`example`\"]"
			expected := `<div class="paragraph">
<p><a href="https://www.example.com/a">&#8220;example&#8221;</a></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		// This is the unambiguous form.
		It("curly in quoted link", func() {
			source := "https://www.example.com/a[\"\"`example`\"\"]"
			expected := `<div class="paragraph">
<p><a href="https://www.example.com/a">&#8220;example&#8221;</a></p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("image in double curly", func() {
			source := "\"`a image:foo.png[]`\""
			expected := `<div class="paragraph">
<p>&#8220;a <span class="image"><img src="foo.png" alt="foo"/></span>&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
		It("icon in double curly", func() {
			source := ":icons: font\n\n\"`a icon:note[]`\""
			expected := `<div class="paragraph">
<p>&#8220;a <span class="icon"><i class="fa fa-note"></i></span>&#8221;</p>
</div>
`
			Expect(RenderXHTML(source)).To(MatchHTML(expected))
		})
	})
})
|
// Copyright 2020 cloudeng llc. All rights reserved.
// Use of this source code is governed by the Apache-2.0
// license that can be found in the LICENSE file.
// Package filewalk provides support for concurrent traversal of file system
// directories and files. It can traverse any filesytem that implements
// the Filesystem interface and is intended to be usable with cloud storage
// systems as AWS S3 or GCP's Cloud Storage. All compatible systems must
// implement some sort of hierarchical naming scheme, whether it be directory
// based (as per Unix/POSIX filesystems) or by convention (as per S3).
package filewalk
import (
"context"
"expvar"
"os"
"runtime"
"sort"
"strconv"
"sync"
"time"
"cloudeng.io/errors"
"cloudeng.io/sync/errgroup"
)
var listingVar = expvar.NewMap("filewalk-listing")
var walkingVar = expvar.NewMap("filewalk-walking")
// Contents represents the contents of the filesystem at the level represented
// by Path.
type Contents struct {
Path string `json:"p,omitempty"` // The name of the level being scanned.
Children []Info `json:"c,omitempty"` // Info on each of the next levels in the hierarchy.
Files []Info `json:"f,omitempty"` // Info for the files at this level.
Err error `json:"e,omitempty"` // Non-nil if an error occurred.
}
// Walker implements the filesyste walk.
type Walker struct {
fs Filesystem
opts options
contentsFn ContentsFunc
prefixFn PrefixFunc
errs *errors.M
}
// Option represents options accepted by Walker.
type Option func(o *options)
type options struct {
concurrency int
scanSize int
chanSize int
}
// Concurreny can be used to change the degree of concurrency used. The
// default is to use all available CPUs.
func Concurrency(n int) Option {
return func(o *options) {
o.concurrency = n
}
}
// ChanSize can be used to set the size of the channel used to send results
// to ResultsFunc. It defaults to being unbuffered.
func ChanSize(n int) Option {
return func(o *options) {
o.chanSize = n
}
}
// New creates a new Walker instance.
func New(filesystem Filesystem, opts ...Option) *Walker {
w := &Walker{fs: filesystem, errs: &errors.M{}}
w.opts.chanSize = 1000
w.opts.scanSize = 1000
for _, fn := range opts {
fn(&w.opts)
}
return w
}
// FileMode represents meta data about a single file, including its
// permissions. Not all underlying filesystems may support the full
// set of UNIX-style permissions.
type FileMode uint32
const (
ModePrefix FileMode = FileMode(os.ModeDir)
ModeLink FileMode = FileMode(os.ModeSymlink)
ModePerm FileMode = FileMode(os.ModePerm)
)
// String implements stringer.
func (fm FileMode) String() string {
return os.FileMode(fm).String()
}
// Info represents the information that can be retrieved for a single
// file or prefix.
type Info struct {
	Name    string      // base name of the file
	UserID  string      // user id as returned by the underlying system
	GroupID string      // group id as returned by the underlying system
	Size    int64       // length in bytes
	ModTime time.Time   // modification time
	Mode    FileMode    // permissions, directory or link.
	sys     interface{} // underlying data source (can return nil)
}

// Sys returns the underlying data source, if available.
func (i Info) Sys() interface{} {
	return i.sys
}

// IsPrefix reports whether the entry is a prefix (ModePrefix set).
func (i Info) IsPrefix() bool {
	return i.Mode&ModePrefix != 0
}

// IsLink reports whether the entry is a symbolic or other form of link.
func (i Info) IsLink() bool {
	return i.Mode&ModeLink != 0
}

// Perms returns the UNIX-style permission bits.
func (i Info) Perms() FileMode {
	return i.Mode & ModePerm
}
// Filesystem represents the interface that is implemented for filesystems to
// be traversed/scanned.
type Filesystem interface {
	// Stat obtains Info for the specified path.
	Stat(ctx context.Context, path string) (Info, error)
	// Join is like filepath.Join for the filesystem supported by this filesystem.
	Join(components ...string) string
	// List will send all of the contents of path over the supplied channel.
	List(ctx context.Context, path string, ch chan<- Contents)
	// IsPermissionError returns true if the specified error, as returned
	// by the filesystem's implementation, is a result of a permissions error.
	IsPermissionError(err error) bool
	// IsNotExist returns true if the specified error, as returned by the
	// filesystem's implementation, is a result of the object not existing.
	IsNotExist(err error) bool
}
// Error implements error and provides additional detail on the error
// encountered: the path being accessed and the operation in flight.
type Error struct {
	Path string // the path being accessed when the error occurred
	Op   string // the operation being performed
	Err  error  // the underlying error
}

// Error implements error. A nil Err is rendered as "<nil>" rather than
// panicking, so a partially populated Error is still printable.
func (e *Error) Error() string {
	detail := "<nil>"
	if e.Err != nil {
		detail = e.Err.Error()
	}
	return "[" + e.Path + ": " + e.Op + "] " + detail
}
// recordError appends a detailed Error to the walker's error list when
// err is non-nil; it is safe to call with a nil error. The original err
// is always returned unchanged so calls can be chained.
func (w *Walker) recordError(path, op string, err error) error {
	if err != nil {
		w.errs.Append(&Error{Path: path, Op: op, Err: err})
	}
	return err
}
// listLevel lists the contents of path, hands them to the configured
// ContentsFunc and returns the resulting children sorted by name
// (larger size first when names collide).
func (w *Walker) listLevel(ctx context.Context, idx string, path string, info *Info) []Info {
	listingVar.Set(idx, stringer(path))
	ch := make(chan Contents, w.opts.concurrency)
	go func(path string) {
		w.fs.List(ctx, path, ch)
		close(ch)
	}(path)
	children, err := w.contentsFn(ctx, path, info, ch)
	if err != nil {
		w.recordError(path, "fileFunc", err)
		return nil
	}
	// Honor cancellation, otherwise observe the (now closed) channel.
	// NOTE(review): contentsFn is documented to consume ch until closed,
	// so this extra receive looks redundant — confirm intent.
	select {
	case <-ctx.Done():
		return nil
	case <-ch:
	}
	sort.Slice(children, func(i, j int) bool {
		if children[i].Name == children[j].Name {
			// Bug fix: the previous ">=" made Less(i, j) and Less(j, i)
			// both true for equal entries, violating the strict weak
			// ordering that sort.Slice requires.
			return children[i].Size > children[j].Size
		}
		return children[i].Name < children[j].Name
	})
	return children
}
// stringer adapts a plain string to the fmt.Stringer interface so it
// can be published via expvar-style status variables.
type stringer string

// String returns the underlying string value.
func (s stringer) String() string {
	return string(s)
}
// ContentsFunc is the type of the function that is called to consume the results
// of scanning a single level in the filesystem hierarchy. It should read
// the contents of the supplied channel until that channel is closed.
// Errors, such as failing to access the prefix, are delivered over the channel.
type ContentsFunc func(ctx context.Context, prefix string, info *Info, ch <-chan Contents) ([]Info, error)

// PrefixFunc is the type of the function that is called to determine if a given
// level in the filesystem hierarchy should be further examined or traversed.
// If stop is true then traversal stops at this point, however if a list
// of children is returned, they will be traversed directly rather than
// obtaining the children from the filesystem. This allows for both
// exclusions and incremental processing in conjunction with a database to
// be implemented.
type PrefixFunc func(ctx context.Context, prefix string, info *Info, err error) (stop bool, children []Info, returnErr error)
// Walk traverses the hierarchies specified by each of the roots calling
// prefixFn and contentsFn as it goes. prefixFn will always be called
// before contentsFn for the same prefix, but no other ordering guarantees
// are provided.
func (w *Walker) Walk(ctx context.Context, prefixFn PrefixFunc, contentsFn ContentsFunc, roots ...string) error {
	rootCtx := ctx
	listers, ctx := errgroup.WithContext(rootCtx)
	// Default concurrency to the number of available CPUs.
	// NOTE(review): this mutates w.opts in place — confirm Walk is never
	// invoked concurrently on the same Walker.
	if w.opts.concurrency <= 0 {
		w.opts.concurrency = runtime.GOMAXPROCS(-1)
	}
	listers = errgroup.WithConcurrency(listers, w.opts.concurrency)
	walkers, ctx := errgroup.WithContext(rootCtx)
	walkers = errgroup.WithConcurrency(walkers, w.opts.concurrency)
	w.prefixFn = prefixFn
	w.contentsFn = contentsFn
	// create and prime the concurrency limiter for walking directories.
	// Each token doubles as a small id string used to label status vars.
	walkerLimitCh := make(chan string, w.opts.concurrency*2)
	for i := 0; i < cap(walkerLimitCh); i++ {
		walkerLimitCh <- strconv.Itoa(i)
	}
	var wg sync.WaitGroup
	wg.Add(2)
	for _, root := range roots {
		root := root // capture per-iteration value for the closure
		walkers.Go(func() error {
			w.walker(ctx, <-walkerLimitCh, root, walkerLimitCh)
			return nil
		})
	}
	// Collect errors from both groups; wg tracks the two collectors.
	go func() {
		w.errs.Append(listers.Wait())
		wg.Done()
	}()
	go func() {
		w.errs.Append(walkers.Wait())
		wg.Done()
	}()
	waitCh := make(chan struct{})
	go func() {
		wg.Wait()
		close(waitCh)
	}()
	// Return as soon as the caller's context is cancelled, otherwise
	// wait for the walk to drain completely.
	select {
	case <-rootCtx.Done():
		w.errs.Append(rootCtx.Err())
	case <-waitCh:
	}
	return w.errs.Err()
}
// walkChildren walks the supplied children of path, consuming a spare
// concurrency token from limitCh when one is available and falling back
// to a synchronous walk otherwise.
func (w *Walker) walkChildren(ctx context.Context, path string, children []Info, limitCh chan string) {
	var wg sync.WaitGroup
	wg.Add(len(children))
	for _, child := range children {
		child := child // capture per-iteration value for the closure
		var idx string
		select {
		case idx = <-limitCh:
		case <-ctx.Done():
			// NOTE(review): returning here skips wg.Wait, so goroutines
			// already launched below are not waited for — confirm intended.
			return
		default:
			// No concurrency token is available: fall back to a
			// synchronous walk (idx is still "" on this path).
			w.walker(ctx, idx, w.fs.Join(path, child.Name), limitCh)
			wg.Done()
			continue
		}
		go func() {
			w.walker(ctx, idx, w.fs.Join(path, child.Name), limitCh)
			wg.Done()
			limitCh <- idx // return the token for reuse
		}()
	}
	wg.Wait()
}

// walker processes a single prefix: it consults prefixFn to decide
// whether to stop or to use caller-supplied children, lists the level
// via listLevel otherwise, and recurses through walkChildren.
func (w *Walker) walker(ctx context.Context, idx string, path string, limitCh chan string) {
	// Fast, non-blocking cancellation check.
	select {
	default:
	case <-ctx.Done():
		return
	}
	walkingVar.Set(idx, stringer(path))
	info, err := w.fs.Stat(ctx, path)
	stop, children, err := w.prefixFn(ctx, path, &info, err)
	// err here may originate from Stat or from prefixFn; both are
	// recorded under the "stat" operation.
	w.recordError(path, "stat", err)
	if stop {
		return
	}
	if len(children) > 0 {
		// prefixFn supplied the children directly (e.g. from a database).
		w.walkChildren(ctx, path, children, limitCh)
		return
	}
	children = w.listLevel(ctx, idx, path, &info)
	if len(children) > 0 {
		w.walkChildren(ctx, path, children, limitCh)
	}
}
|
/**
*
* By So http://sooo.site
* -----
* Don't panic.
* -----
*
*/
package e
// GetMsg returns the human-readable message registered for code,
// falling back to the generic Fail message for unknown codes.
func GetMsg(code int) string {
	msg, found := Msg[code]
	if !found {
		msg = Msg[Fail]
	}
	return msg
}
|
package main
import (
"fmt"
"log"
"os"
"runtime"
"sort"
"text/tabwriter"
"time"
"github.com/azmodb/exp/azmo"
"github.com/azmodb/exp/azmo/build"
"golang.org/x/net/context"
)
// command describes a single CLI subcommand: its runner and help texts.
type command struct {
	Run   func(ctx context.Context, d *dialer, args []string) error // executes the subcommand
	Args  string                                                    // usage string for the arguments
	Help  string                                                    // long help text
	Short string                                                    // one-line description
}

// dialer holds the connection parameters for the azmo database server.
type dialer struct {
	addr    string        // server address
	timeout time.Duration // dial timeout
}
// dial connects to the azmo database server, terminating the process
// with a fatal log message on failure.
func (d *dialer) dial() *azmo.Client {
	c, err := azmo.Dial(d.addr, d.timeout)
	if err != nil {
		// Bug fix: err was previously missing from the format arguments,
		// so the message printed "%!v(MISSING)".
		log.Fatalf("dialing azmo database server: %v", err)
	}
	return c
}
// commands maps each command name to its implementation; populated in init.
var commands = map[string]command{}

func init() {
	commands["foreach"] = forEachCmd
	commands["range"] = rangeCmd
	commands["get"] = getCmd
	commands["watch"] = watchCmd
	commands["put"] = putCmd
	commands["delete"] = delCmd
	commands["version"] = versionCmd
	commands["help"] = helpCmd
}

// helpMsg is printed when help is invoked without a command argument.
const helpMsg = `
Use "azmo help [command]" for more information about a command.
`

var (
	// helpCmd prints the long help text for a named command.
	helpCmd = command{
		Help:  helpMsg,
		Short: "information about a command",
		Args:  "command",
		Run: func(_ context.Context, d *dialer, args []string) error {
			if len(args) <= 0 {
				fmt.Fprintln(os.Stderr, helpMsg)
				os.Exit(2)
			}
			cmd, found := commands[args[0]]
			if !found {
				return fmt.Errorf("%s: unknown command %q", self, args[0])
			}
			fmt.Println(cmd.Help)
			return nil
		},
	}
	// versionCmd prints build/arch/os information; it never dials.
	versionCmd = command{
		Help:  "Information about AzmoDB build version",
		Short: "information about version",
		Args:  "",
		Run: func(_ context.Context, d *dialer, args []string) error {
			version()
			return nil
		},
	}
)
// version prints the AzmoDB build version plus the runtime architecture
// and operating system as an aligned table on stdout.
func version() {
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\t', 0)
	defer tw.Flush()
	fmt.Fprintf(tw, "AzmoDB Version:\t%s\n", build.Version())
	fmt.Fprintf(tw, "ARCH:\t%s\n", runtime.GOARCH)
	fmt.Fprintf(tw, "OS:\t%s\n", runtime.GOOS)
}
// help pairs a command's display name with its short description.
type help struct {
	name string
	text string
}

// helps implements sort.Interface, ordering entries by name.
type helps []help

// Len returns the number of help entries.
func (p helps) Len() int { return len(p) }

// Less orders entries alphabetically by command name.
func (p helps) Less(i, j int) bool {
	return p[i].name < p[j].name
}

// Swap exchanges two entries.
func (p helps) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}
// printDefaults writes an aligned "name args - short help" line to
// stderr for every registered command, sorted by command name.
func printDefaults() {
	entries := make(helps, 0, len(commands))
	max := 0
	for name, cmd := range commands {
		n := name + " " + cmd.Args
		// Track the widest name column so the help text aligns.
		if len(n) > max {
			max = len(n)
		}
		entries = append(entries, help{name: n, text: cmd.Short})
	}
	sort.Sort(entries)
	// Bug fix: removed the counter variable that was incremented on
	// every iteration but never read.
	for _, h := range entries {
		fmt.Fprintf(os.Stderr, "  %-*s - %s\n",
			max, h.name, h.text)
	}
}
|
package main
/*
sync/atomic
实现原理
大致是向CPU发送对某一个块内存的LOCK信号,然后就将此内存块加锁,从而保证了内存块操作的原子性
1. 可以在并发的场景下对变量进行非侵入式的操作,保证并发安全
2. 解决的典型问题就是 i++和CAS(Compare-and-Swap)的线程安全问题
*/
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
// main runs the unsynchronized counter demo; uncomment test2 to run the
// atomic variant for comparison.
func main() {
	test1()
	//test2()
}
// test1 increments a plain int from 10000 goroutines with no
// synchronization. count++ is a non-atomic read-modify-write, so the
// final value is usually below 10000 — this data race is the point of
// the demo.
func test1() {
	var wg sync.WaitGroup
	count := 0
	start := time.Now()
	for i := 0; i < 10000; i++ {
		wg.Add(1)
		go func() {
			count++ // racy on purpose
			wg.Done()
		}()
	}
	wg.Wait()
	fmt.Printf("test1花费时间:%d, count的值为:%d \n", time.Since(start), count)
}
// test2 performs the same 10000 increments but via atomic.AddInt64, so
// the final count is always exactly 10000.
func test2() {
	var wg sync.WaitGroup
	count := int64(0)
	start := time.Now()
	for i := 0; i < 10000; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt64(&count, 1) // lock-free atomic increment
			wg.Done()
		}()
	}
	wg.Wait()
	fmt.Printf("test2 花费时间:%d, count的值为:%d \n", time.Since(start), count)
}
package main
import (
"crypto/md5"
"encoding/base64"
"fmt"
"io"
"os"
fp "path/filepath"
"strings"
"time"
"github.com/codegangsta/cli"
)
// vpnOps holds the "vpn" command tree registered with the CLI.
var vpnOps []cli.Command

func init() {
	vpnIdFlag := cli.StringFlag{
		Name:  "id, i",
		Usage: "ID of the VPN.",
	}

	vpnOps = []cli.Command{
		{
			Name:        "vpn",
			Description: "1&1 vpn operations",
			Usage:       "VPN operations.",
			Subcommands: []cli.Command{
				{
					Name: "configfile",
					// Bug fix: user-facing typo "arhive" -> "archive".
					Usage: "Downloads VPN configuration files as a zip archive.",
					Flags: []cli.Flag{
						vpnIdFlag,
						cli.StringFlag{
							Name:  "dir",
							Usage: "Directory where to store the VPN configuration.",
						},
						cli.StringFlag{
							Name: "name, n",
							// Bug fix: typos "confiration"/"arhive".
							Usage: "Name of the configuration file archive.",
						},
					},
					Action: downloadVPNConfig,
				},
				{
					Name:  "create",
					Usage: "Creates new VPN.",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name:  "datacenterid",
							Usage: "Data center ID of the VPN.",
						},
						cli.StringFlag{
							Name:  "desc, d",
							Usage: "Description of the VPN.",
						},
						cli.StringFlag{
							Name:  "name, n",
							Usage: "Name of the VPN.",
						},
					},
					Action: createVPN,
				},
				{
					Name:   "info",
					Usage:  "Shows information about VPN.",
					Flags:  []cli.Flag{vpnIdFlag},
					Action: showVPN,
				},
				{
					Name:   "list",
					Usage:  "Lists all available VPNs.",
					Flags:  queryFlags,
					Action: listVPNs,
				},
				{
					Name:  "modify",
					Usage: "Modifies VPN configuration.",
					Flags: []cli.Flag{
						vpnIdFlag,
						cli.StringFlag{
							Name:  "name, n",
							Usage: "New name of the VPN.",
						},
						cli.StringFlag{
							Name:  "desc, d",
							Usage: "New description of the VPN.",
						},
					},
					Action: modifyVPN,
				},
				{
					Name:   "rm",
					Usage:  "Deletes VPN.",
					Flags:  []cli.Flag{vpnIdFlag},
					Action: deleteVPN,
				},
			},
		},
	}
}
// createVPN creates a new VPN from the name, desc and datacenterid
// options and prints the API result.
func createVPN(ctx *cli.Context) {
	name := getRequiredOption(ctx, "name")
	desc := ctx.String("desc")
	dcID := ctx.String("datacenterid")

	_, vpn, err := api.CreateVPN(name, desc, dcID)
	exitOnError(err)
	output(ctx, vpn, okWaitMessage, false, nil, nil)
}
// listVPNs prints a table of all VPNs matching the query options.
func listVPNs(ctx *cli.Context) {
	vpns, err := api.ListVPNs(getQueryParams(ctx))
	exitOnError(err)

	rows := make([][]string, len(vpns))
	for i, vpn := range vpns {
		rows[i] = []string{
			vpn.Id,
			vpn.Name,
			vpn.Type,
			formatDateTime(time.RFC3339, vpn.CreationDate),
			vpn.State,
			getDatacenter(vpn.Datacenter),
		}
	}
	header := []string{"ID", "Name", "Type", "Creation Date", "State", "Data Center"}
	output(ctx, vpns, "", false, &header, &rows)
}
// showVPN prints detailed information about a single VPN.
func showVPN(ctx *cli.Context) {
	vpnID := getRequiredOption(ctx, "id")
	vpn, err := api.GetVPN(vpnID)
	exitOnError(err)
	output(ctx, vpn, "", true, nil, nil)
}
// deleteVPN removes the VPN identified by the required id option.
func deleteVPN(ctx *cli.Context) {
	vpnID := getRequiredOption(ctx, "id")
	vpn, err := api.DeleteVPN(vpnID)
	exitOnError(err)
	output(ctx, vpn, okWaitMessage, false, nil, nil)
}
// modifyVPN updates the name and/or description of an existing VPN.
func modifyVPN(ctx *cli.Context) {
	vpnID := getRequiredOption(ctx, "id")
	newName := ctx.String("name")
	newDesc := ctx.String("desc")
	vpn, err := api.ModifyVPN(vpnID, newName, newDesc)
	exitOnError(err)
	output(ctx, vpn, okWaitMessage, false, nil, nil)
}
// downloadVPNConfig downloads the configuration archive for a VPN and
// writes it to <dir>/<name>.zip. dir defaults to the working directory
// and name to an MD5-derived file name.
func downloadVPNConfig(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	var fileName, directory string
	var err error
	if ctx.IsSet("dir") {
		directory = ctx.String("dir")
	} else {
		directory, err = os.Getwd()
		exitOnError(err)
	}
	// Make it absolute
	if !fp.IsAbs(directory) {
		directory, err = fp.Abs(directory)
		exitOnError(err)
	}
	// Check if the directory exists; create it (and parents) if not.
	_, err = os.Stat(directory)
	if err != nil {
		if os.IsNotExist(err) {
			// Bug fix: directories need the execute bit to be entered;
			// the previous 0666 produced unusable directories.
			exitOnError(os.MkdirAll(directory, 0755))
		} else {
			exitOnError(err)
		}
	}
	content, err := api.GetVPNConfigFile(id, directory)
	exitOnError(err)
	// The API returns the archive base64-encoded.
	var data []byte
	data, err = base64.StdEncoding.DecodeString(content)
	exitOnError(err)
	if ctx.IsSet("name") {
		fileName = ctx.String("name")
		if !strings.HasSuffix(fileName, ".zip") {
			fileName += ".zip"
		}
	} else {
		fileName = "vpn_" + fmt.Sprintf("%x", md5.Sum(data)) + ".zip"
	}
	fpath := fp.Join(directory, fileName)
	f, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0666)
	// Bug fix: check the error before deferring Close; deferring first
	// would invoke Close on a nil *os.File when OpenFile fails.
	exitOnError(err)
	defer f.Close()
	var n int
	n, err = f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	exitOnError(err)
	fmt.Printf("VPN configuration written to: \"%s\"\n", fpath)
}
|
package ravendb
import (
"net/http"
"strconv"
)
// Compile-time check that GetIndexesOperation implements IMaintenanceOperation.
var _ IMaintenanceOperation = &GetIndexesOperation{}

// GetIndexesOperation fetches a page of index definitions from the server.
type GetIndexesOperation struct {
	_start    int // index of the first result to return
	_pageSize int // maximum number of results per page
	Command   *GetIndexesCommand
}

// NewGetIndexesOperation creates a GetIndexesOperation for the given page.
func NewGetIndexesOperation(_start int, _pageSize int) *GetIndexesOperation {
	return &GetIndexesOperation{
		_start:    _start,
		_pageSize: _pageSize,
	}
}

// GetCommand creates (and remembers) the RavenCommand for this operation.
func (o *GetIndexesOperation) GetCommand(conventions *DocumentConventions) (RavenCommand, error) {
	o.Command = NewGetIndexesCommand(o._start, o._pageSize)
	return o.Command, nil
}

var (
	// Compile-time check that GetIndexesCommand implements RavenCommand.
	_ RavenCommand = &GetIndexesCommand{}
)

// GetIndexesCommand is the HTTP command behind GetIndexesOperation.
type GetIndexesCommand struct {
	RavenCommandBase

	_start    int
	_pageSize int

	Result []*IndexDefinition // populated by SetResponse
}

// NewGetIndexesCommand creates a read-only GetIndexesCommand for the page.
func NewGetIndexesCommand(_start int, _pageSize int) *GetIndexesCommand {
	res := &GetIndexesCommand{
		RavenCommandBase: NewRavenCommandBase(),
		_start:           _start,
		_pageSize:        _pageSize,
	}
	res.IsReadRequest = true
	return res
}
// CreateRequest builds the GET request for the database's indexes
// endpoint, including the start/pageSize paging parameters.
func (c *GetIndexesCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	url := node.URL + "/databases/" + node.Database +
		"/indexes?start=" + strconv.Itoa(c._start) +
		"&pageSize=" + strconv.Itoa(c._pageSize)
	return newHttpGet(url)
}
// SetResponse decodes the Results array from the JSON response into
// c.Result. A nil response is treated as invalid.
func (c *GetIndexesCommand) SetResponse(response []byte, fromCache bool) error {
	if response == nil {
		return throwInvalidResponse()
	}
	var payload struct {
		Results []*IndexDefinition `json:"Results"`
	}
	if err := jsonUnmarshal(response, &payload); err != nil {
		return err
	}
	c.Result = payload.Results
	return nil
}
|
package main
import (
"encoding/json"
"fmt"
)
/*
Demonstrates two common gotchas:
1. reads/writes on a nil channel block forever
2. struct field visibility: only exported (capitalized) fields are
   accessible outside the package (and visible to encoding/json)
*/
func main() {
	chanIssue()
	jsonToStruct()
}
// chanIssue shows that receiving from a nil channel blocks forever, so
// the select below always takes the default branch.
func chanIssue() {
	var ch chan int // never made, so ch is nil and receives block
	select {
	case v, ok := <-ch:
		println(v, ok)
	default:
		println("default")
	}
}
// People is the target type for the JSON decoding demo.
type People struct {
	// Bug fix: the tag was `json: "name"` — the space after the colon
	// makes it an invalid struct tag that encoding/json ignores (go vet
	// flags it). Decoding only worked via case-insensitive field-name
	// matching; the canonical form has no space.
	Name string `json:"name"`
}
// jsonToStruct decodes a small JSON document into People and prints it.
func jsonToStruct() {
	js := `{
	"name": "seekload"
	}`
	var p People
	if err := json.Unmarshal([]byte(js), &p); err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(p)
}
|
package problem0205
// isIsomorphic reports whether the characters of s can be mapped
// one-to-one onto the characters of t (preserving order). Two maps
// enforce the bijection in both directions. Assumes len(s) == len(t),
// as in the original.
func isIsomorphic(s string, t string) bool {
	forward := make(map[byte]byte, len(s))
	backward := make(map[byte]byte, len(t))
	for i := 0; i < len(s); i++ {
		cs, ct := s[i], t[i]
		if prev, seen := forward[cs]; seen && prev != ct {
			return false
		}
		if prev, seen := backward[ct]; seen && prev != cs {
			return false
		}
		forward[cs] = ct
		backward[ct] = cs
	}
	return true
}
|
package main
import "testing"
// TestTranslateString checks translateString against known
// input/output pairs.
func TestTranslateString(t *testing.T) {
	check := map[string]string{
		"a4bc2d5e": "aaaabccddddde",
		"abcd":     "abcd",
		"qwe\\5":   "qwe\\\\\\\\\\",
	}
	for testString, correctResult := range check {
		letTest, err := translateString(testString)
		if err != nil {
			// Bug fix: the failure message previously printed the result
			// (letTest) where the input string was intended, and
			// misspelled "occurred".
			t.Fatalf("Error %v occurred by parsing %s string. Correct is %v.", err, testString, correctResult)
		}
		if letTest != correctResult {
			t.Fatalf("Incorrect parsing string %s. Result: %v, expected: %v", testString, letTest, correctResult)
		}
	}
}
// TestWrongStringsTranslateString verifies that an all-digit input is
// rejected with an empty result and a non-nil error.
func TestWrongStringsTranslateString(t *testing.T) {
	const incorrect = "45"
	got, err := translateString(incorrect)
	if got != "" {
		t.Fatalf("Wrong parsing string %s, NOT EMPTY STRING. Returning %v", incorrect, got)
	}
	if err == nil {
		t.Fatalf("Wrong parsing string %s, expected ERROR", incorrect)
	}
}
|
package main
import "fmt"
// bot is implemented by any chat bot that can produce a greeting for a
// given input and report its own version string.
type bot interface {
	getGreeting(string) string
	getBotVersion() string
}

// englishBot greets in English.
type englishBot struct {
	version string
}

// spanishBot greets in Spanish.
type spanishBot struct {
	version string
}
// main builds one bot of each language, then prints both greetings
// followed by both versions.
func main() {
	english := englishBot{version: "1AB"}
	spanish := spanishBot{version: "3RR"}

	printGreeting(english)
	printGreeting(spanish)
	printVersion(english)
	printVersion(spanish)
}
// printGreeting prints the greeting of any value implementing bot;
// accepting the interface means any implementor can be passed in.
func printGreeting(b bot) {
	greeting := b.getGreeting("ola")
	fmt.Println(greeting)
}

// printVersion prints the version of any value implementing bot.
func printVersion(b bot) {
	version := b.getBotVersion()
	fmt.Println(version)
}
// getGreeting returns a fixed English greeting; the argument is unused.
func (englishBot) getGreeting(string) string {
	return "Hello!"
}

// getGreeting returns a fixed Spanish greeting; the argument is unused.
func (spanishBot) getGreeting(string) string {
	return "ola!"
}

// getBotVersion returns the configured version string.
func (e englishBot) getBotVersion() string {
	return e.version
}

// getBotVersion returns the configured version string.
func (s spanishBot) getBotVersion() string {
	return s.version
}
|
package main
import (
"fmt"
"github.com/jacobsa/go-serial/serial"
"github.com/marcinbor85/gohex"
"io"
"net"
"os"
"strconv"
"strings"
"time"
)
var (
	// debugSpeed is the console serial speed in baud; it also paces
	// writes in debugWriteChars. It is set to 0 (pacing disabled) when
	// the debug device is a unix socket.
	debugSpeed uint = 57600 // console serial speed
)

// *** Debug Interfaces ***
// These connect to the debug interface of the target system
// A servicer is run as a goroutine
// They take commands on the debug command channel in the form of
// words []string and send their results via the debug output channel
// This one does nothing but eat the debug input stream and report
// an error, in the case where the debug and command inputs are
// in use, but the debug interface is not connected
// nullDebugInterface consumes the debug command stream forever,
// reporting every command as an error. Used when debug/command input is
// enabled but no debug interface is connected.
func nullDebugInterface() {
	for {
		words := <-debugCommandChan
		debugOutputChan <- fmt.Sprintf("Bad command: %s\n", words[0])
	}
}
// noDebugInterface silently discards every command arriving on the
// debug command channel; used when -no-debug is specified.
func noDebugInterface() {
	for {
		<-debugCommandChan
	}
}
// Return a debugger interface for the given device. In this case, we need to
// abstract the socket or serial device so that we can share the command
// processing structure, and make sure that the debug interface has access to
// them. Note the debug interface is synchronous. Returns nil (after
// reporting on debugOutputChan) when the device cannot be opened.
func getDebugInterface(device string) debugInterfaceFunc {
	var deviceReadWriter io.ReadWriter
	fi, err := os.Stat(device)
	if err != nil {
		debugOutputChan <- "Error accessing debug interface device\n"
		return nil
	}
	if (fi.Mode() & os.ModeSocket) != 0 {
		// Unix-domain socket: disable baud-rate pacing (global side
		// effect on debugSpeed) and dial the socket.
		debugSpeed = 0
		deviceReadWriter, err = net.Dial("unix", device)
		if err != nil {
			debugOutputChan <- fmt.Sprintf("Failed to connect to socket %s: %v", device, err)
			return nil
		}
	} else if (fi.Mode() & os.ModeDevice) != 0 {
		// Serial device node: open at the configured debug speed.
		options := serial.OpenOptions{
			PortName:              device,
			BaudRate:              debugSpeed,
			DataBits:              8,
			StopBits:              1,
			MinimumReadSize:       0,
			InterCharacterTimeout: 1000,
		}
		deviceReadWriter, err = serial.Open(options)
		if err != nil {
			debugOutputChan <- fmt.Sprintf("Failed to connect to device %s: %v", device, err)
			return nil
		}
	}
	// Neither a socket nor a device node (e.g. a regular file).
	if deviceReadWriter == nil {
		debugOutputChan <- "Invalid debug interface device"
		return nil
	}
	debugOutputChan <- fmt.Sprintf("Connected to debugger at %s\n", device)
	// The returned servicer first drains stale bytes, then serves
	// commands from debugCommandChan forever (it never returns).
	return func() {
		debugResync(deviceReadWriter)
		for {
			select {
			case words := <-debugCommandChan:
				doDebugCommand(deviceReadWriter, words)
			}
		}
	}
}
// doDebugCommand executes one debugger command (words[0] is the command
// name, already split from its arguments) against the debug device rw,
// reporting progress and errors on debugOutputChan.
// Protocol characters (inferred from usage below — confirm against the
// target monitor's documentation): "]" stop, "[" go, "R" reset,
// "X" step, <hex>":" bank select, <hex>"#" address, <hex>"!" write
// byte, "@" read byte.
func doDebugCommand(rw io.ReadWriter, words []string) {
	// Labeled so validation failures can abort out of nested loops.
CmdSwitch:
	switch strings.ToLower(words[0]) {
	case "stop":
		rw.Write([]byte("]"))
		debugOutputChan <- "Stop sent!\n"
	case "cont", "go":
		rw.Write([]byte("["))
		debugOutputChan <- "Go sent!\n"
	case "reset":
		rw.Write([]byte("R"))
		debugOutputChan <- "Reset sent!\n"
	case "step":
		rw.Write([]byte("X"))
		debugOutputChan <- "Step sent!\n"
	case "run":
		rw.Write([]byte("]R["))
		debugOutputChan <- "Run sent!\n"
	case "read":
		// Hex-dump 4 rows of 16 bytes starting at each given address.
		args := words[1:]
		for _, a := range args {
			if a == "" {
				continue // ignore empty strings
			}
			addr, err := strconv.ParseUint(a, 0, 24)
			if err != nil {
				debugOutputChan <- fmt.Sprintf("Bad address: %s\n", a)
				continue
			}
			// Select bank (high byte) then address (low 16 bits).
			debugWriteHex(rw, uint(addr>>16), 2)
			debugWriteChars(rw, ":")
			debugWriteHex(rw, uint(addr), 4)
			debugWriteChars(rw, "#")
			for j := 0; j < 4; j++ {
				buf := make([]uint, 16)
				for i := 0; i < 16; i++ {
					debugWriteChars(rw, "@")
					buf[i] = debugReadByte(rw)
				}
				debugOutputChan <- fmt.Sprintf("%08X ", addr)
				for i, v := range buf {
					if i == 8 {
						debugOutputChan <- " "
					}
					debugOutputChan <- fmt.Sprintf("%02X ", v)
				}
				// ASCII column: printable bytes only.
				debugOutputChan <- "["
				for _, v := range buf {
					if v >= 32 && v < 127 {
						debugOutputChan <- string(v)
					} else {
						debugOutputChan <- " "
					}
				}
				debugOutputChan <- "]\n"
				addr += 16
			}
		}
	case "write":
		// First argument is the address, the rest are data bytes.
		args := words[1:]
		haveAddr := false
		byteCount := 0
		for _, a := range args {
			if a == "" {
				continue // ignore empty strings
			}
			if haveAddr {
				dat, err := strconv.ParseUint(a, 0, 8)
				if err != nil {
					debugOutputChan <- fmt.Sprintf("Bad data: %s\n", a)
					break
				}
				debugWriteHex(rw, uint(dat), 2)
				debugWriteChars(rw, "!")
				byteCount++
			} else {
				addr, err := strconv.ParseUint(a, 0, 24)
				if err != nil {
					debugOutputChan <- fmt.Sprintf("Bad address: %s\n", a)
					break
				}
				debugWriteHex(rw, uint(addr>>16), 2)
				debugWriteChars(rw, ":")
				debugWriteHex(rw, uint(addr), 4)
				debugWriteChars(rw, "#")
				haveAddr = true
			}
		}
		plural := "s"
		if byteCount == 1 {
			plural = ""
		}
		debugOutputChan <- fmt.Sprintf("Wrote %v byte%s!\n", byteCount, plural)
	case "program", "verify":
		// Program (or verify) RAM from an Intel HEX file.
		program := strings.ToLower(words[0]) == "program"
		args := words[1:]
		if len(args) > 0 && args[0] == "" {
			args = args[1:]
		}
		if len(args) == 0 {
			debugOutputChan <- "No file specified!\n"
			break // out of switch
		}
		file, err := os.Open(args[0])
		if err != nil {
			debugOutputChan <- fmt.Sprintf("Could not open %s: %v\n", args[0], err)
			break
		}
		defer file.Close()
		mem := gohex.NewMemory()
		err = mem.ParseIntelHex(file)
		if err != nil {
			debugOutputChan <- fmt.Sprintf("Could not parse %s: %v\n", args[0], err)
			break
		}
		// Stop and reset the target before transferring.
		debugWriteChars(rw, "]R")
		for segNum, segment := range mem.GetDataSegments() {
			plural := "s"
			if len(segment.Data) == 1 {
				plural = ""
			}
			if program {
				debugOutputChan <- "Programming"
			} else {
				debugOutputChan <- "Verifying"
			}
			debugOutputChan <- fmt.Sprintf(
				" segment %v at 0x%08x, %v byte%s\n",
				segNum, segment.Address, len(segment.Data),
				plural)
			debugWriteHex(rw, uint(segment.Address>>16), 2)
			debugWriteChars(rw, ":")
			debugWriteHex(rw, uint(segment.Address), 4)
			debugWriteChars(rw, "#")
			for idx, dat := range segment.Data {
				if idx != 0 && ((segment.Address+uint32(idx))&0xFFFF == 0) {
					// roll over to next bank
					debugWriteHex(rw, uint(segment.Address+uint32(idx))>>16, 2)
					debugWriteChars(rw, ":")
					debugWriteHex(rw, 0, 4)
					debugWriteChars(rw, "#")
				}
				if program {
					debugWriteHex(rw, uint(dat), 2)
					debugWriteChars(rw, "!")
				} else {
					// Read back and compare against the file contents.
					debugWriteChars(rw, "@")
					b := debugReadByte(rw)
					if b != uint(dat) {
						debugOutputChan <- fmt.Sprintf("Validation failed at %08x!\n", segment.Address+uint32(idx))
						break CmdSwitch
					}
				}
			}
			debugOutputChan <- "Segment complete!\n"
		}
		if program {
			debugOutputChan <- "Program complete!\n"
		} else {
			debugOutputChan <- "Verify complete!\n"
		}
	case "flash", "verify-rom":
		// Program (or verify) the flash ROM from an Intel HEX file.
		program := strings.ToLower(words[0]) == "flash"
		args := words[1:]
		if len(args) > 0 && args[0] == "" {
			args = args[1:]
		}
		if len(args) == 0 {
			debugOutputChan <- "No file specified!\n"
			break // out of switch
		}
		file, err := os.Open(args[0])
		if err != nil {
			debugOutputChan <- fmt.Sprintf("Could not open %s: %v\n", args[0], err)
			break
		}
		defer file.Close()
		mem := gohex.NewMemory()
		err = mem.ParseIntelHex(file)
		if err != nil {
			debugOutputChan <- fmt.Sprintf("Could not parse %s: %v\n", args[0], err)
			break
		}
		if program {
			debugWriteChars(rw, "]R")
			debugOutputChan <- "Erasing chip...\n"
			debugEraseChip(rw, 0x20)
			debugOutputChan <- "Flashing...\n"
		} else {
			debugWriteChars(rw, "]")
			debugOutputChan <- "Verifying...\n"
		}
		for segNum, segment := range mem.GetDataSegments() {
			plural := "s"
			if len(segment.Data) == 1 {
				plural = ""
			}
			// Force segments into flash address space
			segAddr := 0x20_0000 | (segment.Address & 0x0F_FFFF)
			if program {
				debugOutputChan <- "Programming"
			} else {
				debugOutputChan <- "Verifying"
			}
			debugOutputChan <- fmt.Sprintf(
				" segment %v at 0x%08x, %v byte%s\n",
				segNum, segAddr, len(segment.Data),
				plural)
			debugWriteHex(rw, uint(segAddr>>16), 2)
			debugWriteChars(rw, ":")
			if !program {
				debugWriteHex(rw, 0, 4)
				debugWriteChars(rw, "#")
			}
			for idx, dat := range segment.Data {
				// Progress indication every 2 KiB.
				if idx&0x7FF == 0 {
					debugOutputChan <- fmt.Sprintf("%v%%\r", idx*100/len(segment.Data))
				}
				if idx != 0 && ((segAddr+uint32(idx))&0xFFFF == 0) {
					// roll over to next bank
					debugWriteHex(rw, uint(segAddr+uint32(idx))>>16, 2)
					debugWriteChars(rw, ":")
					if !program {
						debugWriteHex(rw, 0, 4)
						debugWriteChars(rw, "#")
					}
				}
				if program {
					// Since the erased chip is all 0xFF, we won't write those.
					// This will speed up flashing.
					if dat != 0xFF {
						// JEDEC byte-program unlock sequence, then the data.
						debugWriteCmd(rw, 0x5555, 0xAA)
						debugWriteCmd(rw, 0x2AAA, 0x55)
						debugWriteCmd(rw, 0x5555, 0xA0)
						debugWriteCmd(rw, uint(segAddr+uint32(idx))&0xFFFF, uint(dat))
						//debugCmdWait(20) // probably not needed
					}
				} else {
					debugWriteChars(rw, "@")
					b := debugReadByte(rw)
					if b != uint(dat) {
						debugOutputChan <- fmt.Sprintf("Validation failed at %08x!\n", segAddr+uint32(idx))
						break CmdSwitch
					}
				}
			}
			debugOutputChan <- "Segment complete!\n"
		}
		if program {
			debugOutputChan <- "Flash"
		} else {
			debugOutputChan <- "Verify"
		}
		debugOutputChan <- " complete!\n"
	case "mapram":
		// Map RAM into bank 0 by writing the mapping registers.
		debugWriteChars(rw, "]R")
		debugWriteHex(rw, 0x08, 2)
		debugWriteChars(rw, ":")
		for i := uint(0); i < 16; i++ {
			debugWriteCmd(rw, 2*i, i*16)
			debugWriteCmd(rw, 2*i+1, 0x80)
		}
		debugOutputChan <- "RAM mapped to bank 0!\n"
	case "erase":
		debugEraseChip(rw, 0x20)
		debugOutputChan <- "Flash ROM erased!\n"
	case "chipid":
		// Read and decode the flash JEDEC manufacturer/device id.
		id0, id1 := debugChipID(rw, 0x20)
		debugOutputChan <- "Manufacturer: "
		switch id0 {
		case 0xBF:
			debugOutputChan <- "SST, device: "
			switch id1 {
			case 0xD5:
				debugOutputChan <- "39xF010 (128K)\n"
			case 0xD6:
				debugOutputChan <- "39xF020 (256K)\n"
			case 0xD7:
				debugOutputChan <- "39xF040 (512K)\n"
			default:
				debugOutputChan <- fmt.Sprintf("0x%02X\n", id1)
			}
		default:
			debugOutputChan <- fmt.Sprintf("0x%02X, device: 0x%02X\n", id0, id1)
		}
	case "resync":
		debugResync(rw)
	default:
		debugOutputChan <- fmt.Sprintf("Unknown command: '%s'\n", words[0])
	}
}
// debugWriteChars writes s to the debug device one byte at a time,
// pacing characters when debugSpeed is non-zero (serial mode) so the
// target's receive buffer is not overwhelmed. Write errors stop the
// transfer and are reported on debugOutputChan.
func debugWriteChars(rw io.ReadWriter, s string) {
	var err error
	obuf := make([]byte, 1)
	for _, b := range []byte(s) {
		obuf[0] = b
		_, err = rw.Write(obuf)
		if err != nil {
			break
		}
		if debugSpeed > 0 {
			// Pace characters so we don't overwhelm the receive buffer
			// (10 bit times per character at the configured baud rate).
			time.Sleep((10000000 / time.Duration(debugSpeed)) * time.Microsecond)
		}
	}
	if err != nil {
		debugOutputChan <- fmt.Sprintf("Error writing to debug device: %v\n", err)
	}
}
// debugWriteHex writes the low l nibbles of v to the debug device as
// uppercase ASCII hex digits, most significant nibble first. Bits above
// the requested width are silently truncated, as in the original.
func debugWriteHex(rw io.ReadWriter, v uint, l uint) {
	const digits = "0123456789ABCDEF"
	for remaining := l; remaining > 0; remaining-- {
		nibble := (v >> (4 * (remaining - 1))) & 0xF
		debugWriteChars(rw, string(digits[nibble]))
	}
}
// debugWriteCmd writes one "address#data!" sequence to the debugger:
// 4 hex digits of addr, the "#" address marker, 2 hex digits of dat,
// then the "!" write marker.
func debugWriteCmd(rw io.ReadWriter, addr uint, dat uint) {
	debugWriteHex(rw, addr, 4)
	debugWriteChars(rw, "#")
	debugWriteHex(rw, dat, 2)
	debugWriteChars(rw, "!")
}
// debugCmdWait pauses for t microseconds to give the target time to
// finish a command (e.g. a flash erase).
func debugCmdWait(t int) {
	// Can't sync with go-serial, unfortunately
	// just have to hope this works
	time.Sleep(time.Duration(t) * time.Microsecond)
}
// debugReadByte reads one byte value from the debugger, which arrives
// as two ASCII hex digits. Returns 0 if the read or the parse fails
// (the error is reported on debugOutputChan).
func debugReadByte(rw io.ReadWriter) uint {
	buf := make([]byte, 2)
	// Bug fix: a single Read may legally return fewer than 2 bytes
	// (short read on serial/socket streams); io.ReadFull retries until
	// the buffer is full or an error occurs.
	if _, err := io.ReadFull(rw, buf); err != nil {
		debugOutputChan <- fmt.Sprintf("Error reading from debug device: %v\n", err)
		return 0
	}
	i, err := strconv.ParseUint(string(buf), 16, 8)
	if err != nil {
		debugOutputChan <- fmt.Sprintf("Error parsing data from debug device: %v\n", err)
		return 0
	}
	return uint(i)
}
// debugEraseChip stops and resets the target, selects the flash bank,
// issues the JEDEC chip-erase command sequence and waits for the erase
// to finish.
func debugEraseChip(rw io.ReadWriter, bank uint) {
	// Stop & reset the target.
	debugWriteChars(rw, "]R")
	// Send bank of flash chip.
	debugWriteHex(rw, bank, 2)
	debugWriteChars(rw, ":")
	// JEDEC unlock + chip-erase command sequence, in order.
	sequence := []struct{ addr, dat uint }{
		{0x5555, 0xAA},
		{0x2AAA, 0x55},
		{0x5555, 0x80},
		{0x5555, 0xAA},
		{0x2AAA, 0x55},
		{0x5555, 0x10},
	}
	for _, step := range sequence {
		debugWriteCmd(rw, step.addr, step.dat)
	}
	debugCmdWait(100000)
}
// debugEraseSector stops the target and issues the JEDEC sector-erase
// command sequence for the sector containing address sa, then waits for
// the erase to finish.
func debugEraseSector(rw io.ReadWriter, sa uint) {
	// Stop
	debugWriteChars(rw, "]")
	// Send bank of flash chip
	debugWriteHex(rw, (sa>>16)&0xF0, 2)
	debugWriteChars(rw, ":")
	// Erase sector: unlock sequence followed by the sector address + 0x30.
	debugWriteCmd(rw, 0x5555, 0xAA)
	debugWriteCmd(rw, 0x2AAA, 0x55)
	debugWriteCmd(rw, 0x5555, 0x80)
	debugWriteCmd(rw, 0x5555, 0xAA)
	debugWriteCmd(rw, 0x2AAA, 0x55)
	debugWriteHex(rw, sa>>16, 2)
	debugWriteChars(rw, ":")
	debugWriteCmd(rw, sa&0xFFFF, 0x30)
	debugCmdWait(50000)
}

// debugChipID reads the flash chip's JEDEC software id: it enters
// software-ID mode, reads the manufacturer and device bytes from
// addresses 0 and 1, then exits the mode. Returns (manufacturer, device).
func debugChipID(rw io.ReadWriter, bank uint) (uint, uint) {
	// Stop
	debugWriteChars(rw, "]")
	// Send bank of flash chip
	debugWriteHex(rw, bank, 2)
	debugWriteChars(rw, ":")
	// Software ID mode enter
	debugWriteCmd(rw, 0x5555, 0xAA)
	debugWriteCmd(rw, 0x2AAA, 0x55)
	debugWriteCmd(rw, 0x5555, 0x90)
	// Now read the ID bytes
	debugWriteHex(rw, 0, 4)
	debugWriteChars(rw, "#")
	debugWriteChars(rw, "@")
	id0 := debugReadByte(rw)
	debugWriteChars(rw, "@")
	id1 := debugReadByte(rw)
	// Software ID mode exit
	debugWriteCmd(rw, 0x5555, 0xAA)
	debugWriteCmd(rw, 0x2AAA, 0x55)
	debugWriteCmd(rw, 0x5555, 0xF0)
	return id0, id1
}
// debugResync drains up to 16 pending bytes from the debug device so
// subsequent reads start from a known state, and reports how many bytes
// were discarded. Returns the number of bytes dropped.
func debugResync(rw io.ReadWriter) int {
	scratch := make([]byte, 16)
	n, _ := rw.Read(scratch)
	if n <= 0 {
		return n
	}
	suffix := "s"
	if n == 1 {
		suffix = ""
	}
	debugOutputChan <- fmt.Sprintf("Discarded %v byte%s from debug device.\n", n, suffix)
	return n
}
|
package types
import (
"io"
comm "github.com/zhaohaijun/matrixchain/common"
"github.com/zhaohaijun/matrixchain/p2pserver/common"
)
// BlocksReq is the get-blocks p2p message payload, requesting the
// blocks between HashStart and HashStop.
type BlocksReq struct {
	HeaderHashCount uint8
	HashStart       comm.Uint256
	HashStop        comm.Uint256
}

// Serialization writes the message payload. Field order (count, start,
// stop) must match Deserialization below.
func (this *BlocksReq) Serialization(sink *comm.ZeroCopySink) error {
	sink.WriteUint8(this.HeaderHashCount)
	sink.WriteHash(this.HashStart)
	sink.WriteHash(this.HashStop)
	return nil
}

// CmdType returns the p2p command type for a get-blocks message.
func (this *BlocksReq) CmdType() string {
	return common.GET_BLOCKS_TYPE
}

// Deserialization reads the message payload.
func (this *BlocksReq) Deserialization(source *comm.ZeroCopySource) error {
	var eof bool
	// Only the final eof value is checked. This relies on ZeroCopySource
	// staying at EOF once exhausted, so an earlier short read cannot be
	// masked by a later successful one — assumed; confirm against the
	// comm package's semantics.
	this.HeaderHashCount, eof = source.NextUint8()
	this.HashStart, eof = source.NextHash()
	this.HashStop, eof = source.NextHash()
	if eof {
		return io.ErrUnexpectedEOF
	}
	return nil
}
|
package traefik
import (
"github.com/layer5io/meshery-adapter-library/adapter"
"github.com/layer5io/meshery-adapter-library/status"
mesherykube "github.com/layer5io/meshkit/utils/kubernetes"
)
// installSampleApp applies (or, when del is true, deletes) each sample
// application manifest in the given namespace and returns the resulting
// status string.
func (mesh *Mesh) installSampleApp(namespace string, del bool, templates []adapter.Template) (string, error) {
	st := status.Installing
	if del {
		st = status.Removing
	}
	for _, tpl := range templates {
		if err := mesh.applyManifest([]byte(tpl.String()), del, namespace); err != nil {
			return st, ErrSampleApp(err)
		}
	}
	// NOTE(review): status.Installed is returned even when del is true —
	// confirm whether a "removed" status is expected on deletion.
	return status.Installed, nil
}
// applyManifest applies (or deletes, when isDel is true) the manifest
// contents in the given namespace via the Meshery kubernetes client.
func (mesh *Mesh) applyManifest(contents []byte, isDel bool, namespace string) error {
	// Return the client's error directly; the previous
	// "if err != nil { return err }; return nil" was redundant.
	return mesh.MesheryKubeclient.ApplyManifest(contents, mesherykube.ApplyOptions{
		Namespace: namespace,
		Update:    true,
		Delete:    isDel,
	})
}
|
package dynatrace
import (
"encoding/json"
"fmt"
"strconv"
)
// entitiesPath is the Dynatrace v2 entities endpoint.
const entitiesPath = "/api/v2/entities"

// EntitiesResponse represents the response from Dynatrace entities endpoints
type EntitiesResponse struct {
	TotalCount  int      `json:"totalCount"`
	PageSize    int      `json:"pageSize"`
	NextPageKey string   `json:"nextPageKey"` // continuation key; empty on the last page
	Entities    []Entity `json:"entities"`
}

// Tag represents a tag applied to a Dynatrace entity
type Tag struct {
	Context              string `json:"context"`
	Key                  string `json:"key"`
	StringRepresentation string `json:"stringRepresentation"`
	Value                string `json:"value,omitempty"`
}

// Entity represents a Dynatrace entity
type Entity struct {
	EntityID    string `json:"entityId"`
	DisplayName string `json:"displayName"`
	Tags        []Tag  `json:"tags"`
}

// EntitiesClient is a client for interacting with the Dynatrace entities endpoints
type EntitiesClient struct {
	Client ClientInterface // performs the underlying HTTP requests
}

// NewEntitiesClient creates a new EntitiesClient
func NewEntitiesClient(client ClientInterface) *EntitiesClient {
	return &EntitiesClient{
		Client: client,
	}
}
// GetKeptnManagedServices gets all service entities with a keptn_managed
// and keptn_service tag, following nextPageKey pagination until every
// page has been collected.
func (ec *EntitiesClient) GetKeptnManagedServices() ([]Entity, error) {
	entities := []Entity{}
	nextPageKey := ""
	// TODO 2021-08-20: Investigate if pageSize should be optimized or removed
	pageSize := 50
	for {
		var response []byte
		var err error
		if nextPageKey == "" {
			// First page: full entity selector plus page size.
			// strconv.Itoa replaces the more roundabout
			// strconv.FormatInt(int64(...), 10); output is identical.
			response, err = ec.Client.Get(entitiesPath + "?entitySelector=type(\"SERVICE\")%20AND%20tag(\"keptn_managed\",\"[Environment]keptn_managed\")%20AND%20tag(\"keptn_service\",\"[Environment]keptn_service\")&fields=+tags&pageSize=" + strconv.Itoa(pageSize))
		} else {
			// Subsequent pages only need the continuation key.
			// NOTE(review): nextPageKey is not URL-escaped — confirm the
			// API never returns keys containing reserved characters.
			response, err = ec.Client.Get(entitiesPath + "?nextPageKey=" + nextPageKey)
		}
		if err != nil {
			return nil, err
		}
		entitiesResponse := &EntitiesResponse{}
		err = json.Unmarshal(response, entitiesResponse)
		if err != nil {
			return nil, fmt.Errorf("could not deserialize EntitiesResponse: %v", err)
		}
		entities = append(entities, entitiesResponse.Entities...)
		if entitiesResponse.NextPageKey == "" {
			break
		}
		nextPageKey = entitiesResponse.NextPageKey
	}
	return entities, nil
}
|
package handler
import (
"log"
"net/http"
"github.com/sparcs-home-go/internal/app/configure"
"github.com/sparcs-home-go/internal/app/service"
)
// SSOLogin : login thr sso
// Already-authenticated users are redirected to their saved "next" URL (or
// the configured default); otherwise a fresh SSO state token is stored in
// the session and the user is sent to the SSO login page.
func SSOLogin(w http.ResponseWriter, r *http.Request) {
	session := configure.GetSession(r)
	if auth, ok := session.Values["authenticated"].(bool); ok && auth {
		// Comma-ok form: the original bare assertion panicked when "next"
		// was unset or not a string.
		nextURL, _ := session.Values["next"].(string)
		if nextURL == "" {
			nextURL = configure.AppProperties.LoginRedirectURL
		}
		// 302 Found: the original used 301 (Moved Permanently), which
		// browsers cache — wrong for a session-dependent redirect.
		http.Redirect(w, r, nextURL, http.StatusFound)
		return
	}
	loginURL, state := service.GetLoginParams()
	session.Values["ssoState"] = state
	if err := session.Save(r, w); err != nil {
		log.Println("failed to save session:", err)
	}
	http.Redirect(w, r, loginURL, http.StatusFound)
}
// SSOLoginCallback : callback from SSO
// Validates the returned "state" against the one stored in the session,
// exchanges the authorization code for user info, marks the session as
// authenticated, and redirects to the saved "next" URL or the default.
func SSOLoginCallback(w http.ResponseWriter, r *http.Request) {
	session := configure.GetSession(r)
	prevState, ok := session.Values["ssoState"].(string)
	if !ok {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("Forbidden session"))
		return
	}
	ssoState := r.FormValue("state")
	if prevState != ssoState {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("Forbidden session"))
		return
	}
	code := r.FormValue("code")
	sso, err := service.GetUserInfo(code)
	if err != nil {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte("Not registered in SSO"))
		return
	}
	session.Values["authenticated"] = true
	session.Values["sid"] = sso["sid"]
	sparcsID := sso["sparcs_id"]
	if sparcsID != "" {
		session.Values["sparcsID"] = sparcsID
		session.Values["isSPARCS"] = true
		// handle admin
	} else {
		session.Values["isSPARCS"] = false
	}
	// Comma-ok form: the original bare assertion panicked whenever "next"
	// had never been stored in the session.
	redirectURL, _ := session.Values["next"].(string)
	if redirectURL == "" {
		redirectURL = configure.AppProperties.LoginRedirectURL
	} else {
		delete(session.Values, "next")
	}
	if err := session.Save(r, w); err != nil {
		log.Println("failed to save session:", err)
	}
	// 302 instead of the original cacheable 301.
	http.Redirect(w, r, redirectURL, http.StatusFound)
}
// SSOLogout : logout from SSO
// Expires the session cookie and redirects to the SSO logout URL built from
// the session id; unauthenticated users go straight to the logout landing page.
func SSOLogout(w http.ResponseWriter, r *http.Request) {
	session := configure.GetSession(r)
	if auth, ok := session.Values["authenticated"].(bool); !ok || !auth {
		log.Println("Logout by unauthenticated user")
		// 302 instead of the original cacheable 301.
		http.Redirect(w, r, configure.AppProperties.LogoutRedirectURL, http.StatusFound)
		return
	}
	// Comma-ok form: a session missing "sid" must not panic the handler.
	sid, _ := session.Values["sid"].(string)
	logoutURL := service.GetLogoutURL(sid)
	session.Options.MaxAge = -1 // expire the session cookie immediately
	if err := session.Save(r, w); err != nil {
		log.Println("failed to save session:", err)
	}
	log.Println("Logout from sparcs.org, logoutURL ", logoutURL)
	http.Redirect(w, r, logoutURL, http.StatusFound)
}
|
package api
import (
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
)
// accessor extracts standard object metadata (labels, annotations) from
// arbitrary runtime.Objects; shared by the matcher closures below.
var accessor = meta.NewAccessor()
// GetMatcher compiles the quota selector into a predicate over runtime
// objects. An object matches when it satisfies the label selector (if one
// is configured) and carries all of the required annotations (if any).
// Selector compilation errors are reported up front; metadata-access errors
// are reported per object by the returned predicate.
func GetMatcher(selector ClusterResourceQuotaSelector) (func(obj runtime.Object) (bool, error), error) {
	var labelSel, annotationSel labels.Selector
	if selector.LabelSelector != nil {
		compiled, err := unversioned.LabelSelectorAsSelector(selector.LabelSelector)
		if err != nil {
			return nil, err
		}
		labelSel = compiled
	}
	if len(selector.AnnotationSelector) > 0 {
		// Annotations are matched with exact key/value equality, so reuse
		// the label-selector machinery with a MatchLabels-only selector.
		compiled, err := unversioned.LabelSelectorAsSelector(&unversioned.LabelSelector{MatchLabels: selector.AnnotationSelector})
		if err != nil {
			return nil, err
		}
		annotationSel = compiled
	}
	return func(obj runtime.Object) (bool, error) {
		if labelSel != nil {
			objLabels, err := accessor.Labels(obj)
			if err != nil {
				return false, err
			}
			if !labelSel.Matches(labels.Set(objLabels)) {
				return false, nil
			}
		}
		if annotationSel != nil {
			objAnnotations, err := accessor.Annotations(obj)
			if err != nil {
				return false, err
			}
			if !annotationSel.Matches(labels.Set(objAnnotations)) {
				return false, nil
			}
		}
		return true, nil
	}, nil
}
|
package lantern_cache
import (
"encoding/binary"
)
/*
┌───────────────────┐
│ entry marshal │
├─────┬─────┬─────┬─┴───┬─────┐
│ 8 │ 2 │ 2 │ n │ m │
│ │ │ │ │ │
├─────┼─────┼─────┼─────┼─────┤
│ ts │ key │ val │ key │ val │
│ │size │size │ │ │
└─────┴─────┴─────┴─────┴─────┘
*/
// wrapEntry serializes (timestamp, key, val) into blob using the layout in
// the diagram above: 8-byte LE timestamp, 2-byte LE key size, 2-byte LE
// value size, then the raw key and value bytes. A nil blob is allocated to
// the exact size; a caller-supplied blob must have sufficient capacity.
// The returned slice always has length equal to the serialized entry size.
func wrapEntry(blob []byte, timestamp int64, key, val []byte) []byte {
	size := EntryHeadFieldSizeOf + len(key) + len(val)
	if blob == nil {
		blob = make([]byte, size)
	}
	ensure(cap(blob) >= size, "wrapEntry blob size need bigger than entry marshal")
	// Fix: re-slice to the exact entry size. The original only checked
	// capacity, so a caller-supplied blob with len < size silently truncated
	// the key/val copies (copy is bounded by len, not cap), and a longer
	// blob was returned with trailing garbage beyond the entry.
	blob = blob[:size]
	pos := 0
	binary.LittleEndian.PutUint64(blob[pos:pos+EntryTimeStampFieldSizeOf], uint64(timestamp))
	pos += EntryTimeStampFieldSizeOf
	binary.LittleEndian.PutUint16(blob[pos:pos+EntryKeyFieldSizeOf], uint16(len(key)))
	pos += EntryKeyFieldSizeOf
	binary.LittleEndian.PutUint16(blob[pos:pos+EntryValueFieldSizeOf], uint16(len(val)))
	pos += EntryValueFieldSizeOf
	pos += copy(blob[pos:], key)
	copy(blob[pos:], val)
	return blob
}
// readKey extracts the key bytes from a serialized entry. The key sits
// immediately after the fixed-size header, and the end of the returned
// slice is exactly where the value bytes begin.
func readKey(blob []byte) []byte {
	keySize := int(binary.LittleEndian.Uint16(blob[EntryTimeStampFieldSizeOf : EntryTimeStampFieldSizeOf+EntryKeyFieldSizeOf]))
	start := EntryTimeStampFieldSizeOf + EntryKeyFieldSizeOf + EntryValueFieldSizeOf
	return blob[start : start+keySize]
}
// readValue extracts the value bytes from a serialized entry. keySize must
// equal the key length recorded in the entry header.
func readValue(blob []byte, keySize uint16) []byte {
	sizePos := EntryTimeStampFieldSizeOf + EntryKeyFieldSizeOf
	valueSize := int(binary.LittleEndian.Uint16(blob[sizePos : sizePos+EntryValueFieldSizeOf]))
	start := sizePos + EntryValueFieldSizeOf + int(keySize)
	return blob[start : start+valueSize]
}
// readTimeStamp decodes the little-endian timestamp stored at the front of
// a serialized entry.
func readTimeStamp(blob []byte) int64 {
	return int64(binary.LittleEndian.Uint64(blob[:EntryTimeStampFieldSizeOf]))
}
|
package api
import "time"
// Project is a single tracked project together with the total time logged
// against it.
type Project struct {
	Name      string        `json:"name"`
	TotalTime time.Duration `json:"totalTime"`
}

// ProjectsGetResponse is the payload returned by the projects GET endpoint.
type ProjectsGetResponse struct {
	Projects []Project `json:"projects"`
}
|
package jvmstack
import "jean/rtda/heap"
// NewShimFrame builds a Frame on the given thread backed by the synthetic
// shim-return method (heap.ShimReturnMethod) and a caller-supplied operand
// stack. Other Frame fields keep their zero values.
func NewShimFrame(thread *Thread, ops *OperandStack) *Frame {
	return &Frame{
		thread:       thread,
		method:       heap.ShimReturnMethod(),
		operandStack: ops,
	}
}
|
package main
import fmt "fmt"
// reverse reverses s in place by walking two indices inward from the ends
// and swapping the symmetric pair at each step.
func reverse(s []int) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
// main demonstrates in-place reversal: it prints three sample slices,
// reverses each, and prints them again.
func main() {
	samples := [][]int{
		{1, 2, 3},
		{1, 2, 3, 4, 5},
		{99, 55, 33, 0, 11, 4, 5, 6, 7},
	}
	fmt.Println(samples[0], samples[1], samples[2])
	for _, s := range samples {
		reverse(s)
	}
	fmt.Println(samples[0], samples[1], samples[2])
}
|
package draw
import (
"fmt"
"hello-again-go/config"
"image"
"github.com/fogleman/gg"
)
// Drawer interface
// DrawText renders the given text and returns the resulting image.
type Drawer interface {
	DrawText(text string) (image.Image, error)
}

// GGDrawer implements Drawer interface using the fogleman/gg 2D graphics
// library.
type GGDrawer struct {
	imgWidth  int // canvas width in pixels (passed to gg.NewContext)
	imgHeight int // canvas height in pixels
}

// Compile-time check that *GGDrawer satisfies Drawer.
var _ Drawer = (*GGDrawer)(nil)
// NewDrawer return an implementation of Drawer based on 'drawerType' value
//
// Fix: the original assigned conf.ImgHeight to imgWidth and conf.ImgWidth to
// imgHeight (for both drawer types), silently swapping the canvas dimensions.
func NewDrawer(conf config.Config) (Drawer, error) {
	switch conf.DrawerType {
	case config.GGDRAWER:
		return &GGDrawer{imgWidth: conf.ImgWidth, imgHeight: conf.ImgHeight}, nil
	case config.MOCKDRAWER:
		return &MockDrawer{imgWidth: conf.ImgWidth, imgHeight: conf.ImgHeight}, nil
	default:
		return nil, fmt.Errorf("No drawer found for type '%s'", conf.DrawerType)
	}
}
// DrawText renders the given text centered on a white canvas sized to the
// drawer's dimensions and returns the resulting image.
func (d GGDrawer) DrawText(text string) (image.Image, error) {
	ctx := gg.NewContext(d.imgWidth, d.imgHeight)
	// White background, black text.
	ctx.SetRGB(1, 1, 1)
	ctx.Clear()
	ctx.SetRGB(0, 0, 0)
	// Integer division before the float conversion, exactly as the original
	// did, so the anchor point is byte-for-byte identical on odd sizes.
	cx := float64(d.imgWidth / 2)
	cy := float64(d.imgHeight / 2)
	ctx.DrawStringAnchored(text, cx, cy, 0.5, 0.5)
	return ctx.Image(), nil
}
|
package Nth_Digit
import "strconv"
// findNthDigit returns the n-th digit (1-indexed) of the infinite sequence
// 123456789101112... It first skips whole groups of equal-width numbers
// (9 one-digit, 90 two-digit, 900 three-digit, ...), then indexes into the
// decimal representation of the number containing position n.
func findNthDigit(n int) int {
	width, first, groupCount := 1, 1, 9
	for n > width*groupCount {
		n -= width * groupCount
		width++
		first *= 10
		groupCount *= 10
	}
	num := first + (n-1)/width
	return int(strconv.Itoa(num)[(n-1)%width] - '0')
}
|
package app
import (
"fmt"
)
const (
	// errRunnerExist is the format string for the duplicate-runner error;
	// the verb receives the runner's name.
	errRunnerExist = "runner exist. Received: %s"
)
// Config holds the options used to construct an App.
type Config struct {
	Name string
}
type App struct {
runners []*Runner
Name string
}
//New create new app instance
func New(c Config) *App {
return &App{
Name: c.Name,
}
}
func (a *App) Service(runs ...Runner) error {
for _, run := range runs {
if a.runnerExist(run) {
return fmt.Errorf(errRunnerExist, run.Name())
}
a.runners = append(a.runners, &run)
}
return nil
}
func (a *App) Start() {
fmt.Println("Starting app\n", a.Name)
}
func (a *App) runnerExist(r Runner) bool {
for _, run := range a.runners {
if run == &r {
return true
}
}
return false
} |
package page
import (
"database/sql"
"github.com/Miniand/venditio/asset"
"github.com/Miniand/venditio/core"
"github.com/Miniand/venditio/persist"
"github.com/Miniand/venditio/template"
"github.com/Miniand/venditio/web"
"github.com/gorilla/mux"
"net/http"
)
// Register wires the page module into the application: database schema
// first, then static assets, then HTTP routes.
func Register(v *core.Venditio) {
	registerSchema(v)
	registerAssets(v)
	registerRoutes(v)
}
// registerSchema declares the "pages" table: url, title and body columns
// (all NOT NULL) plus an index on title.
func registerSchema(v *core.Venditio) {
	schema := v.MustGet(persist.DEP_SCHEMA).(*persist.Schema)
	t := schema.Table("pages")
	t.AddColumn("url", &persist.Column{
		Type:    &persist.String{},
		NotNull: true,
	})
	t.AddColumn("title", &persist.Column{
		Type:    &persist.String{},
		NotNull: true,
	})
	t.AddColumn("body", &persist.Column{
		Type:    &persist.Text{},
		NotNull: true,
	})
	t.AddIndex([]string{"title"})
}
// registerAssets exposes the page module's bundled assets directory to the
// application's asset resolver.
func registerAssets(v *core.Venditio) {
	v.MustGet(asset.DEP_ASSET).(asset.Resolver).AddPackagePath(
		"github.com/Miniand/venditio/page/assets")
}
// registerRoutes mounts the /pages/{url} handler, which looks up a page by
// URL slug and renders it with the page.tmpl template.
func registerRoutes(v *core.Venditio) {
	schema := v.MustGet(persist.DEP_SCHEMA).(*persist.Schema)
	router := v.MustGet(web.DEP_ROUTER).(*mux.Router)
	router.HandleFunc("/pages/{url}", func(w http.ResponseWriter,
		r *http.Request) {
		db := v.MustGet(persist.DEP_DB).(*sql.DB)
		tmpl := v.MustGet(template.DEP_TEMPLATE).(template.Templater)
		vars := mux.Vars(r)
		rows, err := db.Query("SELECT * FROM pages WHERE url=?",
			vars["url"])
		if err != nil {
			panic(err.Error())
		}
		// Fix: ensure the result set is released even if rendering panics;
		// Close is safe to call after full iteration.
		defer rows.Close()
		models, err := persist.RowsToModels(rows, schema.Table("pages"))
		if err != nil {
			panic(err.Error())
		}
		if len(models) == 0 {
			// Fix: the original returned silently, yielding an empty 200
			// response for unknown pages; answer 404 instead.
			http.NotFound(w, r)
			return
		}
		err = tmpl.Render(w, "page.tmpl", map[string]interface{}{
			"title": models[0]["title"],
			"body":  models[0]["body"],
		})
		if err != nil {
			panic(err.Error())
		}
	})
}
|
package canary
import (
"fmt"
"github.com/Shopify/sarama"
"github.com/mailgun/oxy/utils"
"github.com/mailgun/vulcand/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/mailgun/vulcand/plugin"
"github.com/satori/go.uuid"
"io"
"log"
"time"
"encoding/json"
"net/http"
)
// Type is the middleware's registered name within vulcand.
const Type = "canary"

// GetSpec describes the canary middleware to vulcand: how to build it from
// another instance, how to build it from CLI flags, and which flags it adds.
func GetSpec() *plugin.MiddlewareSpec {
	return &plugin.MiddlewareSpec{
		Type:      Type,       // A short name for the middleware
		FromOther: FromOther,  // Tells vulcand how to create middleware from another one
		FromCli:   FromCli,    // Tells vulcand how to create middleware from CLI
		CliFlags:  CliFlags(), // Vulcand will add this flags CLI command
	}
}
// CanaryMiddleware holds the middleware configuration: the Kafka brokers
// that canary records are published to.
type CanaryMiddleware struct {
	BrokerList []string
}

// Canary is the per-route handler instance wrapping the next handler.
// NOTE(review): ServeHTTP below references c.writer and c.log, neither of
// which is declared here — this file does not compile as written; the
// struct presumably needs a record writer and a logger field. Confirm
// against the rest of the package.
type Canary struct {
	cfg  CanaryMiddleware
	next http.Handler
}
// ServeHTTP times the downstream handler and publishes a JSON canary record
// of the request/response pair.
// NOTE(review): c.writer and c.log are not fields of Canary as declared in
// this file, so this method cannot compile — the destination writer and the
// logger need to be added to the struct or obtained elsewhere.
func (c *Canary) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	start := time.Now()
	// Wrap the writer so status and headers can be inspected afterwards.
	pw := &utils.ProxyWriter{W: w}
	c.next.ServeHTTP(pw, req)
	l := c.newRecord(req, pw, time.Since(start))
	if err := json.NewEncoder(c.writer).Encode(l); err != nil {
		c.log.Errorf("Failed to marshal request: %v", err)
	}
}
// New validates the broker list and builds the middleware configuration;
// an empty list is rejected.
func New(brokers []string) (*CanaryMiddleware, error) {
	if len(brokers) == 0 {
		return nil, fmt.Errorf("Cannot have an empty broker list")
	}
	return &CanaryMiddleware{BrokerList: brokers}, nil
}
// NewHandler wraps the next handler in a Canary instance carrying a copy of
// this configuration.
// Fix: the original returned &CanaryHandler{...}, a type that does not exist
// in this file; Canary (declared above, with exactly the cfg/next fields
// used here) is the intended handler type.
func (c *CanaryMiddleware) NewHandler(next http.Handler) (http.Handler, error) {
	return &Canary{next: next, cfg: *c}, nil
}
// String renders the configuration for logs and CLI output.
func (c *CanaryMiddleware) String() string {
	return fmt.Sprint("brokers=", c.BrokerList)
}
// FromOther constructs the middleware from an already-populated
// CanaryMiddleware value (used by vulcand when copying configuration).
func FromOther(c CanaryMiddleware) (plugin.Middleware, error) {
	return New(c.BrokerList)
}
// FromCli constructs the middleware from the --brokers CLI flag values.
func FromCli(c *cli.Context) (plugin.Middleware, error) {
	return New(c.StringSlice("brokers"))
}
// CliFlags declares the CLI flags this middleware contributes to vulcand:
// a repeatable --brokers flag naming the Kafka brokers.
// Fix: the composite literal now uses keyed fields (Name/Value/Usage/EnvVar
// per codegangsta/cli v1) instead of the vet-flagged unkeyed form, which
// silently breaks if the struct's field order ever changes.
func CliFlags() []cli.Flag {
	empty := make(cli.StringSlice, 0)
	return []cli.Flag{
		cli.StringSliceFlag{Name: "brokers", Value: &empty, Usage: "Kafka Broker List", EnvVar: ""},
	}
}
// newWriter builds an io.Writer that publishes every Write call to Kafka
// through a synchronous producer. The returned error is currently always
// nil (newDataCollector aborts the process on failure instead).
func newWriter(brokers []string) (io.Writer, error) {
	dataCollector := newDataCollector(brokers)
	return &kafkaWriter{s: dataCollector}, nil
}

// kafkaWriter adapts a sarama.SyncProducer to the io.Writer interface.
type kafkaWriter struct {
	s sarama.SyncProducer
}
// newDataCollector creates a synchronous Kafka producer configured for
// strong delivery guarantees (all in-sync replicas must ack, 10 retries).
// NOTE(review): log.Fatalln terminates the whole process on connection
// failure — harsh for middleware code; consider returning the error.
func newDataCollector(brokerList []string) sarama.SyncProducer {
	// For the data collector, we are looking for strong consistency semantics.
	// Because we don't change the flush settings, sarama will try to produce messages
	// as fast as possible to keep latency low.
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
	// On the broker side, you may want to change the following settings to get
	// stronger consistency guarantees:
	// - For your broker, set `unclean.leader.election.enable` to false
	// - For the topic, you could increase `min.insync.replicas`.
	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}
	return producer
}
// Write publishes val to the "important" topic keyed by a fresh UUID and
// reports how many bytes were accepted.
// Fix: the original returned a constant 1, violating the io.Writer contract
// that Write return len(p) when the entire slice is consumed — callers such
// as json.Encoder treat a short count as an error.
func (k *kafkaWriter) Write(val []byte) (int, error) {
	u1 := uuid.NewV4()
	_, _, err := k.s.SendMessage(&sarama.ProducerMessage{
		Topic: "important",
		Key:   sarama.StringEncoder(u1.String()),
		Value: sarama.ByteEncoder(val),
	})
	if err != nil {
		return 0, err
	}
	return len(val), nil
}
func (c *Canary) newRecord(req *http.Request, pw *utils.ProxyWriter, diff time.Duration) *Record {
return &Record{
Request: Request{
Method: req.Method,
URL: req.URL.String(),
TLS: newTLS(req),
BodyBytes: bodyBytes(req.Header),
Headers: captureHeaders(req.Header, t.reqHeaders),
Body: captureBody(req.Body),
},
Response: Response{
Code: pw.StatusCode(),
BodyBytes: bodyBytes(pw.Header()),
Roundtrip: float64(diff) / float64(time.Millisecond),
Headers: captureHeaders(pw.Header(), t.respHeaders),
Body: captureBody()
},
}
}
func captureBody(body io.ReadCloser) ([]byte, error) {
b, err := ioutil.ReadAll(body)
return b, err
}
func captureHeaders(in http.Header) http.Header {
if in == nil {
return nil
}
out := make(http.Header, len(in))
for h, vals := range in {
for i := range vals {
out.Add(h, vals[i])
}
}
return out
}
// Record pairs a captured HTTP request with its response for publication
// to the canary stream.
type Record struct {
	Request  Request  `json:"request"`
	Response Response `json:"response"`
}

// Request contains information about the proxied HTTP request.
type Request struct {
	Method    string      `json:"method"`            // Method - request method
	BodyBytes int64       `json:"body_bytes"`        // BodyBytes - size of request body in bytes
	URL       string      `json:"url"`               // URL - Request URL
	Headers   http.Header `json:"headers,omitempty"` // Headers - optional request headers, will be recorded if configured
	TLS       *TLS        `json:"tls,omitempty"`     // TLS - optional TLS record, will be recorded if it's a TLS connection
	Body      string      `json:"body"`              // Body of the Request
}

// Resp contains information about HTTP response
type Response struct {
	Code      int         `json:"code"`              // Code - response status code
	Roundtrip float64     `json:"roundtrip"`         // Roundtrip - round trip time in milliseconds
	Headers   http.Header `json:"headers,omitempty"` // Headers - optional headers, will be recorded if configured
	BodyBytes int64       `json:"body_bytes"`        // BodyBytes - size of response body in bytes
	Body      string      `json:"body"`              // Body sent back to the requesting client
}

// TLS contains information about this TLS connection
type TLS struct {
	Version     string `json:"version"`      // Version - TLS version
	Resume      bool   `json:"resume"`       // Resume tells if the session has been re-used (session tickets)
	CipherSuite string `json:"cipher_suite"` // CipherSuite contains cipher suite used for this connection
	Server      string `json:"server"`       // Server contains server name used in SNI
}
|
package model
import "github.com/dgrijalva/jwt-go"
// User is an account record exposed over the API; Password is omitted from
// JSON output when empty.
type User struct {
	ID       int    `json:"id" validate:"numeric,gte=0"`
	Username string `json:"username" validate:"required,min=3,max=32"`
	Password string `json:"password,omitempty"` //todo better password
}

// UserToken is the JWT claim set carried by issued tokens, combining the
// user identity with the standard registered claims.
type UserToken struct {
	UserID   string `json:"id"`
	Username string `json:"username"`
	jwt.StandardClaims
}

// UserLogin is the credentials payload accepted by the login endpoint.
type UserLogin struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// Users wraps a list of users.
type Users struct {
	Users []User
}

// UserWallet pairs a username with an account balance.
// NOTE(review): the unit of Balance (cents? whole units?) is not evident
// from this file — confirm before using in calculations.
type UserWallet struct {
	Username string
	Balance  int
}
|
package models
// IstioConfigList istioConfigList
//
// This type is used for returning a response of IstioConfigList
//
// swagger:model IstioConfigList
type IstioConfigList struct {
	// The namespace of istioConfiglist
	//
	// required: true
	Namespace Namespace `json:"namespace"`
	// One collection per Istio object kind found in the namespace.
	Gateways            Gateways            `json:"gateways"`
	VirtualServices     VirtualServices     `json:"virtualServices"`
	DestinationRules    DestinationRules    `json:"destinationRules"`
	ServiceEntries      ServiceEntries      `json:"serviceEntries"`
	Rules               IstioRules          `json:"rules"`
	Adapters            IstioAdapters       `json:"adapters"`
	Templates           IstioTemplates      `json:"templates"`
	QuotaSpecs          QuotaSpecs          `json:"quotaSpecs"`
	QuotaSpecBindings   QuotaSpecBindings   `json:"quotaSpecBindings"`
	Policies            Policies            `json:"policies"`
	MeshPolicies        MeshPolicies        `json:"meshPolicies"`
	ClusterRbacConfigs  ClusterRbacConfigs  `json:"clusterRbacConfigs"`
	RbacConfigs         RbacConfigs         `json:"rbacConfigs"`
	ServiceRoles        ServiceRoles        `json:"serviceRoles"`
	ServiceRoleBindings ServiceRoleBindings `json:"serviceRoleBindings"`
	Sidecars            Sidecars            `json:"sidecars"`
	// Validation results for the objects above.
	IstioValidations IstioValidations `json:"validations"`
}
// IstioConfigDetails holds one Istio object of the kind named by ObjectType;
// presumably only the pointer matching ObjectType is populated while the
// rest stay nil (confirm against the producing service).
type IstioConfigDetails struct {
	Namespace  Namespace `json:"namespace"`
	ObjectType string    `json:"objectType"`

	Gateway            *Gateway            `json:"gateway"`
	VirtualService     *VirtualService     `json:"virtualService"`
	DestinationRule    *DestinationRule    `json:"destinationRule"`
	ServiceEntry       *ServiceEntry       `json:"serviceEntry"`
	Rule               *IstioRule          `json:"rule"`
	Adapter            *IstioAdapter       `json:"adapter"`
	Template           *IstioTemplate      `json:"template"`
	QuotaSpec          *QuotaSpec          `json:"quotaSpec"`
	QuotaSpecBinding   *QuotaSpecBinding   `json:"quotaSpecBinding"`
	Policy             *Policy             `json:"policy"`
	MeshPolicy         *MeshPolicy         `json:"meshPolicy"`
	ClusterRbacConfig  *ClusterRbacConfig  `json:"clusterRbacConfig"`
	RbacConfig         *RbacConfig         `json:"rbacConfig"`
	ServiceRole        *ServiceRole        `json:"serviceRole"`
	ServiceRoleBinding *ServiceRoleBinding `json:"serviceRoleBinding"`
	Sidecar            *Sidecar            `json:"sidecar"`

	// Permissions describes what the requesting user may do with the object.
	Permissions     ResourcePermissions `json:"permissions"`
	IstioValidation *IstioValidation    `json:"validation"`
}

// ResourcePermissions holds permission flags for an object type
// True means allowed.
type ResourcePermissions struct {
	Create bool `json:"create"`
	Update bool `json:"update"`
	Delete bool `json:"delete"`
}
|
package main
import "sort"
//1051. 高度检查器
//学校打算为全体学生拍一张年度纪念照。根据要求,学生需要按照 非递减 的高度顺序排成一行。
//
//排序后的高度情况用整数数组 expected 表示,其中 expected[i] 是预计排在这一行中第 i 位的学生的高度(下标从 0 开始)。
//
//给你一个整数数组 heights ,表示 当前学生站位 的高度情况。heights[i] 是这一行中第 i 位学生的高度(下标从 0 开始)。
//
//返回满足 heights[i] != expected[i] 的 下标数量 。
//
//
//
//示例:
//
//输入:heights =[1,1,4,2,1,3]
//输出:3
//解释:
//高度:[1,1,4,2,1,3]
//预期:[1,1,1,2,3,4]
//下标 2 、4 、5 处的学生高度不匹配。
//示例 2:
//
//输入:heights = [5,1,2,3,4]
//输出:5
//解释:
//高度:[5,1,2,3,4]
//预期:[1,2,3,4,5]
//所有下标的对应学生高度都不匹配。
//示例 3:
//
//输入:heights = [1,2,3,4,5]
//输出:0
//解释:
//高度:[1,2,3,4,5]
//预期:[1,2,3,4,5]
//所有下标的对应学生高度都匹配。
//
//
//提示:
//
//1 <= heights.length <= 100
//1 <= heights[i] <= 100
// heightChecker counts how many positions of heights differ from its
// non-decreasing (sorted) ordering.
func heightChecker(heights []int) int {
	expected := make([]int, len(heights))
	copy(expected, heights)
	sort.Ints(expected)
	mismatches := 0
	for i := range heights {
		if heights[i] != expected[i] {
			mismatches++
		}
	}
	return mismatches
}
|
package gostat
import (
"fmt"
"math"
"math/rand"
"testing"
)
// TestDump feeds a small fixed data set (including negative outliers) into a
// 10-bucket Stat and prints it; a smoke test with no assertions.
func TestDump(t *testing.T) {
	data := []int{2, 5, 4, 7, 1, 6, 1, 4, 4, 4, 2, -20, 40, -50, 100, -70}
	fmt.Println("Input is : ", data)
	s := NewStat(10)
	// fmt.Println(s)
	for _, i := range data {
		s.Add(i)
		// fmt.Println(s)
	}
	fmt.Println(s)
}
// BenchmarkAdd0 measures the unexported add entry point with float64 input.
// NOTE(review): this drives s.add (lowercase) with a float loop counter
// while BenchmarkAdd1 drives the exported Add with ints — confirm both
// entry points are intended to be benchmarked separately.
func BenchmarkAdd0(b *testing.B) {
	s := NewStat(20)
	b.ResetTimer()
	for i := 0.; i < float64(b.N); i++ {
		s.add(i)
	}
}
// BenchmarkAdd1 measures the exported Add entry point with int input.
func BenchmarkAdd1(b *testing.B) {
	s := NewStat(20)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Add(i)
	}
}
// TestDist1 prints a 20-bucket Stat filled with 10k uniform [0,1) samples;
// a smoke test with no assertions.
func TestDist1(t *testing.T) {
	fmt.Println("Uniform distribution")
	s := NewStat(20)
	for i := 0; i < 10_000; i++ {
		s.Add(rand.Float64())
	}
	fmt.Println(s)
}
// TestDist2 mixes two disjoint uniform distributions and checks that the
// direct Count accessor agrees with the count from CountMeanVar.
func TestDist2(t *testing.T) {
	fmt.Println("Two separate uniform distribution")
	s := NewStat(30)
	for i := 0; i < 10_000; i++ {
		// One sample from [-5000,-4000) and one from [0,10000) per loop.
		s.Add(rand.Float64()*1000. - 5000.)
		s.Add(rand.Float64() * 10000.)
	}
	fmt.Println(s)
	c, _, _ := s.CountMeanVar()
	if c != s.Count() {
		t.Fatal("Direct count not providing same result as combined count")
	}
}
// TestDist3 prints a Stat filled with 100k exponentially distributed samples
// (-ln(1-U) is Exp(1)); a smoke test with no assertions.
// NOTE(review): "Skewd" in the printed label is a typo ("Skewed") — left
// as-is since it is runtime output.
func TestDist3(t *testing.T) {
	fmt.Println("Skewd distribution")
	s := NewStat(30)
	for i := 0; i < 100_000; i++ {
		s.Add(-math.Log(1 - rand.Float64()))
	}
	fmt.Println(s)
}
// TestRepartNormal prints NRepart over [-1000,1000) for a Stat filled with
// 100 normal samples (mean 200, sd 60); a smoke test with no assertions.
func TestRepartNormal(t *testing.T) {
	s := NewStat(10)
	for i := 0; i < 100; i++ {
		s.Add(200. + 60.*rand.NormFloat64())
	}
	fmt.Println(s)
	for i := -1000.; i < 1000.; i += 20 {
		fmt.Printf("%5f\t==REPART==>\t%5f\n", i, s.NRepart(i))
	}
}
// TestRepartUniform prints NRepart over [-1000,1000) for a Stat filled with
// 100 evenly spaced samples (200, 260, ...); a smoke test with no assertions.
func TestRepartUniform(t *testing.T) {
	s := NewStat(10)
	for i := 0; i < 100; i++ {
		s.Add(200. + 60.*float64(i))
	}
	fmt.Println(s)
	for i := -1000.; i < 1000.; i += 20 {
		fmt.Printf("%5f\t==REPART==>\t%5f\n", i, s.NRepart(i))
	}
}
|
//Package game is the core of the game.
package game
import (
"time"
"github.com/go-gl/glfw/v3.1/glfw"
)
// BaseEngine owns the main-loop state: a run flag and the GLFW window whose
// close request terminates the loop.
type BaseEngine struct {
	run    bool
	window *glfw.Window
}
// UpdateState is an interface that means that the object is updatable. This
// is necessary for an Engine substruct implementation.
type UpdateState interface {
	update()
}

// RenderScene is an interface that means that the object is renderable. This
// is necessary for an Engine substruct implementation. The argument is the
// leftover frame time in nanoseconds.
type RenderScene interface {
	render(time int64)
}

// Looper is a union of UpdateState and RenderScene
type Looper interface {
	UpdateState
	RenderScene
}
// Loop is an implementation of a main loop.
// It builds a BaseEngine around the window and runs it with itself as the
// Looper: BaseEngine's update polls GLFW events and its render is a no-op.
func Loop(window *glfw.Window) {
	newEngine := BaseEngine{run: true, window: window}
	newEngine.loop(&newEngine)
}
// loop runs a fixed-timestep main loop until the window asks to close:
// it drains whole update steps out of the elapsed frame time, then renders
// with the leftover nanoseconds.
func (engine *BaseEngine) loop(looper Looper) {
	engine.run = true
	const updatePerSeconds = 25
	// Fix: the original computed 1000000/25 = 40000ns (40µs) per update —
	// i.e. 25000 updates per second. A second is 1e9 nanoseconds.
	const nsPerUpdate = int64(time.Second) / updatePerSeconds
	// NOTE(review): leftover elapsed time is passed to render but not carried
	// into the next frame, so sub-step remainders are dropped each frame —
	// confirm whether an accumulator was intended.
	latestTime := time.Now()
	for !engine.window.ShouldClose() {
		now := time.Now()
		elapsedTime := now.Sub(latestTime)
		latestTime = now
		for elapsedTime.Nanoseconds() > nsPerUpdate {
			looper.update()
			elapsedTime = time.Duration(elapsedTime.Nanoseconds()-nsPerUpdate) * time.Nanosecond
		}
		looper.render(elapsedTime.Nanoseconds())
	}
}
// update polls pending GLFW window and input events.
func (engine *BaseEngine) update() {
	glfw.PollEvents()
}

// render is a no-op in the base engine; embedding types draw the scene here.
func (engine *BaseEngine) render(time int64) {
}
|
package main
import (
"fmt"
)
// main prints a startup greeting.
func main() {
	msg := "OK Let's GO!"
	fmt.Println(msg)
	//fmt.Printf("Hello, 世界\n")
}
|
package configuration
// Configurations wraps the list of configurations exchanged over the API.
type Configurations struct {
	Configs []Configuration `json:"configurations"`
}
// GetFirst returns the first configuration in the list. When the list is
// empty it returns the zero-value Configuration.
func (cs Configurations) GetFirst() (config Configuration) {
	if len(cs.Configs) == 0 {
		return
	}
	return cs.Configs[0]
}
// GetPage returns a copy of the configurations that fall on the given
// zero-indexed page of size perPage. Negative arguments or pages past the
// end of the input yield an empty slice.
func GetPage(configs []Configuration, pageNum, perPage int) []Configuration {
	if pageNum < 0 || perPage < 0 {
		return make([]Configuration, 0)
	}
	start := pageNum * perPage
	if start > len(configs) {
		return make([]Configuration, 0)
	}
	end := start + perPage
	if end > len(configs) {
		end = len(configs)
	}
	// Copy so callers cannot mutate the source slice through the page.
	page := make([]Configuration, end-start)
	copy(page, configs[start:end])
	return page
}
// Equals determines if two Configuration slices are equal as multisets:
// order-insensitive but duplicate-sensitive.
//
// Fix: the original matched every element of x against any element of y
// without consuming matches, so x=[a,a] compared equal to y=[a,b]. Each
// element of y now satisfies at most one element of x.
func Equals(x, y []Configuration) bool {
	if len(x) != len(y) {
		return false
	}
	matched := make([]bool, len(y))
	for _, configX := range x {
		var found bool
		for i, configY := range y {
			if !matched[i] && EqualConfigurations(configX, configY) {
				matched[i] = true
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
|
package db
import (
"errors"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"sort"
"strings"
"time"
)
// MongoDB implements the DB interface on top of an mgo session; each method
// works on a copy of the master session and closes it when done.
type MongoDB struct {
	session *mgo.Session
}

// mongoUser is the embedded user sub-document stored on topics, questions
// and votes.
type mongoUser struct {
	GoogleId string `bson:"google_id"`
	UserName string `bson:"username"`
}

// mongoTopic is the persisted shape of a topic document.
type mongoTopic struct {
	Id          bson.ObjectId `bson:"_id"`
	Name        string        `bson:"name"`
	Description string        `bson:"description"`
	User        mongoUser     `bson:"user"`
	Created     time.Time     `bson:"created"`
}
// ToTopic converts the persisted document into the public Topic type,
// rendering the ObjectId as its hex string.
func (mongotopic mongoTopic) ToTopic() *Topic {
	return &Topic{
		Id:          mongotopic.Id.Hex(),
		Name:        mongotopic.Name,
		Description: mongotopic.Description,
		User: User{
			GoogleId: mongotopic.User.GoogleId,
			Username: mongotopic.User.UserName,
		},
		Created: mongotopic.Created,
	}
}
// mongoQuestion is the persisted shape of a question document; TopicId
// references the owning topic's _id.
type mongoQuestion struct {
	Id       bson.ObjectId `bson:"_id"`
	TopicId  bson.ObjectId `bson:"topic"`
	Question string        `bson:"question"`
	User     mongoUser     `bson:"user"`
	Created  time.Time     `bson:"created"`
}
// ToQuestion converts the persisted document into the public Question type,
// rendering both ObjectIds as hex strings. Votes/UserCanVote are left at
// their zero values for the caller to populate.
func (mongoquestion mongoQuestion) ToQuestion() *Question {
	return &Question{
		Id:       mongoquestion.Id.Hex(),
		TopicId:  mongoquestion.TopicId.Hex(),
		Question: mongoquestion.Question,
		User: User{
			GoogleId: mongoquestion.User.GoogleId,
			Username: mongoquestion.User.UserName,
		},
		Created: mongoquestion.Created,
	}
}
// NewMongoDB dials the given MongoDB URL and returns a DB backed by the
// resulting master session.
func NewMongoDB(mongoURL string) (DB, error) {
	session, err := mgo.Dial(mongoURL)
	if err != nil {
		return nil, err
	}
	return &MongoDB{session}, nil
}
// NewTopic validates and stores a new topic owned by user, returning its
// public representation. A name consisting only of spaces is rejected.
func (mongodb *MongoDB) NewTopic(name, description string, user *User) (*Topic, error) {
	if strings.Trim(name, " ") == "" {
		return nil, errors.New("Name must be provided")
	}
	session := mongodb.session.Copy()
	defer session.Close()
	topic := mongoTopic{
		Id:          bson.NewObjectId(),
		Name:        name,
		Description: description,
		User: mongoUser{
			GoogleId: user.GoogleId,
			UserName: user.Username,
		},
		Created: time.Now(),
	}
	// Fix: return nil on insert failure instead of a non-nil *Topic paired
	// with a non-nil error — callers must never receive both.
	if err := session.DB("").C("topics").Insert(topic); err != nil {
		return nil, err
	}
	return topic.ToTopic(), nil
}
// TopicById fetches a topic by its hex ObjectId.
// NOTE(review): an invalid id returns (nil, nil) — callers must handle a
// nil topic without an accompanying error; confirm this contract is
// intended rather than returning a not-found error.
func (mongodb *MongoDB) TopicById(Id string) (*Topic, error) {
	if !bson.IsObjectIdHex(Id) {
		return nil, nil
	}
	session := mongodb.session.Copy()
	defer session.Close()
	result := mongoTopic{}
	query := session.DB("").C("topics").Find(bson.M{"_id": bson.ObjectIdHex(Id)})
	if err := query.One(&result); err != nil {
		return nil, err
	}
	return result.ToTopic(), nil
}
// NewQuestion validates and stores a new question on the given topic,
// returning its public representation. Blank questions and malformed topic
// ids are rejected.
func (mongodb *MongoDB) NewQuestion(topicId, question string, user *User) (*Question, error) {
	if strings.Trim(question, " ") == "" {
		return nil, errors.New("Question must be provided")
	}
	if !bson.IsObjectIdHex(topicId) {
		return nil, errors.New("Invalid ObjectId")
	}
	session := mongodb.session.Copy()
	defer session.Close()
	mongoquestion := mongoQuestion{
		Id:       bson.NewObjectId(),
		TopicId:  bson.ObjectIdHex(topicId),
		Question: question,
		User: mongoUser{
			GoogleId: user.GoogleId,
			UserName: user.Username,
		},
		Created: time.Now(),
	}
	// Fix: return nil on insert failure instead of a non-nil *Question
	// paired with a non-nil error — callers must never receive both.
	if err := session.DB("").C("questions").Insert(mongoquestion); err != nil {
		return nil, err
	}
	return mongoquestion.ToQuestion(), nil
}
// sortableQuestions orders questions by vote count descending, breaking
// ties by creation time (newest first).
type sortableQuestions []*Question

// Len implements sort.Interface.
func (s sortableQuestions) Len() int {
	return len(s)
}

// Swap implements sort.Interface.
func (s sortableQuestions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less implements sort.Interface: more votes first; among equal vote
// counts, the more recently created question first.
func (s sortableQuestions) Less(i, j int) bool {
	if s[i].Votes == s[j].Votes {
		return s[i].Created.Unix() > s[j].Created.Unix()
	} else {
		return s[i].Votes > s[j].Votes
	}
}
// QuestionsForTopic returns all questions for the topic sorted by votes
// (then recency). When user is non-nil, each question's UserCanVote reflects
// whether that user has already voted on it.
// NOTE(review): a failed vote-count query silently drops the question from
// the result (continue) instead of surfacing the error, and one count query
// (two when user != nil) is issued per question — an N+1 query pattern.
func (mongodb *MongoDB) QuestionsForTopic(topicId string, user *User) ([]*Question, error) {
	if !bson.IsObjectIdHex(topicId) {
		return nil, errors.New("Invalid ObjectId")
	}
	session := mongodb.session.Copy()
	defer session.Close()
	iter := session.DB("").C("questions").Find(bson.M{"topic": bson.ObjectIdHex(topicId)}).Iter()
	questions := sortableQuestions{}
	var q mongoQuestion
	for iter.Next(&q) {
		question := q.ToQuestion()
		count, err := session.DB("").C("votes").Find(bson.M{"question": bson.ObjectIdHex(question.Id)}).Count()
		if err != nil {
			continue
		}
		question.Votes = count + 1 // All questions default to 1 vote - the user who created it.
		if user != nil {
			count, err = session.DB("").C("votes").Find(bson.M{"question": bson.ObjectIdHex(question.Id), "user.google_id": user.GoogleId}).Count()
			if err != nil {
				continue
			}
			question.UserCanVote = count == 0
		}
		questions = append(questions, question)
	}
	sort.Sort(questions)
	if err := iter.Close(); err != nil {
		return questions, err
	}
	return questions, nil
}
// TopicsByUser returns every topic created by the given user (matched by
// Google id). A partially-filled slice is returned alongside any iterator
// error.
func (mongodb *MongoDB) TopicsByUser(user *User) ([]*Topic, error) {
	session := mongodb.session.Copy()
	defer session.Close()
	topics := []*Topic{}
	var doc mongoTopic
	iter := session.DB("").C("topics").Find(bson.M{"user.google_id": user.GoogleId}).Iter()
	for iter.Next(&doc) {
		topics = append(topics, doc.ToTopic())
	}
	if err := iter.Close(); err != nil {
		return topics, err
	}
	return topics, nil
}
// VoteForQuestion records (upserts) a vote by user on the given question;
// the upsert keyed on the full vote document makes voting idempotent.
// Invalid question ids are treated as silent no-ops.
func (mongodb *MongoDB) VoteForQuestion(questionId string, user *User) error {
	// Validate before copying the session so invalid ids cost nothing
	// (the original copied and closed a session just to return nil).
	if !bson.IsObjectIdHex(questionId) {
		return nil // Fail silently
	}
	session := mongodb.session.Copy()
	defer session.Close()
	vote := bson.M{
		"user": mongoUser{
			GoogleId: user.GoogleId,
			UserName: user.Username,
		},
		"question": bson.ObjectIdHex(questionId),
	}
	op := mgo.Change{
		Update: vote,
		Upsert: true,
	}
	var out mgo.ChangeInfo
	_, err := session.DB("").C("votes").Find(vote).Apply(op, &out)
	return err
}
// UnvoteForQuestion removes the user's vote on the given question, if any.
// Invalid question ids are treated as silent no-ops.
func (mongodb *MongoDB) UnvoteForQuestion(questionId string, user *User) error {
	// Validate before copying the session so invalid ids cost nothing
	// (the original copied and closed a session just to return nil).
	if !bson.IsObjectIdHex(questionId) {
		return nil // Fail silently
	}
	session := mongodb.session.Copy()
	defer session.Close()
	query := bson.M{
		"user.google_id": user.GoogleId,
		"question":       bson.ObjectIdHex(questionId),
	}
	var out mgo.ChangeInfo
	_, err := session.DB("").C("votes").Find(query).Apply(mgo.Change{Remove: true}, &out)
	return err
}
|
// Copyright (c) 2017 Kuguar <licenses@kuguar.io> Author: Adrian P.K. <apk@kuguar.io>
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package controllers
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/adrianpk/fundacja/app"
"github.com/adrianpk/fundacja/logger"
"github.com/adrianpk/fundacja/models"
"net/http"
"net/url"
"path"
_ "github.com/lib/pq" // Import pq without side effects
"github.com/adrianpk/fundacja/repo"
)
// GetUserRoles - Returns a collection containing all userRoles.
// Handler for HTTP Get - "/organizations/{organization}/user-roles"
func GetUserRoles(w http.ResponseWriter, r *http.Request) {
	// Get ID
	vars := mux.Vars(r)
	orgid := vars["organization"]
	// Get repo
	userRoleRepo, err := repo.MakeUserRoleRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntityNotFound, err, http.StatusInternalServerError)
		return
	}
	// Select
	userRoles, err := userRoleRepo.GetAll(orgid)
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Marshal
	j, err := json.Marshal(UserRolesResource{Data: userRoles})
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond. Fix: Content-Type must be set BEFORE WriteHeader — headers
	// added after the status line is written are silently dropped by
	// net/http, so the original response carried no Content-Type.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(j)
}
// CreateUserRole - Creates a new UserRole.
// Handler for HTTP Post - "/organizations/{organization}/user-roles/create"
func CreateUserRole(w http.ResponseWriter, r *http.Request) {
	// Decode
	var res UserRoleResource
	err := json.NewDecoder(r.Body).Decode(&res)
	if err != nil {
		app.ShowError(w, app.ErrRequestParsing, err, http.StatusInternalServerError)
		return
	}
	userRole := &res.Data
	// Get repo
	userRoleRepo, err := repo.MakeUserRoleRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntityCreate, err, http.StatusInternalServerError)
		return
	}
	// Set values: creator comes from the session; name/description derived.
	u, _ := sessionUser(r)
	userRole.CreatedBy = u.ID
	genUserRoleNameAndDescription(userRole)
	// Persist
	err = userRoleRepo.Create(userRole)
	if err != nil {
		app.ShowError(w, app.ErrEntityCreate, err, http.StatusInternalServerError)
		return
	}
	// Marshal
	j, err := json.Marshal(UserRoleResource{Data: *userRole})
	if err != nil {
		// Fix: a marshalling failure is a server error; the original passed
		// http.StatusNoContent (204), inconsistent with every other handler
		// in this file and meaningless as an error status.
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
	w.Write(j)
}
// GetUserRole - Returns a single UserRole by its id or userRoleName.
// Handler for HTTP Get - "/organizations/{organization}/user-roles/{user-role}"
// NOTE(review): despite the doc mentioning userRoleName, this is currently a
// thin alias for GetUserRoleByID only — name-based lookup is not wired here.
func GetUserRole(w http.ResponseWriter, r *http.Request) {
	GetUserRoleByID(w, r)
}
// GetUserRoleByID - Returns a single UserRole by its id.
// Handler for HTTP Get - "/organizations/{organization}/user-roles/{user-role}"
func GetUserRoleByID(w http.ResponseWriter, r *http.Request) {
	// Path parameters.
	routeVars := mux.Vars(r)
	orgID := routeVars["organization"]
	userRoleID := routeVars["user-role"]
	// Repository.
	userRoleRepo, err := repo.MakeUserRoleRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Fetch the user role scoped to its organization.
	userRole, err := userRoleRepo.GetFromOrganization(userRoleID, orgID)
	if err != nil {
		app.ShowError(w, app.ErrEntitySelect, err, http.StatusInternalServerError)
		return
	}
	// Serialize. Note: unlike the sibling handlers this marshals the bare
	// entity rather than a UserRoleResource wrapper.
	j, err := json.Marshal(userRole)
	if err != nil {
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(j)
}
// UpdateUserRole - Update an existing UserRole.
// Handler for HTTP Put - "/organizations/{organization}/user-roles/{user-role}"
func UpdateUserRole(w http.ResponseWriter, r *http.Request) {
	// Get IDs
	vars := mux.Vars(r)
	orgid := vars["organization"]
	id := vars["user-role"]
	// Decode
	var res UserRoleResource
	err := json.NewDecoder(r.Body).Decode(&res)
	if err != nil {
		app.ShowError(w, app.ErrRequestParsing, err, http.StatusInternalServerError)
		return
	}
	userRole := &res.Data
	// The entity ID always comes from the route, never from the payload.
	userRole.ID = models.ToNullsString(id)
	// Get repo
	userRoleRepo, err := repo.MakeUserRoleRepository()
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusInternalServerError)
		return
	}
	// Check against current userRole
	currentUserRole, err := userRoleRepo.GetFromOrganization(id, orgid)
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusUnauthorized)
		return
	}
	// Avoid ID spoofing: the payload must refer to the stored entity.
	err = verifyID(userRole.IdentifiableModel, currentUserRole.IdentifiableModel)
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusUnauthorized)
		return
	}
	// Regenerate the derived composite name.
	genUserRoleName(userRole)
	// Update
	err = userRoleRepo.Update(userRole)
	if err != nil {
		app.ShowError(w, app.ErrEntityUpdate, err, http.StatusInternalServerError)
		return
	}
	// Marshal
	j, err := json.Marshal(UserRoleResource{Data: *userRole})
	if err != nil {
		// Was http.StatusNoContent; a marshalling failure is a server error.
		app.ShowError(w, app.ErrResponseMarshalling, err, http.StatusInternalServerError)
		return
	}
	// Respond
	// NOTE(review): a 204 No Content status is sent together with a JSON
	// body; Go's http server suppresses bodies on 204, so clients likely
	// never receive this payload. Consider http.StatusOK if the updated
	// entity should be returned — left unchanged to preserve the contract.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusNoContent)
	w.Write(j)
}
// DeleteUserRole - Deletes an existing UserRole
// Handler for HTTP Delete - "/organizations/{organization}/user-roles/{id}"
func DeleteUserRole(w http.ResponseWriter, r *http.Request) {
	// Get IDs
	vars := mux.Vars(r)
	orgid := vars["organization"]
	id := vars["user-role"]
	// Get repo
	userRoleRepo, err := repo.MakeUserRoleRepository()
	if err != nil {
		// Was app.ErrEntitySelect; report repo-construction failure with the
		// delete error constant, matching how Create/Update handlers do it.
		app.ShowError(w, app.ErrEntityDelete, err, http.StatusInternalServerError)
		return
	}
	// Delete the user role scoped to its organization.
	err = userRoleRepo.DeleteFromOrganization(id, orgid)
	if err != nil {
		app.ShowError(w, app.ErrEntityDelete, err, http.StatusInternalServerError)
		return
	}
	// Respond: no body on success.
	w.WriteHeader(http.StatusNoContent)
}
// genUserRoleNameAndDescription - Generates both the composite name and the
// placeholder description for the UserRole.
func genUserRoleNameAndDescription(userRole *models.UserRole) error {
	if err := genUserRoleName(userRole); err != nil {
		return err
	}
	// Propagate the description error too; the original dropped it silently.
	return genUserRoleDescription(userRole)
}
// genUserRoleName - Generates a composite "org::user::role" name for the
// UserRole. Returns app.ErrEntitySetProperty when any of the three parts
// cannot be resolved.
func genUserRoleName(userRole *models.UserRole) error {
	org, _ := getOrganization(userRole.OrganizationID.String)
	user, _ := getUser(userRole.UserID.String)
	role, _ := getRole(userRole.RoleID.String)
	if org.Name.String != "" && user.Username.String != "" && role.Name.String != "" {
		name := fmt.Sprintf("%s::%s::%s", org.Name.String, user.Username.String, role.Name.String)
		userRole.Name = models.ToNullsString(name)
		return nil
	}
	// Replaced leftover debug output ("Que tal") with a meaningful message.
	logger.Debug("genUserRoleName: could not resolve organization, user or role")
	return app.ErrEntitySetProperty
}
// genUserRoleDescription - Derives a placeholder description from the
// UserRole's name; fails when the name has not been generated yet.
func genUserRoleDescription(rp *models.UserRole) error {
	if rp.Name.String == "" {
		return app.ErrEntitySetProperty
	}
	desc := fmt.Sprintf("[%s description]", rp.Name.String)
	rp.Description = models.ToNullsString(desc)
	return nil
}
func userRoleIDfromURL(r *http.Request) string {
u, _ := url.Parse(r.URL.Path)
dir := path.Dir(u.Path)
id := path.Base(dir)
logger.Debugf("UserRole id in url is %s", id)
return id
}
// userRoleNameFromURL extracts the user-role name (the second-to-last path
// segment) from the request URL.
func userRoleNameFromURL(r *http.Request) string {
	parsed, _ := url.Parse(r.URL.Path)
	userRoleName := path.Base(path.Dir(parsed.Path))
	logger.Debugf("UserRoleName in url is %s", userRoleName)
	return userRoleName
}
|
package main
import (
"fmt"
"github.com/spf13/cobra"
"os"
)
// rootCmd is the only command of this CLI; the actual work is delegated to
// runCommand (defined elsewhere in this package).
var rootCmd = &cobra.Command{
	Use: "decoration",
	Short: "Execute a program and add prefix suffix and color to its log output",
	RunE: func(cmd *cobra.Command, args []string) error {
		return runCommand()
	},
}
// flags holds the raw command-line flag values; the fields are bound to
// cobra flags in main before rootCmd.Execute parses os.Args.
var flags struct{
	program string
	prefix string
	suffix string
	color string
	args string
}
// flagsName centralizes the long and short name of every flag so that
// registration and lookups cannot disagree on spelling.
// NOTE: the initializer below is positional — it must stay in exactly the
// same order as the fields above.
var flagsName = struct{
	program, programShort string
	args, argsShort string
	prefix, prefixShort string
	suffix, suffixShort string
	color, colorShort string
} {
	"program", "e",
	"args", "a",
	"prefix", "p",
	"suffix", "s",
	"color", "c",
}
// print is the output function selected at runtime — presumably assigned in
// runCommand based on the chosen color; TODO confirm. NOTE(review): the name
// shadows Go's predeclared built-in print in this package; consider renaming.
var print func(s string)
func main() {
rootCmd.Flags().StringVarP(
&flags.program,
flagsName.program,
flagsName.programShort,
"", "program to execute")
rootCmd.Flags().StringVarP(
&flags.args,
flagsName.args,
flagsName.argsShort,
"", "arguments of the program")
rootCmd.Flags().StringVarP(
&flags.prefix,
flagsName.prefix,
flagsName.prefixShort,
"", "prefix")
rootCmd.Flags().StringVarP(
&flags.suffix,
flagsName.suffix,
flagsName.suffixShort,
"", "suffix")
rootCmd.Flags().StringVarP(
&flags.color,
flagsName.color,
flagsName.colorShort,
"", "color : black, red, green, yellow, blue, magenta, cyan, white")
rootCmd.MarkFlagRequired("program")
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
} |
package version
import (
"encoding/hex"
"fmt"
"io/ioutil"
"strings"
"sync"
"github.com/google/logger"
"golang.org/x/crypto/blake2b"
)
// rootDir is the directory tree whose files are hashed at package init.
const rootDir = "./static"
// FileSums maps a file path (relative to rootDir) to the hex-encoded
// blake2b-256 digest of its contents.
type FileSums map[string]string
// StaticSums holds the digest of every file under rootDir; populated once
// in init and presumably read-only afterwards — TODO confirm no later writes.
var StaticSums FileSums
// init hashes every file under rootDir concurrently and fills StaticSums.
func init() {
	ch := make(chan File, 1)
	wg := &sync.WaitGroup{}
	// Account for the root hashFiles call; hashFiles Adds for each child
	// before it calls Done on itself.
	wg.Add(1)
	go hashFiles(rootDir, ch, wg)
	// Close ch once every hashing goroutine has finished so the range loop
	// below terminates.
	go func() {
		wg.Wait()
		close(ch)
	}()
	StaticSums = make(FileSums)
	prefix := rootDir + "/"
	for m := range ch {
		// Key entries by path relative to rootDir.
		k := strings.TrimPrefix(m.Path, prefix)
		StaticSums[k] = m.Sum
	}
}
// File pairs a file path with the hex-encoded blake2b-256 sum of its contents.
type File struct {
	Path, Sum string
}
// hashFile reads the file at p, hashes its contents with blake2b-256 and
// sends the result on ch; wg is decremented when the send has completed.
// A read failure aborts the whole process via logger.Fatal.
func hashFile(p string, ch chan File, wg *sync.WaitGroup) {
	defer wg.Done()
	data, err := ioutil.ReadFile(p)
	if err != nil {
		logger.Fatal(err)
	}
	digest := blake2b.Sum256(data)
	ch <- File{Path: p, Sum: hex.EncodeToString(digest[:])}
}
// hashFiles walks directory p, spawning one goroutine per entry: hashFile
// for regular files and hashFiles recursively for subdirectories. The caller
// must have incremented wg before invoking; it is decremented on return.
func hashFiles(p string, ch chan File, wg *sync.WaitGroup) {
	defer wg.Done()
	entries, err := ioutil.ReadDir(p)
	if err != nil {
		logger.Fatal(err)
	}
	wg.Add(len(entries))
	for _, entry := range entries {
		child := fmt.Sprintf("%s/%s", p, entry.Name())
		if entry.IsDir() {
			go hashFiles(child, ch, wg)
		} else {
			go hashFile(child, ch, wg)
		}
	}
}
|
package common
// AzcopyVersion is the current AzCopy release version string.
const AzcopyVersion = "10.0.0-Preview"
// UserAgent identifies this AzCopy build, e.g. in HTTP request headers.
const UserAgent = "AzCopy/v" + AzcopyVersion
|
package ukpolice
import (
	"context"
	"encoding/json"
	"time"
)
// StopAndSearchService handles communication with the stop and search related
// method of the data.police.uk API. It is a named alias of the package's
// shared service type so all endpoint methods hang off one client.
type StopAndSearchService service
// Search holds information relating to individual stop and searches.
// Fields the API omits are left at their zero value.
type Search struct {
	ID int `json:"id"`
	AgeRange string `json:"age_range"`
	Type string `json:"type"`
	Gender string `json:"gender"`
	Outcome SearchOutcome `json:"outcome"`
	InvolvedPerson bool `json:"involved_person"`
	SelfDefinedEthnicity string `json:"self_defined_ethnicity"`
	OfficerDefinedEthnicity string `json:"officer_defined_ethnicity"`
	DateTime time.Time `json:"datetime"`
	RemovalOfMoreThanOuterClothing bool `json:"removal_of_more_than_outer_clothing"`
	Location Location `json:"location"`
	Operation bool `json:"operation"`
	OperationName string `json:"operation_name"`
	OutcomeLinkedToObject bool `json:"outcome_linked_to_object_of_search"`
	ObjectOfSearch string `json:"object_of_search"`
	Legislation string `json:"legislation"`
	// Force is not supplied natively by the API - if you want to record which
	// force a search belongs to update this field after fetching.
	Force string `json:"force"`
}
// String returns a human-readable representation of s via Stringify.
func (s Search) String() string {
	return Stringify(s)
}
// SearchOutcome holds details of search outcomes. The 'outcome' result provided
// by the data.police.uk api returns both string and bool types, this struct
// and the custom UnmarshalJSON satisfy type security.
type SearchOutcome struct {
	Desc string `json:"outcome_desc"`
	SearchHappened bool `json:"searched"`
}
// String returns a human-readable representation of o via Stringify.
func (o SearchOutcome) String() string {
	return Stringify(o)
}
// UnmarshalJSON implements the json.Unmarshaller interface.
// The API encodes "no search happened" as the JSON literal false and an
// actual outcome as a JSON string; both shapes are decoded here.
func (o *SearchOutcome) UnmarshalJSON(b []byte) error {
	// JSON boolean: no textual description.
	var happened bool
	if err := json.Unmarshal(b, &happened); err == nil {
		*o = SearchOutcome{SearchHappened: happened}
		return nil
	}
	// Otherwise the value must be a JSON string. The previous implementation
	// used string(b) directly, so Desc kept the surrounding quote characters
	// and any escape sequences; unmarshalling decodes them properly.
	var desc string
	if err := json.Unmarshal(b, &desc); err != nil {
		return err
	}
	*o = SearchOutcome{Desc: desc, SearchHappened: true}
	return nil
}
// GetStopAndSearchesByArea returns stop and searches at street-level;
// either within a 1 mile radius of a single point, or within a custom area.
func (s *StopAndSearchService) GetStopAndSearchesByArea(ctx context.Context, opts ...Option) ([]Search, *Response, error) {
	endpoint := addOptions("stops-street", opts...)
	req, err := s.api.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}
	var results []Search
	resp, err := s.api.Do(ctx, req, &results)
	if err != nil {
		return nil, nil, err
	}
	return results, resp, nil
}
// GetStopAndSearchesByLocation returns stop and searches at a particular location.
func (s *StopAndSearchService) GetStopAndSearchesByLocation(ctx context.Context, opts ...Option) ([]Search, *Response, error) {
	endpoint := addOptions("stops-at-location", opts...)
	req, err := s.api.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}
	var results []Search
	resp, err := s.api.Do(ctx, req, &results)
	if err != nil {
		return nil, nil, err
	}
	return results, resp, nil
}
// GetStopAndSearchesWithNoLocation returns stop and searches with no location
// provided for a given police force.
func (s *StopAndSearchService) GetStopAndSearchesWithNoLocation(ctx context.Context, opts ...Option) ([]Search, *Response, error) {
	endpoint := addOptions("stops-no-location", opts...)
	req, err := s.api.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}
	var results []Search
	resp, err := s.api.Do(ctx, req, &results)
	if err != nil {
		return nil, nil, err
	}
	return results, resp, nil
}
// GetStopAndSearchesByForce returns stop and searches reported by a given police force.
func (s *StopAndSearchService) GetStopAndSearchesByForce(ctx context.Context, opts ...Option) ([]Search, *Response, error) {
	endpoint := addOptions("stops-force", opts...)
	req, err := s.api.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}
	var results []Search
	resp, err := s.api.Do(ctx, req, &results)
	if err != nil {
		return nil, nil, err
	}
	return results, resp, nil
}
|
package redis
import (
"github.com/go-kratos/kratos/pkg/cache/redis"
utilpaladin "way-jasy-cron/common/util/paladin"
)
// Manager bundles the cache clients used by the application.
type Manager struct {
	Redis *redis.Pool
}
// Config mirrors the structure of the redis.toml configuration file.
type Config struct {
	Redis *redis.Config
}
// Filename returns the paladin config file this Config is unmarshalled from.
func (c *Config) Filename() string {
	return "redis.toml"
}
func New() *Manager{
c := &Config{}
utilpaladin.MustUnmarshalTOML(c)
return &Manager{
Redis: redis.NewPool(c.Redis),
}
} |
/*
Copyright 2019 The xridge kubestone contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8s
import "k8s.io/apimachinery/pkg/api/errors"
// ignoreByErrorFn returns nil when errorFn classifies err as ignorable,
// and the unmodified err otherwise.
func ignoreByErrorFn(err error, errorFn func(error) bool) error {
	if !errorFn(err) {
		return err
	}
	return nil
}
// IgnoreNotFound returns nil on k8s Not Found type of errors,
// but returns the error as-is otherwise.
func IgnoreNotFound(err error) error {
	return ignoreByErrorFn(err, errors.IsNotFound)
}
// IgnoreAlreadyExists returns nil on k8s Already Exists type of errors,
// but returns the error as-is otherwise.
func IgnoreAlreadyExists(err error) error {
	return ignoreByErrorFn(err, errors.IsAlreadyExists)
}
|
package agent
import (
"github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
v1gen "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// createBundle copies all targets from the GitRepo into TargetRestrictions. TargetRestrictions acts as a whitelist to prevent
// the creation of BundleDeployments from Targets created from the TargetCustomizations in the fleet.yaml.
// We replicate this behaviour here since this is run in an integration test that runs just the BundleController.
func createBundle(name, namespace string, bundleController v1gen.BundleController, targets []v1alpha1.BundleTarget, targetRestrictions []v1alpha1.BundleTarget) (*v1alpha1.Bundle, error) {
	// Pre-size the slice — the restriction count is known up front.
	restrictions := make([]v1alpha1.BundleTargetRestriction, 0, len(targetRestrictions))
	for _, r := range targetRestrictions {
		restrictions = append(restrictions, v1alpha1.BundleTargetRestriction{
			Name:                 r.Name,
			ClusterName:          r.ClusterName,
			ClusterSelector:      r.ClusterSelector,
			ClusterGroup:         r.ClusterGroup,
			ClusterGroupSelector: r.ClusterGroupSelector,
		})
	}
	bundle := v1alpha1.Bundle{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{"foo": "bar"},
		},
		Spec: v1alpha1.BundleSpec{
			Targets:            targets,
			TargetRestrictions: restrictions,
		},
	}
	return bundleController.Create(&bundle)
}
// createCluster registers a Cluster resource in the controller namespace and
// then stamps clusterNs into its status.
func createCluster(name, controllerNs string, clusterController v1gen.ClusterController, labels map[string]string, clusterNs string) (*v1alpha1.Cluster, error) {
	created, err := clusterController.Create(&v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: controllerNs,
			Labels:    labels,
		},
	})
	if err != nil {
		return nil, err
	}
	// Need to set the status.Namespace as it is needed to create a BundleDeployment.
	// Namespace is set by the Cluster controller. We need to do it manually because we are running just the Bundle controller.
	created.Status.Namespace = clusterNs
	return clusterController.UpdateStatus(created)
}
// createClusterGroup registers a ClusterGroup whose membership is defined by
// the given label selector.
func createClusterGroup(name, namespace string, clusterGroupController v1gen.ClusterGroupController, selector *metav1.LabelSelector) (*v1alpha1.ClusterGroup, error) {
	group := v1alpha1.ClusterGroup{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec:       v1alpha1.ClusterGroupSpec{Selector: selector},
	}
	return clusterGroupController.Create(&group)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.