text
stringlengths 11
4.05M
|
|---|
package main
import (
"bufio"
"github.com/ziutek/mymysql/autorc"
"io"
"log"
"net"
"os"
"strconv"
"strings"
"time"
)
// Message format (lines ended by CR or CRLF):
// FROM - symbol of source (<=16B)
// PHONE1[=DSTID1] PHONE2[=DSTID2] ... - list of phone numbers and dstIds
// Lines that contain optional parameters, one parameter per line: NAME or
// NAME VALUE. Implemented parameters:
// report - report required
// delete - delete message after sending (wait for reports, if required)
// - empty line
// Message body
// . - '.' as first and only character in line
// You can use optional dstIds to link recipients with your other data in db.
// Input represents source of messages
type Input struct {
	smsd     *SMSd        // daemon to notify when a new message has been stored
	db       *autorc.Conn // auto-reconnecting DB connection used for inserts
	knownSrc []string     // whitelist of accepted FROM symbols
	proto, addr string    // listen protocol ("tcp", "unix", ...) and address
	ln       net.Listener // active listener; set by Start, closed by Stop
	// Prepared statements, lazily prepared on first handled connection.
	outboxInsert, recipientsInsert autorc.Stmt
	stop bool // set by Stop so loop() can tell a shutdown from an Accept error
}
// NewInput returns an Input that accepts messages for the known sources
// src on proto/addr and stores them through db. It also registers the
// schema/setup statements to run on every (re)connection.
func NewInput(smsd *SMSd, proto, addr string, db *autorc.Conn, src []string) *Input {
	in := &Input{
		smsd:     smsd,
		db:       db,
		proto:    proto,
		addr:     addr,
		knownSrc: src,
	}
	in.db.Register(setNames)
	in.db.Register(createOutbox)
	in.db.Register(createRecipients)
	return in
}
// outboxInsert stores one message in the outbox table.
// Bound parameters: time, src, report, del, body.
const outboxInsert = `INSERT
` + outboxTable + `
SET
	time=?,
	src=?,
	report=?,
	del=?,
	body=?
`
// recipientsInsert links one phone number (and optional dstId) to a
// stored message. Bound parameters: msgId, number, dstId.
const recipientsInsert = `INSERT ` + recipientsTable + ` SET
	msgId=?,
	number=?,
	dstId=?
`
// handle reads a single message from connection c and stores it in the
// database. Wire format (lines ended by CR or CRLF):
//  1. source symbol (must be in knownSrc),
//  2. recipient list "PHONE1[=DSTID1] PHONE2[=DSTID2] ...",
//  3. option lines ("report", "delete") until the first empty line,
//  4. message body, terminated by a line containing only ".".
// A one-line textual status is written back to the client; the SMS
// daemon is notified on success.
func (in *Input) handle(c net.Conn) {
	defer c.Close()
	if !prepareOnce(in.db, &in.outboxInsert, outboxInsert) {
		return
	}
	if !prepareOnce(in.db, &in.recipientsInsert, recipientsInsert) {
		return
	}
	r := bufio.NewReader(c)
	from, ok := readLine(r)
	if !ok {
		return
	}
	// Linear scan is fine: the source whitelist is small.
	i := 0
	for i < len(in.knownSrc) && in.knownSrc[i] != from {
		i++
	}
	if i == len(in.knownSrc) {
		log.Println("Unknown source:", from)
		// Throttle probing of source names before answering.
		time.Sleep(5 * time.Second)
		io.WriteString(c, "Unknown source\n")
		return
	}
	tels, ok := readLine(r)
	if !ok {
		return
	}
	// Read options until first empty line.
	var del, report bool
	for {
		l, ok := readLine(r)
		if !ok {
			return
		}
		if l == "" {
			break
		}
		switch l {
		case "report":
			report = true
		case "delete":
			del = true
		}
	}
	// Read the message body up to the "." terminator line.
	var body []byte
	var prevIsPrefix bool
	for {
		buf, isPrefix, err := r.ReadLine()
		if err != nil {
			log.Print("Can't read message body: ", err)
			return
		}
		// The terminator must be a complete line on its own, not a
		// fragment of a longer line (prevIsPrefix tracks continuations).
		if !isPrefix && !prevIsPrefix && len(buf) == 1 && buf[0] == '.' {
			break
		}
		// Join chunks with '\n'. BUG FIX: the old code prepended '\n'
		// to every chunk and stripped it later with body[1:], which
		// panicked on an empty body (message consisting only of ".").
		if len(body) > 0 {
			body = append(body, '\n')
		}
		body = append(body, buf...)
		prevIsPrefix = isPrefix
	}
	// Insert message into Outbox.
	_, res, err := in.outboxInsert.Exec(time.Now(), from, report, del, body)
	if err != nil {
		log.Printf("Can't insert message from %s into Outbox: %s", from, err)
		// Send error response, ignore errors
		io.WriteString(c, "DB error (can't insert message)\n")
		return
	}
	msgId := uint32(res.InsertId())
	// Save recipients for this message.
	for _, dst := range strings.Split(tels, " ") {
		d := strings.SplitN(dst, "=", 2)
		num := d[0]
		if !checkNumber(num) {
			log.Printf("Bad phone number: '%s' for message #%d.", num, msgId)
			// Send error response, ignore errors
			io.WriteString(c, "Bad phone number\n")
			continue
		}
		var dstId uint64
		if len(d) == 2 {
			dstId, err = strconv.ParseUint(d[1], 0, 32)
			if err != nil {
				dstId = 0
				log.Printf("Bad DstId=`%s` for number %s: %s", d[1], num, err)
				// Send error response, ignore errors
				io.WriteString(c, "Bad DstId\n")
			}
		}
		_, _, err = in.recipientsInsert.Exec(msgId, num, uint32(dstId))
		if err != nil {
			log.Printf("Can't insert phone number %s into Recipients: %s", num, err)
			// Send error response, ignore errors
			io.WriteString(c, "DB error (can't insert phone number)\n")
		}
	}
	// Send OK as response, ignore errors
	io.WriteString(c, "OK\n")
	// Inform SMSd about new message
	in.smsd.NewMsg()
}
// loop accepts connections until Stop is called or a permanent error
// occurs, handling each connection in its own goroutine.
func (in *Input) loop() {
	for {
		c, err := in.ln.Accept()
		if err != nil {
			if in.stop {
				// Listener was closed by Stop; exit quietly.
				return
			}
			log.Print("Can't accept connection: ", err)
			if e, ok := err.(net.Error); !ok || !e.Temporary() {
				return
			}
			// BUG FIX: on a temporary error the old code fell through
			// and called handle(nil), which would panic in handle.
			continue
		}
		go in.handle(c)
	}
}
// Start opens the listener on the configured protocol/address and
// launches the accept loop. On any setup failure the process exits.
func (in *Input) Start() {
	if in.proto == "unix" {
		// Best effort: remove a stale socket file from a previous run.
		os.Remove(in.addr)
	}
	ln, err := net.Listen(in.proto, in.addr)
	if err != nil {
		log.Printf("Can't listen on %s: %s", in.addr, err)
		os.Exit(1)
	}
	in.ln = ln
	if in.proto == "unix" {
		// Make the socket writable for all local clients.
		if err = os.Chmod(in.addr, 0666); err != nil {
			log.Printf("Can't chmod on unix socket %s: %s", in.addr, err)
			os.Exit(1)
		}
	}
	log.Println("Listen on:", in.proto, in.addr)
	in.stop = false
	go in.loop()
}
// Stop flags the accept loop for shutdown and closes the listener,
// which unblocks the pending Accept call in loop().
func (in *Input) Stop() {
	in.stop = true
	err := in.ln.Close()
	if err != nil {
		log.Println("Can't close listen socket:", err)
	}
}
// String returns a human-readable "proto:addr" identifier for logging.
func (in *Input) String() string {
	return in.proto + ":" + in.addr
}
|
package metrics
import (
"os"
"time"
)
// JSONData is a generic decoded JSON object.
type JSONData = map[string]interface{}

// KernelInfo describes the host kernel state as reported by a scan.
type KernelInfo struct {
	rebootRequired bool   // a reboot is pending to apply a new kernel
	release        string // kernel release string
}

// CVEInfo holds the per-CVE fields extracted from a vulnerability report.
type CVEInfo struct {
	id             string   // CVE identifier, e.g. "CVE-2021-1234"
	packageName    string   // affected package
	severity       string
	notFixedYet    bool     // no fix is available yet
	fixState       string
	title          string
	summary        string
	referenceLinks []string
	published      string   // publication timestamp (string form as reported)
	lastModified   string
	mitigation     string
}

// Report is one parsed vulnerability report for a single host.
type Report struct {
	filename   string
	path       string
	serverName string
	hostname   string
	kernel     KernelInfo
	cves       []CVEInfo
}

// Metric pairs a prometheus collector with the function that records
// values into it.
type Metric struct {
	prom   interface{}        // underlying prometheus collector — assumes caller type-asserts; TODO confirm
	record func(metric Metric)
}

// Package-level state shared by the metrics collection pass.
var (
	reportsPath string          // directory containing all reports
	reportsDir  []os.FileInfo   // cached listing of reportsPath
	latestPath  string          // directory containing the latest reports
	latestDir   []os.FileInfo   // cached listing of latestPath
	reports     []Report        // parsed reports
	reportedAt  time.Time       // timestamp of the latest collection
	metrics     []Metric        // registered metrics
)
|
package app
// Lifecycle support a bunch of APIs required for lifecycle
// Certain initialization order will be automatically calculated according to the dependency injection.
// Don't use circle dependency, otherwise it will cause unexpected behavior.
type Lifecycle interface {
	// PrepareInitialization runs before Initialize.
	PrepareInitialization() error
	// Initialize performs the component's main initialization.
	Initialize() error
	// PrepareDestruction runs before Destroy.
	PrepareDestruction() error
	// Destroy releases the component's resources.
	Destroy() error
}
// DefaultLifecycle is a no-op Lifecycle implementation meant for
// embedding, so types only override the phases they care about.
type DefaultLifecycle struct {
}

// PrepareInitialization is a no-op.
func (d DefaultLifecycle) PrepareInitialization() error {
	return nil
}

// Initialize is a no-op.
func (d DefaultLifecycle) Initialize() error {
	return nil
}

// PrepareDestruction is a no-op.
func (d DefaultLifecycle) PrepareDestruction() error {
	return nil
}

// Destroy is a no-op.
func (d DefaultLifecycle) Destroy() error {
	return nil
}
|
package main
import "fmt"
// z is a package-level value printed by foo.
var z = 40

// main delegates to foo.
func main() {
	foo()
}

// foo prints the package-level variable z to stdout.
func foo() {
	fmt.Println(z)
}
|
package main
import (
"context"
"database/sql"
"fmt"
"log"
"math/rand"
"time"
"github.com/Rican7/retry"
"github.com/Rican7/retry/backoff"
"github.com/Rican7/retry/jitter"
"github.com/Rican7/retry/strategy"
)
// insert executes the package-level `update` statement once, retrying up
// to 2000 times with jittered linear backoff. Execution time is logged
// via timeTrack, tagged with runnerId. The attempt is bounded to 60s.
func insert(ctx context.Context, db *sql.DB, runnerId string) {
	// BUG FIX: the deadline context is now derived from the caller's ctx
	// instead of context.Background(), so upstream cancellation is honored
	// (previously the ctx parameter was silently shadowed and ignored).
	d := time.Now().Add(60 * time.Second)
	ctx, cancel := context.WithDeadline(ctx, d)
	defer timeTrack(time.Now(), fmt.Sprintf("insert - %s", runnerId))
	defer cancel()
	nanos := time.Now().UnixNano()
	// Dedicated PRNG so jitter is independent of the global source.
	random := rand.New(rand.NewSource(time.Now().UnixNano()))
	action := func(attempt uint) error {
		_, err := db.Exec(update, nanos, "anyvalue")
		if err != nil {
			log.Printf("Insert [%s](attempt %d) - %s", runnerId, attempt, err.Error())
		}
		return err
	}
	err := retry.Retry(
		action,
		strategy.Limit(2000),
		strategy.BackoffWithJitter(backoff.Linear(10*time.Millisecond), jitter.Deviation(random, 0.8)),
	)
	if err != nil {
		log.Fatalf("Insert [%s] Error: %v", runnerId, err.Error())
	}
	// Check context for error, If ctx.Err() != nil gracefully exit the current execution
	if ctx.Err() != nil {
		log.Fatalf("insert [%s] - %v \n", runnerId, ctx.Err())
	}
	log.Printf("%s - Inserted and committed successfully.\n", runnerId)
}
// doInsert calls insert every 10ms until ctx is cancelled.
func doInsert(ctx context.Context, db *sql.DB, runnerId string) {
	ticker := time.NewTicker(10 * time.Millisecond)
	// BUG FIX: the ticker was never stopped and ctx was never checked,
	// so the loop could neither exit nor release the ticker.
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			insert(ctx, db, runnerId)
		}
	}
}
|
package bmcrypto
import (
"github.com/stretchr/testify/assert"
"io/ioutil"
"testing"
)
// dummyReader replaces the package's randomness source with a constant
// stream so signatures in these tests are deterministic.
type dummyReader struct{}

var (
	// signMessage is the fixed payload signed in every test below.
	signMessage = []byte("b2d31086f098254d32314438a863e61e")
)

// Read fills b entirely with the byte 0x01 and reports success.
func (d *dummyReader) Read(b []byte) (n int, err error) {
	for i := 0; i < len(b); i++ {
		b[i] = 1
	}
	return len(b), nil
}
// TestSignRSA signs a fixed message with the RSA test key pair using the
// deterministic dummyReader, pins the exact signature bytes, verifies it,
// then flips one bit and checks verification fails.
func TestSignRSA(t *testing.T) {
	randReader = &dummyReader{}
	data, err := ioutil.ReadFile("../../testdata/privkey.rsa")
	assert.NoError(t, err)
	privKey, err := NewPrivKey(string(data))
	assert.NoError(t, err)
	data, err = ioutil.ReadFile("../../testdata/pubkey.rsa")
	assert.NoError(t, err)
	pubKey, err := NewPubKey(string(data))
	assert.NoError(t, err)
	sig, err := Sign(*privKey, signMessage)
	assert.NoError(t, err)
	// Golden signature produced with the deterministic reader above.
	assert.Equal(t, []byte{0x74, 0xc7, 0x7, 0x9a, 0x72, 0x7e, 0x1, 0xf2, 0xac, 0xa1, 0x56, 0x4e, 0x95, 0x97, 0x41, 0x60, 0xb6, 0x23, 0x13, 0x16, 0xda, 0x6b, 0xa1, 0xe1, 0x5d, 0x52, 0x37, 0xe2, 0x8b, 0xac, 0x55, 0x9, 0xac, 0xbe, 0x2d, 0xbd, 0x2f, 0xd4, 0xae, 0xdb, 0xd4, 0xf, 0xd6, 0xee, 0xfe, 0x75, 0x86, 0xf5, 0xca, 0xa5, 0x2f, 0x6f, 0xba, 0x24, 0xdb, 0x8a, 0xb3, 0xd5, 0x81, 0xeb, 0xbf, 0xe6, 0x71, 0xe4, 0xb1, 0x1b, 0x6e, 0x8a, 0x6e, 0x72, 0x6c, 0x5d, 0x27, 0x57, 0x24, 0xb7, 0x4e, 0xa5, 0xb1, 0xf5, 0x0, 0x84, 0xad, 0x99, 0x82, 0x88, 0xb7, 0x1a, 0x2a, 0x7e, 0xcc, 0x61, 0x6f, 0x77, 0x58, 0x3d, 0xda, 0x9, 0x18, 0xb, 0xfc, 0x3, 0x22, 0xf5, 0x3, 0x96, 0x44, 0xf5, 0x10, 0xf9, 0x20, 0xfc, 0x27, 0xb7, 0x47, 0xfe, 0xf7, 0x56, 0x4b, 0x98, 0x5c, 0xce, 0x13, 0xed, 0x11, 0x74, 0x8e, 0x4e}, sig)
	res, err := Verify(*pubKey, signMessage, sig)
	assert.NoError(t, err)
	assert.True(t, res)
	// Corrupt one bit: verification must now fail.
	sig[0] ^= 0x80
	res, _ = Verify(*pubKey, signMessage, sig)
	assert.False(t, res)
}

// TestSignECDSA is the ECDSA analogue of TestSignRSA: deterministic sign,
// golden-signature check, verify, then tamper-and-reject.
func TestSignECDSA(t *testing.T) {
	randReader = &dummyReader{}
	data, err := ioutil.ReadFile("../../testdata/privkey.ecdsa")
	assert.NoError(t, err)
	privKey, err := NewPrivKey(string(data))
	assert.NoError(t, err)
	data, err = ioutil.ReadFile("../../testdata/pubkey.ecdsa")
	assert.NoError(t, err)
	pubKey, err := NewPubKey(string(data))
	assert.NoError(t, err)
	sig, err := Sign(*privKey, signMessage)
	assert.NoError(t, err)
	// Golden signature produced with the deterministic reader above.
	assert.Equal(t, []byte{0x30, 0x65, 0x2, 0x30, 0x2e, 0x0, 0x2b, 0x6e, 0x28, 0xb6, 0x9f, 0x2a, 0xb7, 0x80, 0x0, 0x76, 0xe2, 0x4b, 0x29, 0xb2, 0x46, 0xad, 0x88, 0x5e, 0x24, 0x51, 0xd6, 0xe7, 0xba, 0x80, 0x57, 0x19, 0x33, 0xbb, 0x1, 0x2d, 0x85, 0xd6, 0x3c, 0x10, 0xff, 0x9d, 0x52, 0x37, 0x73, 0x9a, 0xba, 0xa6, 0x5e, 0xd9, 0x3c, 0x81, 0x2, 0x31, 0x0, 0xd0, 0xd0, 0x3a, 0xc0, 0xd1, 0x54, 0x2e, 0x6b, 0x9f, 0xa1, 0x33, 0x78, 0x6a, 0x4f, 0x8e, 0x1, 0x8e, 0xed, 0x8, 0xd7, 0x9e, 0xed, 0xd7, 0x53, 0x56, 0xa7, 0x3b, 0xe5, 0xd7, 0x4b, 0xfa, 0xb5, 0xad, 0xa2, 0x7f, 0x4f, 0x91, 0x4, 0x65, 0x7a, 0xa3, 0x98, 0xc8, 0xcd, 0x1, 0xe6, 0x2d, 0x39}, sig)
	res, err := Verify(*pubKey, signMessage, sig)
	assert.NoError(t, err)
	assert.True(t, res)
	// Corrupt one bit: verification must now fail.
	sig[0] ^= 0x80
	res, _ = Verify(*pubKey, signMessage, sig)
	assert.False(t, res)
}

// TestSignED25519 is the Ed25519 analogue of TestSignRSA: deterministic
// sign, golden-signature check, verify, then tamper-and-reject.
func TestSignED25519(t *testing.T) {
	randReader = &dummyReader{}
	data, err := ioutil.ReadFile("../../testdata/privkey.ed25519")
	assert.NoError(t, err)
	privKey, err := NewPrivKey(string(data))
	assert.NoError(t, err)
	data, err = ioutil.ReadFile("../../testdata/pubkey.ed25519")
	assert.NoError(t, err)
	pubKey, err := NewPubKey(string(data))
	assert.NoError(t, err)
	sig, err := Sign(*privKey, signMessage)
	assert.NoError(t, err)
	// Golden signature produced with the deterministic reader above.
	assert.Equal(t, []byte{0x41, 0x5c, 0x11, 0xb4, 0x4a, 0x3a, 0xbc, 0x62, 0x6f, 0xe, 0x21, 0x7d, 0xd9, 0xee, 0x3e, 0x4a, 0x52, 0x9f, 0x2, 0xe5, 0x3f, 0xdb, 0xd6, 0xe7, 0xb3, 0xdd, 0xb2, 0x62, 0x66, 0x91, 0x42, 0x43, 0x4c, 0xbe, 0x7f, 0x2c, 0x8d, 0x48, 0xf7, 0xe2, 0x9a, 0xc2, 0xe5, 0x38, 0xc4, 0xc3, 0xd2, 0x2d, 0xcc, 0x60, 0xf5, 0x25, 0xec, 0xa9, 0x9, 0xb1, 0xa6, 0x5f, 0xe1, 0xfa, 0xe4, 0x14, 0xd0, 0x5}, sig)
	res, err := Verify(*pubKey, signMessage, sig)
	assert.NoError(t, err)
	assert.True(t, res)
	// Corrupt one bit: verification must now fail.
	sig[0] ^= 0x80
	res, _ = Verify(*pubKey, signMessage, sig)
	assert.False(t, res)
}
|
package auth
import (
"github.com/atymkiv/echo_frame_learning/blog/model"
"github.com/labstack/echo"
"github.com/ribice/gorsk/pkg/utl/model"
"net/http"
)
var (
	// ErrInvalidCredentials is returned when the supplied username or
	// password is not recognized.
	// NOTE(review): not referenced anywhere in this file — confirm it is
	// used by callers or by a missing password check in Authenticate.
	ErrInvalidCredentials = echo.NewHTTPError(http.StatusUnauthorized, "Username or password does not exist")
)
// New creates new iam service
func New(udb UserDB, j TokenGenerator, sec Securer) *Auth {
	a := new(Auth)
	a.udb = udb
	a.tg = j
	a.sec = sec
	return a
}
// Service represents auth service interface
type Service interface {
	// Authenticate exchanges an email/password pair for an auth token.
	Authenticate(echo.Context, string, string) (*blog.AuthToken, error)
}
// Auth represents auth application service
type Auth struct {
	udb UserDB         // user lookup repository
	tg  TokenGenerator // JWT generator
	sec Securer        // password/token hashing service
}
// UserDB represents user repository interface
type UserDB interface {
	// FindByEmail returns the user with the given email address.
	FindByEmail(string) (*blog.User, error)
}

// TokenGenerator represents token generator (jwt) interface
type TokenGenerator interface {
	// GenerateToken returns (token, expiry, error) for the given user.
	GenerateToken(*blog.User) (string, string, error)
}

// Securer represents security interface
type Securer interface {
	// Token derives a secure token from the given string.
	Token(string) string
}
// Authenticate tries to authenticate the user provided by email and password
func (a *Auth) Authenticate(c echo.Context, user, pass string) (*blog.AuthToken, error) {
	// "user" is treated as an email address (see UserDB.FindByEmail).
	u, err := a.udb.FindByEmail(user)
	if err != nil {
		return nil, err
	}
	// NOTE(review): the pass argument and the sec Securer are never used,
	// so the password is not actually verified before a token is issued.
	// Confirm whether a password-hash comparison step is missing here.
	token, expire, err := a.tg.GenerateToken(u)
	if err != nil {
		return nil, gorsk.ErrUnauthorized
	}
	return &blog.AuthToken{Token: token, Expires: expire, RefreshToken: u.Token}, nil
}
|
package dice
import (
"fmt"
"github.com/theshadow/dice/formula"
"strings"
"testing"
)
// TestDropExtension_New table-tests the "drop" extension: for each case
// it drops the lowest or highest roll (first occurrence only when there
// are duplicates) and compares the extension's textual output.
func TestDropExtension_New(t *testing.T) {
	cases := []struct {
		name     string       // subtest name
		which    string       // "lowest" or "highest" — which roll to drop
		results  Results      // input rolls
		roll     formula.Roll // roll description passed through to the extension
		expected string       // exact expected message
	}{
		{
			name:    "Drop lowest without duplicates",
			which:   "lowest",
			results: Results{Rolls: []int{1, 2, 3, 4, 5}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the lowest roll, new rolls: %v", []int{2, 3, 4, 5}),
		},
		{
			// Only one of the duplicated lowest values is removed.
			name:    "Drop lowest with duplicates",
			which:   "lowest",
			results: Results{Rolls: []int{1, 1, 3, 4, 5}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the lowest roll, new rolls: %v", []int{1, 3, 4, 5}),
		},
		{
			name:    "Drop lowest with lowest in the middle",
			which:   "lowest",
			results: Results{Rolls: []int{2, 3, 1, 4, 5}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the lowest roll, new rolls: %v", []int{2, 3, 4, 5}),
		},
		{
			name:    "Drop lowest with lowest does not return empty array",
			which:   "lowest",
			results: Results{Rolls: []int{2, 9, 8, 2, 10}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the lowest roll, new rolls: %v", []int{9, 8, 2, 10}),
		},
		{
			name:    "Drop highest without duplicates",
			which:   "highest",
			results: Results{Rolls: []int{1, 2, 3, 4, 5}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the highest roll, new rolls: %v", []int{1, 2, 3, 4}),
		},
		{
			name:    "Drop highest with duplicates",
			which:   "highest",
			results: Results{Rolls: []int{1, 2, 3, 5, 5}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the highest roll, new rolls: %v", []int{1, 2, 3, 5}),
		},
		{
			name:    "Drop highest with highest in the middle",
			which:   "highest",
			results: Results{Rolls: []int{1, 2, 5, 3, 4}},
			roll: formula.Roll{
				Count:      5,
				Sides:      5,
				Modifier:   0,
				Extensions: make(map[string][]string),
			},
			expected: fmt.Sprintf("Dropping the highest roll, new rolls: %v", []int{1, 2, 3, 4}),
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			ext := newDropxtension([]string{c.which})
			actual, err := ext.Exec(c.results, c.roll)
			if err != nil {
				t.Fatalf("unexpected error while executing extension: %s", err)
			}
			if strings.Compare(c.expected, actual) != 0 {
				t.Fatalf("Extension result doesn't match, expected '%s' and got '%s'", c.expected, actual)
			}
		})
	}
}
|
package main
import (
"fmt"
"log"
)
// myError is a demo error type carrying a numeric code.
type (
	myError struct {
		Code int
	}
)

// Error implements the error interface for *myError.
func (e *myError) Error() string {
	return fmt.Sprintf("Error code: %v", e.Code)
}
// sandbox runs protected, recovering from any panic it raises so the
// caller can continue. The panic value is logged differently per type.
func sandbox(protected func()) {
	defer func() {
		if x := recover(); x != nil {
			// Idiomatic type switch with binding instead of the old
			// switch x.(type) followed by a redundant x.(error) assertion.
			switch v := x.(type) {
			case string:
				log.Printf("[sandbox] end for a panic: %v", v)
			case error:
				log.Printf("[sandbox] end for a panic: %v", v.Error())
			default:
				log.Printf("[sandbox] end for a unexpected panic: %v", v)
			}
			log.Println("[sandbox] recovered and go on ...")
		} else {
			log.Printf("[sandbox] end")
		}
	}()
	log.Println("[sandbox] start")
	protected()
}
// main demonstrates sandbox: the panic raised inside the protected
// function is recovered, so execution reaches the final log line.
func main() {
	sandbox(func() {
		log.Println("[protected] raise a `panic`")
		panic(&myError{-1})
		// panic("any reason")
	})
	log.Println("[main] exit")
}
|
package domain
import (
"testing"
"github.com/DATA-DOG/go-sqlmock"
)
// Test_BookRepository_Create uses sqlmock to check that Get succeeds for
// the one stubbed id and fails for any other id (no matching expectation).
// NOTE(review): despite the name, this exercises Get, not Create.
func Test_BookRepository_Create(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
	}
	defer db.Close()
	bookRepository := NewBookRepository(db)
	bookRepository.Initialize()
	// One stubbed row for id "random_id".
	rows := sqlmock.NewRows([]string{"id", "title", "year", "createdByID"}).
		AddRow("random_id", "title", "2020", "someid")
	mock.ExpectQuery("SELECT (.+) FROM book (.+) LIMIT 1").WithArgs("random_id").WillReturnRows(rows)
	_, getError := bookRepository.Get("random_id")
	if getError != nil {
		t.Fatalf("an error '%s' was not expected when querying", getError)
	}
	// A second, unexpected query must produce an error from sqlmock.
	_, getError = bookRepository.Get("any_other_random_id")
	if getError == nil {
		t.Fatalf("should raise an error")
	}
}
/*
type MockBroker struct {
}
func (m *MockBroker) Subscribe(channel string, cb func(string)) {
}
func (m *MockBroker) Set(key string, message interface{}, time time.Duration) error {
return nil
}
func (m *MockBroker) Publish(channel string, message interface{}) *redis.IntCmd {
intCmd := &redis.IntCmd{}
return intCmd
}
type MockUserRepository struct {
}
func (r *MockUserRepository) Save(user *domain.User) (*domain.User, common.CustomError) {
return nil, nil
}
*/
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statistics
import (
"math"
"strconv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/planner/util/debugtrace"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// Column represents a column histogram.
type Column struct {
	LastAnalyzePos types.Datum       // position where the last analyze stopped
	CMSketch       *CMSketch         // count-min sketch; may be nil when evicted
	TopN           *TopN             // top-N values; may be nil when evicted
	FMSketch       *FMSketch         // FM sketch; may be nil when evicted
	Info           *model.ColumnInfo // column metadata; may be nil in some tests (see IsInvalid)
	Histogram
	// StatsLoadedStatus indicates the status of column statistics
	StatsLoadedStatus
	PhysicalID int64
	Flag       int64
	StatsVer   int64 // StatsVer is the version of the current stats, used to maintain compatibility
	IsHandle   bool  // whether this column is the integer row handle (primary key)
}
// String renders the column's histogram with zero indentation.
func (c *Column) String() string {
	return c.Histogram.ToString(0)
}
// TotalRowCount returns the total count of this column.
func (c *Column) TotalRowCount() float64 {
	total := c.Histogram.TotalRowCount()
	// From stats v2 on, TopN values are stored outside the histogram
	// buckets, so they must be added in.
	if c.StatsVer >= Version2 {
		total += float64(c.TopN.TotalCount())
	}
	return total
}
// NotNullCount returns the count of this column which is not null.
func (c *Column) NotNullCount() float64 {
	count := c.Histogram.NotNullCount()
	// From stats v2 on, TopN values live outside the histogram buckets.
	if c.StatsVer >= Version2 {
		count += float64(c.TopN.TotalCount())
	}
	return count
}
// GetIncreaseFactor get the increase factor to adjust the final estimated count when the table is modified.
func (c *Column) GetIncreaseFactor(realtimeRowCount int64) float64 {
	total := c.TotalRowCount()
	if total == 0 {
		// avoid dividing by 0
		return 1.0
	}
	return float64(realtimeRowCount) / total
}
// MemoryUsage returns the total memory usage of Histogram, CMSketch, FMSketch in Column.
// We ignore the size of other metadata in Column
func (c *Column) MemoryUsage() CacheItemMemoryUsage {
	usage := &ColumnMemUsage{
		ColumnID: c.Info.ID,
	}
	// The histogram is always present; sketches may be nil when evicted.
	usage.HistogramMemUsage = c.Histogram.MemoryUsage()
	total := usage.HistogramMemUsage
	if c.CMSketch != nil {
		usage.CMSketchMemUsage = c.CMSketch.MemoryUsage()
		total += usage.CMSketchMemUsage
	}
	if c.TopN != nil {
		usage.TopNMemUsage = c.TopN.MemoryUsage()
		total += usage.TopNMemUsage
	}
	if c.FMSketch != nil {
		usage.FMSketchMemUsage = c.FMSketch.MemoryUsage()
		total += usage.FMSketchMemUsage
	}
	usage.TotalMemUsage = total
	return usage
}
// HistogramNeededItems stores the columns/indices whose Histograms need to be loaded from physical kv layer.
// Currently, we only load index/pk's Histogram from kv automatically. Columns' are loaded by needs.
var HistogramNeededItems = neededStatsMap{items: map[model.TableItemID]struct{}{}}
// IsInvalid checks if this column is invalid.
// If this column has histogram but not loaded yet,
// then we mark it as need histogram.
func (c *Column) IsInvalid(
	sctx sessionctx.Context,
	collPseudo bool,
) (res bool) {
	// Captured for the deferred debug-trace record below.
	var totalCount float64
	var ndv int64
	var inValidForCollPseudo, essentialLoaded bool
	if sctx.GetSessionVars().StmtCtx.EnableOptimizerDebugTrace {
		debugtrace.EnterContextCommon(sctx)
		defer func() {
			debugtrace.RecordAnyValuesWithNames(sctx,
				"IsInvalid", res,
				"InValidForCollPseudo", inValidForCollPseudo,
				"TotalCount", totalCount,
				"NDV", ndv,
				"EssentialLoaded", essentialLoaded,
			)
			debugtrace.LeaveContextCommon(sctx)
		}()
	}
	if collPseudo {
		inValidForCollPseudo = true
		return true
	}
	// NOTE(review): sctx is already dereferenced above, so this nil check
	// can never save us from a nil sctx — confirm whether it is dead code
	// or the dereference above should be guarded instead.
	if sctx != nil {
		stmtctx := sctx.GetSessionVars().StmtCtx
		if c.IsLoadNeeded() && stmtctx != nil {
			if stmtctx.StatsLoad.Timeout > 0 {
				logutil.BgLogger().Warn("Hist for column should already be loaded as sync but not found.",
					zap.String(strconv.FormatInt(c.Info.ID, 10), c.Info.Name.O))
			}
			// In some tests, the c.Info is not set, so we add this check here.
			if c.Info != nil {
				HistogramNeededItems.insert(model.TableItemID{TableID: c.PhysicalID, ID: c.Info.ID, IsIndex: false})
			}
		}
	}
	// In some cases, some statistics in column would be evicted
	// For example: the cmsketch of the column might be evicted while the histogram and the topn are still exists
	// In this case, we will think this column as valid due to we can still use the rest of the statistics to do optimize.
	totalCount = c.TotalRowCount()
	essentialLoaded = c.IsEssentialStatsLoaded()
	ndv = c.Histogram.NDV
	return totalCount == 0 || (!essentialLoaded && ndv > 0)
}
// ItemID implements TableCacheItem
func (c *Column) ItemID() int64 {
	return c.Info.ID
}
// DropUnnecessaryData drops the unnecessary data for the column.
func (c *Column) DropUnnecessaryData() {
	// The CMSketch is still needed by stats v2 readers; only drop it for
	// earlier versions.
	if c.StatsVer < Version2 {
		c.CMSketch = nil
	}
	c.TopN = nil
	// Replace the histogram internals with empty placeholders rather than
	// nil so later accessors don't have to nil-check.
	c.Histogram.Bounds = chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeBlob)}, 0)
	c.Histogram.Buckets = make([]Bucket, 0)
	c.Histogram.Scalars = make([]scalar, 0)
	c.evictedStatus = AllEvicted
}
// IsAllEvicted indicates whether all stats evicted
func (c *Column) IsAllEvicted() bool {
	return c.statsInitialized && c.evictedStatus >= AllEvicted
}

// GetEvictedStatus indicates the evicted status
func (c *Column) GetEvictedStatus() int {
	return c.evictedStatus
}

// IsStatsInitialized indicates whether stats is initialized
func (c *Column) IsStatsInitialized() bool {
	return c.statsInitialized
}

// GetStatsVer indicates the stats version
func (c *Column) GetStatsVer() int64 {
	return c.StatsVer
}

// IsCMSExist indicates whether CMSketch exists
func (c *Column) IsCMSExist() bool {
	return c.CMSketch != nil
}
// AvgColSize is the average column size of the histogram. These sizes are derived from function `encode`
// and `Datum::ConvertTo`, so we need to update them if those 2 functions are changed.
func (c *Column) AvgColSize(count int64, isKey bool) float64 {
	if count == 0 {
		return 0
	}
	// Note that, if the handle column is encoded as value, instead of key, i.e,
	// when the handle column is in a unique index, the real column size may be
	// smaller than 8 because it is encoded using `EncodeVarint`. Since we don't
	// know the exact value size now, use 8 as approximation.
	if c.IsHandle {
		return 8
	}
	histCount := c.TotalRowCount()
	// Scale the per-row size down by the fraction of non-null values.
	notNullRatio := 1.0
	if histCount > 0 {
		notNullRatio = 1.0 - float64(c.NullCount)/histCount
	}
	switch c.Histogram.Tp.GetType() {
	case mysql.TypeFloat, mysql.TypeDouble, mysql.TypeDuration, mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp:
		// Fixed 8-byte encodings.
		return 8 * notNullRatio
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear, mysql.TypeEnum, mysql.TypeBit, mysql.TypeSet:
		// Integer-like types are 8 bytes only when key-encoded.
		if isKey {
			return 8 * notNullRatio
		}
	}
	// Keep two decimal place.
	return math.Round(float64(c.TotColSize)/float64(count)*100) / 100
}
// AvgColSizeChunkFormat is the average column size of the histogram. These sizes are derived from function `Encode`
// and `DecodeToChunk`, so we need to update them if those 2 functions are changed.
func (c *Column) AvgColSizeChunkFormat(count int64) float64 {
	if count == 0 {
		return 0
	}
	if fixedLen := chunk.GetFixedLen(c.Histogram.Tp); fixedLen != -1 {
		return float64(fixedLen)
	}
	avg := float64(c.TotColSize) / float64(count)
	// Keep two decimal places; add 8 bytes for unfixed-len type's offsets.
	if avg < 1 {
		return math.Round(avg*100)/100 + 8
	}
	// Minus Log2(avgSize) for unfixed-len type LEN.
	return math.Round((avg-math.Log2(avg))*100)/100 + 8
}
// AvgColSizeListInDisk is the average column size of the histogram. These sizes are derived
// from `chunk.ListInDisk` so we need to update them if those 2 functions are changed.
func (c *Column) AvgColSizeListInDisk(count int64) float64 {
	if count == 0 {
		return 0
	}
	// Scale by the fraction of non-null values.
	histCount := c.TotalRowCount()
	notNullRatio := 1.0
	if histCount > 0 {
		notNullRatio = 1.0 - float64(c.NullCount)/histCount
	}
	if fixed := chunk.GetFixedLen(c.Histogram.Tp); fixed != -1 {
		return float64(fixed) * notNullRatio
	}
	avg := float64(c.TotColSize) / float64(count)
	// Keep two decimal places; minus Log2(avgSize) for unfixed-len type LEN.
	if avg < 1 {
		return math.Round((avg)*100) / 100
	}
	return math.Round((avg-math.Log2(avg))*100) / 100
}
// StatusToString gets the string info of StatsLoadedStatus
func (s StatsLoadedStatus) StatusToString() string {
	if !s.statsInitialized {
		return "unInitialized"
	}
	if s.evictedStatus == AllLoaded {
		return "allLoaded"
	}
	if s.evictedStatus == AllEvicted {
		return "allEvicted"
	}
	return "unknown"
}
// IsAnalyzed indicates whether the column is analyzed.
// The set of IsAnalyzed columns is a subset of the set of StatsAvailable columns.
func (c *Column) IsAnalyzed() bool {
	// Version0 is the "never analyzed" marker.
	return c.GetStatsVer() != Version0
}
// StatsAvailable indicates whether the column stats are collected.
// Note:
//  1. The function merely talks about whether the stats are collected, regardless of the stats loaded status.
//  2. The function is used to decide StatsLoadedStatus.statsInitialized when reading the column stats from storage.
//  3. There are two cases that StatsAvailable is true:
//     a. IsAnalyzed is true.
//     b. The column is newly-added/modified and its stats are generated according to the default value.
func (c *Column) StatsAvailable() bool {
	// Typically, when the column is analyzed, StatsVer is set to Version1/Version2, so we check IsAnalyzed().
	// However, when we add/modify a column, its stats are generated according to the default value without setting
	// StatsVer, so we check NDV > 0 || NullCount > 0 for the case.
	return c.IsAnalyzed() || c.NDV > 0 || c.NullCount > 0
}
|
/*
* @lc app=leetcode.cn id=1413 lang=golang
*
* [1413] 逐步求和得到正数的最小值
*/
// @lc code=start
package main
// minStartValue returns the smallest positive start value such that the
// running sum startValue + nums[0] + ... + nums[i] is never below 1.
func minStartValue(nums []int) int {
	running, lowest := 0, 0
	for _, v := range nums {
		running += v
		if running < lowest {
			lowest = running
		}
	}
	if lowest > 0 {
		return 1
	}
	// Offset the worst dip so the running sum bottoms out at exactly 1.
	return 1 - lowest
}
// @lc code=end
|
package random
import "math/rand"
const (
	// numberBytes is the alphabet of characters Number may emit.
	numberBytes = "0123456789"
)

// Number generates random numbers.
func Number(n int) string {
	digits := make([]byte, n)
	for i := 0; i < n; i++ {
		digits[i] = numberBytes[rand.Intn(len(numberBytes))]
	}
	return string(digits)
}
|
package proto
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseTopic feeds a list of topic byte strings to ParseTopic and
// compares the returned id slices pairwise against the expected table;
// a nil entry means the topic is expected to be rejected.
func TestParseTopic(t *testing.T) {
	input := [][]byte{
		[]byte("/"),
		[]byte("a/b"),
		[]byte("a/b/"),
		[]byte("/a/b"),
		[]byte("a/b/c"),
		[]byte("/a/b/c"),
		[]byte("/asdf/bse/dewer"),
		[]byte("/a"),
		[]byte("/a//b"),
		[]byte("/+/b/c"),
		[]byte("/a/+/c"),
	}
	// out[i] is the expected id slice for input[i]; only the two
	// well-formed three-segment topics produce ids.
	out := [][]uint32{
		nil,
		nil,
		nil,
		nil,
		nil,
		[]uint32{3238259379, 500706888, 1027807523},
		[]uint32{1753631938, 324405670, 3531030695},
		nil,
		nil,
		nil,
		nil,
	}
	for i, v := range input {
		ids, _ := ParseTopic(v, true)
		assert.Equal(t, out[i], ids)
	}
}
// TestAppidAndSendTag checks that malformed topics are rejected and that
// a well-formed topic ("/<10-digit appid>/<2 tag chars>/<channel>")
// yields its appid, send tag and type tag.
func TestAppidAndSendTag(t *testing.T) {
	topic := []byte("/")
	_, _, _, err := AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/a")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/a/b")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/a/b/c")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/1234567890/b/c")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/1234567890/1/c")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/1234567890/12/c")
	appid, sendtag, typetag, err := AppidAndSendTag(topic)
	// BUG FIX: the success-path error was previously never asserted.
	assert.NoError(t, err)
	assert.EqualValues(t, []byte("1234567890"), appid)
	assert.EqualValues(t, '1', sendtag)
	assert.EqualValues(t, '2', typetag)
	topic = []byte("/1234567890/123/c")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
	topic = []byte("/1234567890/1/")
	_, _, _, err = AppidAndSendTag(topic)
	assert.Error(t, err)
}
// TestGetTopicType checks that the type tag (second character of the
// tag segment) is extracted from a well-formed topic.
func TestGetTopicType(t *testing.T) {
	topic := []byte("/1234567890/12/c")
	tp := GetTopicType(topic)
	assert.EqualValues(t, '2', tp)
}
|
package model
import (
"fmt"
"github.com/zhenghaoz/gorse/base"
"github.com/zhenghaoz/gorse/core"
"github.com/zhenghaoz/gorse/floats"
"math"
)
// _BiasUpdateCache accumulates pending additive updates to bias terms,
// keyed by bias index, so they can be applied to the bias vector in one
// batch via Commit.
type _BiasUpdateCache struct {
	cache map[int]float64
}

// _NewBiasUpdateCache creates an empty bias update cache.
func _NewBiasUpdateCache() *_BiasUpdateCache {
	return &_BiasUpdateCache{cache: make(map[int]float64)}
}

// Add accumulates update for the bias at index.
func (cache *_BiasUpdateCache) Add(index int, update float64) {
	// Indexing a missing key yields the zero value, so the previous
	// explicit create-if-absent step was redundant.
	cache.cache[index] += update
}

// Commit adds every accumulated update onto the corresponding bias entry.
// The cache contents are left intact.
func (cache *_BiasUpdateCache) Commit(bias []float64) {
	for index, update := range cache.cache {
		bias[index] += update
	}
}
// _FactorUpdateCache accumulates pending additive updates to latent
// factor vectors, keyed by row index, for batch application via Commit.
type _FactorUpdateCache struct {
	nFactor int                 // length of each factor vector
	cache   map[int][]float64   // index -> accumulated update vector
}

// _NewFactorUpdateCache creates an empty cache for vectors of length nFactor.
func _NewFactorUpdateCache(nFactor int) *_FactorUpdateCache {
	cache := new(_FactorUpdateCache)
	cache.nFactor = nFactor
	cache.cache = make(map[int][]float64)
	return cache
}

// Add accumulates update (element-wise) into the vector for index.
func (cache *_FactorUpdateCache) Add(index int, update []float64) {
	// Create if not exist — unlike a scalar map entry, the slice must be
	// allocated before floats.Add can write into it.
	if _, exist := cache.cache[index]; !exist {
		cache.cache[index] = make([]float64, cache.nFactor)
	}
	floats.Add(cache.cache[index], update)
}

// Commit adds every accumulated update vector onto the matching factors row.
func (cache *_FactorUpdateCache) Commit(factors [][]float64) {
	for index, update := range cache.cache {
		floats.Add(factors[index], update)
	}
}
// FM is the implementation of factorization machine [12]. The prediction is given by
//
//	\hat y(x) = w_0 + \sum^n_{i=1} w_i x_i + \sum^n_{i=1} \sum^n_{j=i+1} <v_i, v_j>x_i x_j
//
// Hyper-parameters:
//	 Reg 		- The regularization parameter of the cost function that is
// 				  optimized. Default is 0.02.
//	 Lr 		- The learning rate of SGD. Default is 0.005.
//	 nFactors	- The number of latent factors. Default is 100.
//	 NEpochs	- The number of iteration of the SGD procedure. Default is 20.
//	 InitMean	- The mean of initial random latent factors. Default is 0.
//	 InitStdDev	- The standard deviation of initial random latent factors. Default is 0.1.
type FM struct {
	Base
	UserFeatures []*base.SparseVector // optional per-user feature vectors; may be nil
	ItemFeatures []*base.SparseVector // optional per-item feature vectors; may be nil
	// Model parameters
	GlobalBias float64     // w_0
	Bias       []float64   // w_i
	Factors    [][]float64 // v_i
	// Hyper parameters
	useBias    bool
	nFactors   int
	nEpochs    int
	lr         float64
	reg        float64
	initMean   float64
	initStdDev float64
	optimizer  string
	// Fallback model
	UserRatings []*base.MarginalSubSet
	ItemPop     *ItemPop
}
// NewFM creates a factorization machine.
func NewFM(params base.Params) *FM {
	fm := &FM{}
	fm.SetParams(params)
	return fm
}
// SetParams sets hyper-parameters of the factorization machine.
func (fm *FM) SetParams(params base.Params) {
	fm.Base.SetParams(params)
	// Setup hyper-parameters, falling back to the documented defaults.
	fm.useBias = fm.Params.GetBool(base.UseBias, true)
	fm.nFactors = fm.Params.GetInt(base.NFactors, 100)
	fm.nEpochs = fm.Params.GetInt(base.NEpochs, 20)
	fm.lr = fm.Params.GetFloat64(base.Lr, 0.005)
	fm.reg = fm.Params.GetFloat64(base.Reg, 0.02)
	fm.initMean = fm.Params.GetFloat64(base.InitMean, 0)
	fm.initStdDev = fm.Params.GetFloat64(base.InitStdDev, 0.1)
	fm.optimizer = fm.Params.GetString(base.Optimizer, base.SGD)
}
// Predict estimates the rating user userId would give item itemId.
func (fm *FM) Predict(userId int, itemId int) float64 {
	// Map external sparse IDs onto dense internal indices, then evaluate
	// the model on the encoded feature vector.
	userIndex := fm.UserIndexer.ToIndex(userId)
	itemIndex := fm.ItemIndexer.ToIndex(itemId)
	return fm.predict(fm.encode(userIndex, itemIndex))
}
// encode builds the sparse FM input vector for a (user, item) pair: a 1 at
// the user index, a 1 at the item index (offset by the user count), followed
// by any user and item auxiliary features.
func (fm *FM) encode(userIndex int, itemIndex int) *base.SparseVector {
	// Get user features and item features
	var userFeature, itemFeature *base.SparseVector
	if fm.UserFeatures != nil {
		userFeature = fm.UserFeatures[userIndex]
	}
	if fm.ItemFeatures != nil {
		itemFeature = fm.ItemFeatures[itemIndex]
	}
	// Encode feature vector
	// NOTE(review): userFeature/itemFeature stay nil when no features are
	// loaded; Len/ForEach are called on them regardless, so this relies on
	// base.SparseVector's methods being nil-safe — confirm.
	vectorSize := 2 + userFeature.Len() + itemFeature.Len()
	vector := &base.SparseVector{
		Indices: make([]int, 0, vectorSize),
		Values:  make([]float64, 0, vectorSize),
	}
	vector.Add(userIndex, 1)
	vector.Add(itemIndex+fm.UserIndexer.Len(), 1)
	// NOTE(review): user and item features are mapped into the same offset
	// range (UserIndexer.Len()+ItemIndexer.Len()); this assumes feature
	// indices are globally unique across users and items — confirm.
	userFeature.ForEach(func(i, index int, value float64) {
		vector.Add(index+fm.UserIndexer.Len()+fm.ItemIndexer.Len(), value)
	})
	itemFeature.ForEach(func(i, index int, value float64) {
		vector.Add(index+fm.UserIndexer.Len()+fm.ItemIndexer.Len(), value)
	})
	//fmt.Println(vector)
	return vector
}
// predict evaluates the model on an encoded input vector:
// w_0 + linear term + pairwise interaction term.
// The pairwise term is computed naively in O(n^2 * k); for long vectors the
// well-known O(n*k) reformulation could be substituted if this becomes hot.
func (fm *FM) predict(vector *base.SparseVector) float64 {
	predict := fm.GlobalBias
	// Linear term.
	// NOTE(review): the element value is not multiplied in, i.e. this sums
	// w_i rather than w_i*x_i. With the current encoder the user/item
	// entries are 1, but auxiliary feature values may not be — confirm
	// whether this is intended.
	vector.ForEach(func(i, index int, value float64) {
		predict += fm.Bias[index]
	})
	// Pairwise interaction term: sum over all index pairs (i, j), i < j.
	for i := 0; i < vector.Len(); i++ {
		for j := i + 1; j < vector.Len(); j++ {
			factor1 := fm.Factors[vector.Indices[i]]
			factor2 := fm.Factors[vector.Indices[j]]
			value1 := vector.Values[i]
			value2 := vector.Values[j]
			predict += floats.Dot(factor1, factor2) * value1 * value2
		}
	}
	return predict
}
// Fit trains the factorization machine on trainSet with the configured
// optimizer: SGD minimizes squared rating error, BPR optimizes pairwise
// ranking. Panics on an unknown optimizer name.
func (fm *FM) Fit(trainSet core.DataSetInterface, options *base.RuntimeOptions) {
	fm.Init(trainSet)
	fm.UserFeatures = trainSet.UserFeatures()
	fm.ItemFeatures = trainSet.ItemFeatures()
	// Initialization: one bias and one factor row per user, item and feature.
	paramCount := trainSet.UserCount() + trainSet.ItemCount() + trainSet.FeatureCount()
	fm.GlobalBias = trainSet.GlobalMean()
	fm.Bias = make([]float64, paramCount)
	fm.Factors = fm.rng.NewNormalMatrix(paramCount, fm.nFactors, fm.initMean, fm.initStdDev)
	// Optimize
	switch fm.optimizer {
	case base.SGD:
		fm.fitSGD(trainSet, options)
	case base.BPR:
		fm.fitBPR(trainSet, options)
	default:
		panic(fmt.Sprintf("Unknown optimizer: %v", fm.optimizer))
	}
}
// fitSGD optimizes the FM parameters by stochastic gradient descent on the
// squared rating error, visiting every training sample once per epoch.
func (fm *FM) fitSGD(trainSet core.DataSetInterface, options *base.RuntimeOptions) {
	// Create buffers, reused across samples to avoid per-sample allocation.
	temp := make([]float64, fm.nFactors)
	gradFactor := make([]float64, fm.nFactors)
	// Optimize
	for epoch := 0; epoch < fm.nEpochs; epoch++ {
		cost := 0.0
		for i := 0; i < trainSet.Count(); i++ {
			userIndex, itemIndex, rating := trainSet.GetWithIndex(i)
			vector := fm.encode(userIndex, itemIndex)
			// Compute error: e_{ui} = r - \hat r
			upGrad := rating - fm.predict(vector)
			cost += upGrad * upGrad
			// Update global bias
			// \frac {\partial\hat{y}(x)} {\partial w_0} = 1
			gradGlobalBias := upGrad - fm.reg*fm.GlobalBias
			fm.GlobalBias += fm.lr * gradGlobalBias
			// Update bias
			// \frac {\partial\hat{y}(x)} {\partial w_i} = x_i
			vector.ForEach(func(_, index int, value float64) {
				gradBias := upGrad*value - fm.reg*fm.Bias[index]
				fm.Bias[index] += fm.lr * gradBias
			})
			// Update factors
			// \frac {\partial\hat{y}(x)} {\partial v_{i,f}}
			//   = x_i \sum^n_{j=1} v_{i,f}x_j - v_{i,g}x^2_i
			// 1. Pre-compute \sum^n_{j=1} v_{i,f}x_j into temp
			base.FillZeroVector(temp)
			vector.ForEach(func(_, index int, value float64) {
				floats.MulConstAddTo(fm.Factors[index], value, temp)
			})
			// 2. Update by x_i \sum^n_{j=1} v_{i,f}x_j - v_{i,g}x^2_i
			//    (gradient plus L2 regularization, scaled by the learning rate)
			vector.ForEach(func(_, index int, value float64) {
				floats.MulConstTo(temp, upGrad*value, gradFactor)
				floats.MulConstAddTo(fm.Factors[index], -upGrad*value*value, gradFactor)
				floats.MulConstAddTo(fm.Factors[index], -fm.reg, gradFactor)
				floats.MulConstAddTo(gradFactor, fm.lr, fm.Factors[index])
			})
		}
		options.Logf("epoch = %v/%v, cost = %v", epoch+1, fm.nEpochs, cost)
	}
}
// fitBPR optimizes the FM parameters with Bayesian Personalized Ranking:
// each step samples a user, one observed (positive) item and one unobserved
// (negative) item, and nudges the model to rank the positive above the
// negative using a logistic ranking loss.
func (fm *FM) fitBPR(trainSet core.DataSetInterface, options *base.RuntimeOptions) {
	fm.UserRatings = trainSet.Users()
	// Create item pop model (fallback for users without ratings)
	fm.ItemPop = NewItemPop(nil)
	fm.ItemPop.Fit(trainSet, options)
	// Create buffers, reused across samples
	temp := make([]float64, fm.nFactors)
	gradFactor := make([]float64, fm.nFactors)
	// Training
	for epoch := 0; epoch < fm.nEpochs; epoch++ {
		// Training epoch
		cost := 0.0
		for i := 0; i < trainSet.Count(); i++ {
			// Select a user with at least one rating (rejection sampling)
			var userIndex, ratingCount int
			for {
				userIndex = fm.rng.Intn(trainSet.UserCount())
				ratingCount = trainSet.UserByIndex(userIndex).Len()
				if ratingCount > 0 {
					break
				}
			}
			posIndex := trainSet.UserByIndex(userIndex).GetIndex(fm.rng.Intn(ratingCount))
			// Select a negative sample: an item the user has not rated
			negIndex := -1
			for {
				// NOTE: this temp shadows the factor buffer declared above;
				// the buffer is re-zeroed (FillZeroVector) before reuse below.
				temp := fm.rng.Intn(trainSet.ItemCount())
				tempId := fm.ItemIndexer.ToID(temp)
				if !trainSet.UserByIndex(userIndex).Contain(tempId) {
					negIndex = temp
					break
				}
			}
			posVec := fm.encode(userIndex, posIndex)
			negVec := fm.encode(userIndex, negIndex)
			diff := fm.predict(posVec) - fm.predict(negVec)
			// Softplus loss and the shared gradient scale (sigmoid of -diff)
			cost += math.Log(1.0 + math.Exp(-diff))
			upGrad := math.Exp(-diff) / (1.0 + math.Exp(-diff))
			// Update bias:
			// \frac {\partial\hat{y}(x)} {\partial w_i} = x_i
			// Updates are buffered so indices shared by the positive and the
			// negative vector are computed against unmodified parameters.
			biasCache := _NewBiasUpdateCache()
			// 1. Positive sample
			posVec.ForEach(func(_, index int, value float64) {
				gradBias := upGrad*value - fm.reg*fm.Bias[index]
				biasCache.Add(index, fm.lr*gradBias)
			})
			// 2. Negative sample (opposite sign on the loss gradient)
			negVec.ForEach(func(_, index int, value float64) {
				gradBias := -upGrad*value - fm.reg*fm.Bias[index]
				biasCache.Add(index, fm.lr*gradBias)
			})
			// 3. Commit update
			biasCache.Commit(fm.Bias)
			// Update factors:
			// \frac {\partial\hat{y}(x)} {\partial v_{i,f}}
			//   = x_i \sum^n_{j=1} v_{i,f}x_j - v_{i,g}x^2_i
			factorCache := _NewFactorUpdateCache(fm.nFactors)
			// 1. Positive sample
			base.FillZeroVector(temp)
			posVec.ForEach(func(_, index int, value float64) {
				floats.MulConstAddTo(fm.Factors[index], value, temp)
			})
			posVec.ForEach(func(_, index int, value float64) {
				floats.MulConstTo(temp, upGrad*value, gradFactor)
				floats.MulConstAddTo(fm.Factors[index], -upGrad*value*value, gradFactor)
				floats.MulConstAddTo(fm.Factors[index], -fm.reg, gradFactor)
				floats.MulConst(gradFactor, fm.lr)
				factorCache.Add(index, gradFactor)
			})
			// 2. Negative sample
			base.FillZeroVector(temp)
			negVec.ForEach(func(_, index int, value float64) {
				floats.MulConstAddTo(fm.Factors[index], value, temp)
			})
			negVec.ForEach(func(_, index int, value float64) {
				floats.MulConstTo(temp, -upGrad*value, gradFactor)
				floats.MulConstAddTo(fm.Factors[index], upGrad*value*value, gradFactor)
				floats.MulConstAddTo(fm.Factors[index], -fm.reg, gradFactor)
				floats.MulConst(gradFactor, fm.lr)
				factorCache.Add(index, gradFactor)
			})
			// 3. Commit update
			factorCache.Commit(fm.Factors)
		}
		options.Logf("epoch = %v/%v, cost = %v", epoch+1, fm.nEpochs, cost)
	}
}
|
package main
// 一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
// 机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
// 现在考虑网格中有障碍物。那么从左上角到右下角将会有多少条不同的路径?
// uniquePathsWithObstacles returns the number of distinct paths from the
// top-left to the bottom-right corner of a grid, moving only right or down.
// Cells with value 1 are obstacles and cannot be entered. An empty grid has
// no paths. (LeetCode 63: a robot at "Start" tries to reach "Finish".)
func uniquePathsWithObstacles(obstacleGrid [][]int) int {
	// Guard against an empty grid so we never index row 0 below
	// (the original panicked on [][]int{}).
	if len(obstacleGrid) == 0 || len(obstacleGrid[0]) == 0 {
		return 0
	}
	maxX := len(obstacleGrid)
	maxY := len(obstacleGrid[0])
	// status[i][j] holds the number of ways to reach cell (i, j).
	status := make([][]int, maxX)
	for i := range status {
		status[i] = make([]int, maxY)
		for j := range status[i] {
			if obstacleGrid[i][j] == 1 {
				// Obstacle: unreachable.
				status[i][j] = 0
			} else if i == 0 && j == 0 {
				// Start cell.
				status[i][j] = 1
			} else if i == 0 {
				// Top row: only reachable from the left.
				status[i][j] = status[i][j-1]
			} else if j == 0 {
				// Left column: only reachable from above.
				// (The original comment wrongly called this the bottom edge.)
				status[i][j] = status[i-1][j]
			} else {
				status[i][j] = status[i-1][j] + status[i][j-1]
			}
		}
	}
	return status[maxX-1][maxY-1]
}
|
package main
import "fmt"
// Named basic types: each gives a primitive a domain-specific identity, so
// e.g. Minutes and Hours cannot be mixed without an explicit conversion.
type Minutes int
type Hours int
type Weight float64
type Title string
type Answer bool
// main demonstrates constructing and printing values of the named types,
// and shows that arithmetic with an untyped constant (minutes += 3) still
// works on a defined type.
func main() {
	var (
		minutes = Minutes(37)
		hours   = Hours(2)
		weight  = Weight(945.7)
		name    = Title("The Matrix")
		answer  = Answer(true)
	)
	fmt.Println(minutes, hours, weight, name, answer)
	minutes += 3
	fmt.Println(minutes)
}
|
package main
//heads up, go does not have a while loop
import (
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
)
//What is unmarshalling and marshalling?
/*In computer science, unmarshalling or unmarshaling refers to the process of transforming a representation of an object that was used for storage or transmission to a representation of the object that is executable.
A serialized object which was used for communication can not be processed by a computer program.
An unmarshalling interface takes the serialized object and transforms it into an executable form. Unmarshalling (similar to deserialization) is the reverse process of marshalling.
In a few words, "marshalling" refers to the process of converting the data or the objects into a byte-stream, and "unmarshalling" is the reverse process of converting the byte-stream back to their original data or object.
The conversion is achieved through "serialization".
*/
//since the washingtonpost changes their xml structure daily, we pivoted here: we are no longer getting data from the internet, but doing the same thing — parsing the data from an embedded xml document
// washpostXML is a static snapshot of a Washington Post sitemap index plus a
// news urlset, used in place of a live HTTP fetch.
// NOTE(review): the payload concatenates a <sitemapindex> and an <urlset>
// root element, which is not a single well-formed XML document — this relies
// on encoding/xml decoding only the first matching root; confirm.
var washpostXML = []byte(`
<sitemapindex>
  <sitemap>
    <loc>http://www.washingtonpost.com/news-politics-sitemap.xml</loc>
  </sitemap>
  <sitemap>
    <loc>http://www.washingtonpost.com/news-blogs-technology-sitemap.xml</loc>
  </sitemap>
  <sitemap>
    <loc>http://www.washingtonpost.com/news-opinions-sitemap.xml</loc>
  </sitemap>
</sitemapindex>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:n="http://www.google.com/schemas/sitemap-news/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd
http://www.google.com/schemas/sitemap-news/0.9
http://www.google.com/schemas/sitemap-news/0.9/sitemap-news.xsd">
<url>
<loc>https://www.washingtonpost.com/business/technology/un-adds-32-items-to-list-of-prohibited-goods-for-north-korea/2017/10/23/5f112818-b812-11e7-9b93-b97043e57a22_story.html</loc>
<changefreq>hourly</changefreq>
<n:news>
<n:publication>
<n:name>Washington Post</n:name>
<n:language>en</n:language>
</n:publication>
<n:publication_date>2017-10-23T22:12:20Z</n:publication_date>
<n:title>UN adds 32 items to list of prohibited goods for North Korea</n:title>
<n:keywords>
UN-United Nations-North Korea-Sanctions,North Korea,East Asia,Asia,United Nations Security Council,United Nations,Business,General news,Sanctions and embargoes,Foreign policy,International relations,Government and politics,Government policy,Military technology,Technology</n:keywords>
</n:news>
</url>
<url>
<loc>https://www.washingtonpost.com/business/technology/cisco-systems-buying-broadsoft-for-19-billion-cash/2017/10/23/ae024774-b7f2-11e7-9b93-b97043e57a22_story.html</loc>
<changefreq>hourly</changefreq>
<n:news>
<n:publication>
<n:name>Washington Post</n:name>
<n:language>en</n:language>
</n:publication>
<n:publication_date>2017-10-23T21:42:14Z</n:publication_date>
<n:title>Cisco Systems buying BroadSoft for $1.9 billion cash</n:title>
<n:keywords>
US-Cisco-BroadSoft-Acquisition,Cisco Systems Inc,Business,Technology,Communication technology</n:keywords>
</n:news>
</url>
</urlset>`)
//we cleaned up the previous code to make it more lean
//Goal- visiting sitemap links
//since we had only one string to pull, we do it here only and reference the loc tag of the xml
// we also delete the overriding function as that fetched us strings. Dude we already got strings here.
// SitemapIndexlean decodes a sitemap index document.
type SitemapIndexlean struct {
	// Locations collects every <sitemap><loc> URL from the index.
	Locations []string `xml:"sitemap>loc"`
}

// News decodes a news urlset: parallel title/keyword/location lists.
// NOTE(review): the three slices are assumed to be the same length; the
// sitemap could omit news metadata on some urls — confirm.
type News struct {
	Titles    []string `xml:"url>news>title"`
	Keywords  []string `xml:"url>news>keywords"`
	Locations []string `xml:"url>loc"`
}

// NewsMap pairs the keyword and location for a single article title; used
// as the value type of the title-keyed result map.
type NewsMap struct {
	Keyword  string
	Location string
}
// main parses the embedded sitemap index, fetches each referenced sitemap,
// extracts the news metadata and prints every title/keyword/location entry.
func main() {
	var s SitemapIndexlean
	var n News
	news_map := make(map[string]NewsMap)
	bytes := washpostXML
	// Decode the embedded sitemap index to obtain the sitemap URLs.
	if err := xml.Unmarshal(bytes, &s); err != nil {
		fmt.Println("error parsing sitemap index:", err)
		return
	}
	for _, Location := range s.Locations {
		// Fix: the original ignored http.Get's error (nil-pointer crash on
		// network failure) and never closed the response body (leak).
		resp, err := http.Get(Location)
		if err != nil {
			fmt.Println("error fetching", Location, ":", err)
			continue
		}
		bytes, err := ioutil.ReadAll(resp.Body)
		// Close promptly; a defer inside the loop would hold every
		// connection open until main returns.
		resp.Body.Close()
		if err != nil {
			fmt.Println("error reading", Location, ":", err)
			continue
		}
		if err := xml.Unmarshal(bytes, &n); err != nil {
			fmt.Println("error parsing", Location, ":", err)
			continue
		}
		// Key the map by title; the value bundles keywords and location.
		for idx := range n.Titles {
			// Guard against documents where the keyword/location lists are
			// shorter than the title list (previously an index panic).
			if idx >= len(n.Keywords) || idx >= len(n.Locations) {
				break
			}
			news_map[n.Titles[idx]] = NewsMap{n.Keywords[idx], n.Locations[idx]}
		}
	}
	// Print the populated news_map.
	for idx, data := range news_map {
		fmt.Println("\n\n\n", idx)
		fmt.Println("\n", data.Keyword)
		fmt.Println("\n", data.Location)
	}
}
|
package memsearch
import (
"testing"
"github.com/manishrjain/gocrud/testx"
)
// initialize builds a fresh MemSearch instance and seeds it with the shared
// test documents from the testx package.
func initialize() *MemSearch {
	ms := new(MemSearch)
	ms.Init()
	testx.AddDocs(ms)
	return ms
}
// TestNewAndFilter exercises AND-filter queries against the shared fixture.
func TestNewAndFilter(t *testing.T) {
	testx.RunAndFilter(ms, t)
}

// soln lists document names.
// NOTE(review): soln is never referenced in this file — presumably kept for
// comparison with the testx expectations; confirm before removing.
var soln = [...]string{
	"m81",
	"ngc 3370",
	"galaxy ngc 1512",
	"ngc 123",
	"whirlpool galaxy",
	"sombrero galaxy",
}

// TestNewOrFilter exercises OR-filter queries.
func TestNewOrFilter(t *testing.T) {
	testx.RunOrFilter(ms, t)
}

// TestCount exercises result counting.
func TestCount(t *testing.T) {
	testx.RunCount(ms, t)
}

// TestFrom exercises from/limit pagination.
func TestFrom(t *testing.T) {
	testx.RunFromLimit(ms, t)
}

// ms is the shared search instance used by every test in this file.
var ms *MemSearch

// init populates ms once before any test runs.
func init() {
	ms = initialize()
}
|
package service
import (
"context"
"github.com/go-ocf/cloud/grpc-gateway/pb"
)
// GetClientConfiguration returns the handler's stored client configuration.
// NOTE(review): it returns a pointer to the handler's own struct, so callers
// share one instance — confirm callers treat it as read-only.
func (r *RequestHandler) GetClientConfiguration(context.Context, *pb.ClientConfigurationRequest) (*pb.ClientConfigurationResponse, error) {
	return &r.clientConfiguration, nil
}
|
package b2
import (
"context"
"net/http"
)
const (
	// B2 API v2 endpoint paths, relative to the account's API base URL.
	createBucketURL = "b2api/v2/b2_create_bucket"
	listBucketsURL  = "b2api/v2/b2_list_buckets"
)
// Bucket is used to represent a B2 Bucket
type Bucket struct {
	AccountID      string                `json:"accountId"`
	ID             string                `json:"bucketId"`
	Info           map[string]string     `json:"bucketInfo"`
	Name           string                `json:"bucketName"`
	Type           string                `json:"bucketType"`
	LifecycleRules []BucketLifecycleRule `json:"lifecycleRules"`
	Revision       int                   `json:"revision"`
}

// BucketCreateRequest represents a request to create a Bucket
type BucketCreateRequest struct {
	AccountID      string                `json:"accountId"`
	Name           string                `json:"bucketName"`
	Type           string                `json:"bucketType"`
	Info           map[string]string     `json:"bucketInfo,omitempty"`
	CorsRules      []BucketCorsRule      `json:"corsRules,omitempty"`
	LifecycleRules []BucketLifecycleRule `json:"lifecycleRules,omitempty"`
}

// BucketCorsRule is used to represent a Bucket's CORS rule
//
// See more on https://www.backblaze.com/b2/docs/cors_rules.html
type BucketCorsRule struct {
	Name              string   `json:"corsRuleName"`
	AllowedOrigins    []string `json:"allowedOrigins"`
	AllowedHeaders    []string `json:"allowedHeaders"`
	AllowedOperations []string `json:"allowedOperations"`
	ExposeHeaders     []string `json:"exposeHeaders"`
	MaxAgeSeconds     int      `json:"maxAgeSeconds"`
}

// BucketLifecycleRule tells B2 to automatically hide and/or delete old files
//
// See more on https://www.backblaze.com/b2/docs/lifecycle_rules.html
type BucketLifecycleRule struct {
	DaysFromHidingToDeleting  int    `json:"daysFromHidingToDeleting"`
	DaysFromUploadingToHiding int    `json:"daysFromUploadingToHiding"`
	FileNamePrefix            string `json:"fileNamePrefix"`
}

// BucketListRequest represents a request to list Buckets
type BucketListRequest struct {
	AccountID string `json:"accountId"`
	BucketID  string `json:"bucketId,omitempty"`
	Name      string `json:"bucketName,omitempty"`
	Types     string `json:"bucketTypes,omitempty"`
}

// bucketListRoot wraps the JSON envelope returned by b2_list_buckets.
type bucketListRoot struct {
	Buckets []Bucket `json:"buckets"`
}

// BucketService handles communication with the Bucket related methods of the
// B2 API
type BucketService struct {
	client *Client
}
// Create creates a new Bucket via the b2_create_bucket endpoint and returns
// the decoded Bucket along with the raw HTTP response.
func (s *BucketService) Create(ctx context.Context, createRequest *BucketCreateRequest) (*Bucket, *http.Response, error) {
	// TODO: check authorization for creating buckets
	req, err := s.client.NewRequest(ctx, http.MethodPost, createBucketURL, createRequest)
	if err != nil {
		return nil, nil, err
	}
	bucket := &Bucket{}
	resp, err := s.client.Do(req, bucket)
	if err != nil {
		return nil, resp, err
	}
	return bucket, resp, nil
}
// List returns every Bucket matching the given list request, along with the
// raw HTTP response.
func (s *BucketService) List(ctx context.Context, listRequest *BucketListRequest) ([]Bucket, *http.Response, error) {
	req, err := s.client.NewRequest(ctx, http.MethodPost, listBucketsURL, listRequest)
	if err != nil {
		return nil, nil, err
	}
	root := &bucketListRoot{}
	resp, err := s.client.Do(req, root)
	if err != nil {
		return nil, resp, err
	}
	return root.Buckets, resp, nil
}
|
package main
import "fmt"
// a is a package-level int.
// NOTE(review): a is never used in this file — confirm it is needed.
var a int

// main builds a map from int keys to no-argument functions and prints it.
// Note that %v renders function values as addresses, so the exact output
// varies between runs.
func main() {
	var mp = map[int](func() int){
		1: func() int { return 10 },
		2: func() int { return 20 },
		3: func() int { return 30 },
	}
	fmt.Printf("%v", mp)
}
|
package main
import "log"
// main seeds a few Customer and Student records and prints the customer
// with id 0. Customer, Student, Create and GetCustomerById are defined
// elsewhere in this package.
func main() {
	amar, ranjan, deepak, satyam := Customer{}, Customer{}, Customer{}, Customer{}
	govind, abhay, anuj, sunny := Student{}, Student{}, Student{}, Student{}
	/*customerList := []StudentAndCustomer{amar,ranjan,deepak,satyam}
	studentList := []StudentAndCustomer{govind,abhay,anuj,sunny}*/
	// Create(name, id, address, phone) — signature inferred from usage; the
	// phone literals below exceed int32, so the parameter is presumably
	// int64 — confirm against the Customer/Student definitions.
	amar.Create("amar", 0, "manpur", 84946161944)
	ranjan.Create("ranjan", 1, "gaya", 79616494669)
	deepak.Create("deepak", 2, "bihar", 496164941619)
	satyam.Create("satyam", 3, "manpur", 4941694616)
	govind.Create("govind", 0, "manpur", 84946161944)
	abhay.Create("abhay", 1, "gaya", 79616494669)
	anuj.Create("anuj", 2, "bihar", 496164941619)
	sunny.Create("sunny", 3, "manpur", 4941694616)
	log.Println(GetCustomerById(0))
}
|
package chapter1
// store holds every indexed document, in insertion order.
var store = make([]string, 0)

// Index adds data to the in-memory store.
func Index(data string) {
	store = append(store, data)
}

// Search returns the first stored entry exactly equal to query, or the
// empty string when no entry matches.
func Search(query string) string {
	for i := range store {
		if store[i] == query {
			return store[i]
		}
	}
	return ""
}
|
package lolbas
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
log "github.com/sirupsen/logrus"
"github.com/sudneo/gtfodora/pkg/binary"
cloner "github.com/sudneo/gtfodora/pkg/repo_utils"
"gopkg.in/yaml.v2"
)
const (
	// repoURL is the upstream LOLBAS project repository, cloned/pulled
	// locally before parsing its YAML entries.
	repoURL string = "https://github.com/LOLBAS-Project/LOLBAS"
)
// lolbasbin mirrors the YAML schema of a single LOLBAS entry file.
type lolbasbin struct {
	Name        string        `yaml:"Name"`
	Description string        `yaml:"Description"`
	Author      interface{}   `yaml:"Author"`
	Created     string        `yaml:"Created"`
	Commands    []commandSpec `yaml:"Commands"`
	FullPath    []struct {
		Path string `yaml:"Path"`
	} `yaml:"Full_Path"`
	CodeSample []struct {
		Code string `yaml:"Code"`
	} `yaml:"Code_Sample"`
	Detection []struct {
		IOC interface{} `yaml:"IOC"`
	} `yaml:"Detection"`
	Resources []struct {
		Link string `yaml:"Link"`
	} `yaml:"Resources"`
	Acknowledgement []struct {
		Person string `yaml:"Person"`
		Handle string `yaml:"Handle"`
	} `yaml:"Acknowledgement"`
}

// commandSpec describes one documented command of a LOLBAS binary.
type commandSpec struct {
	Command     string `yaml:"Command"`
	Description string `yaml:"Description"`
	UseCase     string `yaml:"UseCase"`
	Category    string `yaml:"Category"`
	Privileges  string `yaml:"Privileges"`
	MitreID     string `yaml:"MitreID"`
	// NOTE(review): "MItreLink" (field and tag) looks like a typo for
	// "MitreLink", but the tag must match the upstream YAML key — confirm
	// against the LOLBAS files before renaming.
	MItreLink       string `yaml:"MItreLink"`
	OperatingSystem string `yaml:"OperatingSystem"`
}
// spec pairs a human-readable description with an example command.
// NOTE(review): spec is not referenced anywhere in this file — confirm it is
// used elsewhere before removing.
type spec struct {
	Description string
	Code        string
}
// Clone clones the LOLBAS repository into path, logging a warning (and
// continuing with partial results) when the clone fails.
func Clone(path string) {
	if err := cloner.Clone_repo(repoURL, path); err != nil {
		log.Warn("Failed to clone LOLbas repository, results will be partial")
	}
}
// pull refreshes the local LOLBAS checkout at path, logging a warning (and
// continuing with possibly stale data) when the pull fails.
func pull(path string) {
	if err := cloner.Pull_repo(path); err != nil {
		log.Warn("Failed to pull the LOLbas repository, results might be outdated.")
	}
}
// Parse reads and unmarshals a single LOLBAS YAML file. Errors are reported
// to stdout and a (possibly zero-valued) lolbasbin is returned, matching the
// best-effort behaviour of the rest of this package.
func Parse(filePath string) lolbasbin {
	var bin lolbasbin
	yamlFile, err := ioutil.ReadFile(filePath)
	if err != nil {
		fmt.Println("Error parsing file")
		fmt.Println(err.Error())
		return bin
	}
	// Fix: the unmarshal error was previously assigned but never checked,
	// silently yielding an empty struct on malformed YAML.
	if err := yaml.Unmarshal(yamlFile, &bin); err != nil {
		fmt.Println("Error parsing file")
		fmt.Println(err.Error())
	}
	return bin
}
// transform converts a parsed LOLBAS entry into the common binary.Binary
// representation, merging commands that share a category.
func (l *lolbasbin) transform() binary.Binary {
	var bin binary.Binary
	bin.Name = l.Name
	bin.Type = "win"
	for _, c := range l.Commands {
		category := strings.ToLower(c.Category)
		// If a command with the same (lower-cased) category already exists,
		// append the details to it instead of creating a new entry.
		existing := false
		for i := range bin.Commands {
			if bin.Commands[i].Function == category {
				bin.Commands[i].Details = append(bin.Commands[i].Details, binary.FunctionSpec{
					Description: c.Description,
					Code:        c.Command})
				existing = true
			}
		}
		if !existing {
			var cmd binary.Command
			switch category {
			case "execute":
				cmd.Function = "command"
			case "awl bypass":
				// Fix: this case was "awl Bypass" and could never match the
				// lower-cased category, so AWL-bypass entries fell through
				// to the default branch.
				cmd.Function = "awlbypass"
			case "uac bypass":
				cmd.Function = "uacbypass"
			default:
				cmd.Function = category
			}
			cmd.Details = append(cmd.Details, binary.FunctionSpec{
				Description: c.Description,
				Code:        c.Command})
			bin.Commands = append(bin.Commands, cmd)
		}
	}
	return bin
}
// ParseAll refreshes the local LOLBAS checkout and parses every .yml file
// under its yml/ directory into binary.Binary values. On a walk failure the
// (possibly empty) results gathered so far are returned.
func ParseAll(path string) []binary.Binary {
	// Fix: use the pull helper so a failed pull is logged instead of the
	// error being silently discarded.
	pull(path)
	binary_path := path + "/yml/"
	var files []string
	var parsedFiles []binary.Binary
	err := filepath.Walk(binary_path, func(p string, info os.FileInfo, err error) error {
		// Fix: propagate walk errors instead of ignoring the err argument
		// and collecting an unreadable path.
		if err != nil {
			return err
		}
		files = append(files, p)
		return nil
	})
	if err != nil {
		log.WithFields(log.Fields{
			"Path": path,
		}).Error("Failed to walk the specified path")
		return parsedFiles
	}
	for _, file := range files {
		if info, err := os.Stat(file); err == nil && !info.IsDir() {
			if filepath.Ext(file) == ".yml" {
				f := Parse(file)
				parsedFiles = append(parsedFiles, f.transform())
			}
		}
	}
	return parsedFiles
}
|
/*
* This file is part of impacca. Copyright (C) 2013 and above Shogun <shogun@cowtech.it>.
* Licensed under the MIT license, which can be found at https://choosealicense.com/licenses/mit.
*/
package main
import (
"github.com/ShogunPanda/tempera"
"github.com/spf13/cobra"
"github.com/ShogunPanda/impacca/commands/changelog"
"github.com/ShogunPanda/impacca/commands/publish"
"github.com/ShogunPanda/impacca/commands/release"
"github.com/ShogunPanda/impacca/commands/version"
)
// main wires up the impacca CLI: custom terminal styles, the root command,
// the global dry-run flag, and the version/changelog/publish/release
// subcommands.
func main() {
	tempera.AddCustomStyle("primary", "bold", "blue")
	tempera.AddCustomStyle("secondary", "bold", "yellow")
	tempera.AddCustomStyle("errorPrimary", "bold", "white")
	var rootCmd = &cobra.Command{Use: "impacca", Short: "Package releasing made easy."}
	rootCmd.Version = "2.0.5"
	rootCmd.PersistentFlags().BoolP("dry-run", "n", false, "Do not execute write operation, only show them.")
	rootCmd.AddCommand(version.InitCLI())
	rootCmd.AddCommand(changelog.InitCLI())
	rootCmd.AddCommand(publish.InitCLI())
	rootCmd.AddCommand(release.InitCLI())
	// NOTE(review): Execute's error is discarded — cobra prints it, but the
	// process still exits 0 on failure; confirm that is intended.
	rootCmd.Execute()
}
|
package committer
import (
"fmt"
"regexp"
)
// PatternMatch checks whether commit message m follows the conventional
// style defined by getPattern. On a regexp failure it returns the wrapped
// error text; on a style violation it returns a message telling the user
// which rule to follow. ok reports whether m matched.
func PatternMatch(m Message) (errMsg string, ok bool) {
	pattern := getPattern()
	matched, err := regexp.MatchString(pattern, string(m))
	if err != nil {
		return fmt.Errorf("error whilst checking commit message %v: %w", m, err).Error(), false
	}
	// This is the final check, so report the result to the user.
	if !matched {
		return fmt.Sprintf("invalid commit message. \n\tmust follow this rule: %v\n\t\t", pattern), false
	}
	return "", true
}
// getPattern return a regular expression of conventional commit message
// The regex pattern is highly inspired by convention of Angular and the Conventional Commits
// however, it could be customized by `@ms-home` in the future, depending on the team's situation
// FYI, the types of commits in regex sorted in alphabetical order for better readability
func getPattern() string {
const pattern = `^(BREAKING CHANGE|build|chore|ci|docs|feat|fix|perf|refactor|style|test)(\([a-z \-]+\))?: [\w \-]+$`
return pattern
}
|
package http
import (
"../g"
"net/http"
"strings"
)
// configReloadRoutes registers /config/reload, which re-parses the config
// file and renders the refreshed configuration. Only loopback callers are
// allowed.
func configReloadRoutes() {
	http.HandleFunc("/config/reload", func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): the prefix check only matches IPv4 loopback; an IPv6
		// loopback client ("[::1]:...") would be rejected — confirm whether
		// that matters for this deployment.
		if strings.HasPrefix(r.RemoteAddr, "127.0.0.1") {
			err := g.ParseConfig(g.ConfigFile)
			AutoRender(w, g.Config(), err)
		} else {
			w.Write([]byte("no privilege"))
		}
	})
}
|
// Refactor a ledger printer.
package ledger
import (
"errors"
"fmt"
"sort"
"strings"
)
// testVersion pins the exercise API version this solution targets.
const testVersion = 4

// Entry is a single ledger line.
type Entry struct {
	Date        string // "Y-m-date"
	Description string
	Change      int // in cents
}

// currencyMap maps ISO currency codes to display symbols.
var currencyMap = map[string]string{
	"EUR": "€", "USD": "$",
}

// localeMap holds the per-locale formatting templates: table header, date
// layout, number layout, thousands separator, and the positive/negative
// wrappers applied around a formatted amount.
var localeMap = map[string]struct {
	header, dateFmt, numFmt, thouSep, posFmt, negFmt string
}{
	"nl-NL": {fmt.Sprintf("%-10s | %-25s | Verandering\n", "Datum",
		"Omschrijving"), "%s-%s-%s", // day-month-year
		"%s %s,%s", ".", "%s ", "%s-"},
	"en-US": {fmt.Sprintf("%-10s | %-25s | Change\n", "Date", "Description"),
		"%[2]s/%[1]s/%[3]s", // month/day/year
		"%s%s.%s", ",", "%s ", "(%s)"},
}
// FormatChange renders an amount of cents as a localized currency string:
// it splits off the two cent digits, groups the integer part in thousands
// with the locale's separator, and wraps the result in the locale's
// positive or negative template.
func FormatChange(currency string, locale string, change int) string {
	currencySymbol := currencyMap[currency]
	negative := false
	cents := change
	if cents < 0 {
		cents = -cents
		negative = true
	}
	// Pad to at least three digits so there is always an integer digit in
	// front of the two cent digits.
	centsStr := fmt.Sprintf("%03d", cents)
	rest := centsStr[:len(centsStr)-2]
	// Split the integer part into groups of three, right to left.
	var parts []string
	for len(rest) > 3 {
		parts = append([]string{rest[len(rest)-3:]}, parts...)
		rest = rest[:len(rest)-3]
	}
	if len(rest) > 0 {
		parts = append([]string{rest}, parts...)
	}
	format := localeMap[locale]
	number := ""
	number = fmt.Sprintf(format.numFmt,
		currencySymbol, strings.Join(parts, format.thouSep),
		centsStr[len(centsStr)-2:])
	if negative {
		number = fmt.Sprintf(format.negFmt, number)
	} else {
		number = fmt.Sprintf(format.posFmt, number)
	}
	return number
}
// FormatLedger renders the ledger entries for the given currency and locale
// as an aligned text table, sorted by date, description and change. It
// returns an error for unsupported currencies or locales and for malformed
// entry dates.
func FormatLedger(currency string, locale string, entries []Entry) (string, error) {
	// Error check
	if _, ok := currencyMap[currency]; !ok {
		return "", errors.New("Unsupported currency")
	}
	if _, ok := localeMap[locale]; !ok {
		return "", errors.New("Unsupported locale")
	}
	for _, entry := range entries {
		if len(entry.Date) != 10 ||
			entry.Date[4] != '-' || entry.Date[7] != '-' {
			return "", errors.New("Incorrect date format")
		}
	}
	if len(entries) == 0 {
		locale = "en-US"
	}
	// sort by date, description, change, ascending
	entriesCopy := make([]Entry, len(entries))
	copy(entriesCopy, entries)
	sort.Sort(entryList(entriesCopy))
	// Each row is formatted in its own goroutine; results carry their index
	// so the output can be collected back into sorted order even though the
	// goroutines finish in arbitrary order.
	co := make(chan struct {
		i int // to maintain the sequence
		s string
	})
	for i, et := range entriesCopy {
		go func(i int, entry Entry) {
			year, month, day := entry.Date[0:4], entry.Date[5:7], entry.Date[8:10]
			var date string
			date = fmt.Sprintf(localeMap[locale].dateFmt, day, month, year)
			desc := entry.Description
			// Truncate long descriptions to 25 columns with an ellipsis.
			if len(desc) > 25 {
				desc = desc[:22] + "..."
			} else {
				desc = fmt.Sprintf("%-25s", desc)
			}
			money := FormatChange(currency, locale, entry.Change)
			co <- struct {
				i int
				s string
			}{i: i, s: fmt.Sprintf("%-10s | %s | %13s\n", date, desc, money)}
		}(i, et)
	}
	// Collect exactly one row per entry, placing each at its index.
	ss := make([]string, len(entriesCopy))
	for range entriesCopy {
		v := <-co
		ss[v.i] = v.s
	}
	s := localeMap[locale].header // table head
	s = s + strings.Join(ss, "")
	return s, nil
}
// entryList implements sort.Interface over []Entry, ordering by date, then
// description, then change, all ascending.
type entryList []Entry

func (e entryList) Len() int      { return len(e) }
func (e entryList) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Less reports whether entry i sorts before entry j.
// Fix: the original switch contained a duplicated, unreachable
// `e[i].Change < e[j].Change` case (a copy-paste slip for the `>`
// comparison) plus an unreachable trailing return. The net ordering was
// already correct, so this rewrite preserves behavior while removing the
// dead code.
func (e entryList) Less(i, j int) bool {
	if e[i].Date != e[j].Date {
		return e[i].Date < e[j].Date
	}
	if e[i].Description != e[j].Description {
		return e[i].Description < e[j].Description
	}
	return e[i].Change < e[j].Change
}
|
package main
import (
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
)
// Message which can be sent to Google Chat.
// More info at https://developers.google.com/hangouts/chat/reference/message-formats/cards
type Message struct {
	// Text is the plain-text content; omitted from JSON when empty.
	Text  string `json:"text,omitempty"`
	Cards []Card `json:"cards"`
}

// Card property of a message. can contain a header and must have at least one section
type Card struct {
	// header object (optional)
	Header *Header `json:"header,omitempty"`
	// sections object. At least one section is required
	Sections []Section `json:"sections,omitempty"`
}
// Header property of a card
type Header struct {
	Title    string
	Subtitle string
	ImageURL string
	// ImageStyle controls the shape of the header image: "square" (sent to
	// the API as "IMAGE") or "circular" (sent as "AVATAR"). Any other value
	// falls back to "IMAGE". Only honored when ImageURL is set.
	ImageStyle string
}

// MarshalJSON implements json.Marshaler.MarshalJSON. Empty fields are
// omitted from the output, and the user-facing ImageStyle values are
// translated to the API enum names.
func (h Header) MarshalJSON() ([]byte, error) {
	m := make(map[string]interface{})
	if h.Title != "" {
		m["title"] = h.Title
	}
	if h.Subtitle != "" {
		m["subtitle"] = h.Subtitle
	}
	if h.ImageURL != "" {
		m["imageUrl"] = h.ImageURL
		if h.ImageStyle != "" {
			// Fix: removed a dead `m["imageStyle"] = h.ImageStyle` store that
			// was unconditionally overwritten by this switch.
			switch h.ImageStyle {
			case "circular":
				m["imageStyle"] = "AVATAR"
			default:
				// "square" and anything unrecognized map to "IMAGE".
				m["imageStyle"] = "IMAGE"
			}
		}
	}
	return json.Marshal(m)
}

// CreateHeader creates a Header struct if at least one field is present, return nil pointer otherwise
func CreateHeader(title, subtitle, imageURL, imageStyle string) *Header {
	if title == "" && subtitle == "" && imageURL == "" && imageStyle == "" {
		return nil
	}
	return &Header{
		Title:      title,
		Subtitle:   subtitle,
		ImageURL:   imageURL,
		ImageStyle: imageStyle,
	}
}
// Section of a card. can contain multiple widgets, but at least one is required. Sections are separated by a horizontal line
type Section struct {
	// Section header (optional)
	Header string `json:"header,omitempty"`
	// widgets object. At least one widget is required.
	Widgets []*Widget `json:"widgets,omitempty"`
}

// Widget of a section. Can contain only one type of UI element
type Widget struct {
	TextParagraph *TextParagraph `json:"textParagraph,omitempty"`
	KeyValue      *KeyValue      `json:"keyValue,omitempty"`
	Image         *Image         `json:"image,omitempty"`
	// buttons object can contain one or more buttons. will be laid out horizontally
	Buttons []*Button `json:"buttons,omitempty"`
}

// TextParagraph UI element
type TextParagraph struct {
	// The text to display inside the paragraph
	Text string `json:"text,omitempty"`
}

// KeyValue UI element
type KeyValue struct {
	TopLabel string `json:"topLabel,omitempty"`
	Content  string `json:"content,omitempty"`
	// ContentMultiline carries "true"/"false" as a string (filled via
	// strconv.FormatBool in ParseKeyValues). NOTE(review): the Chat API
	// documents this field as a boolean — confirm the string form is
	// accepted by the endpoint.
	ContentMultiline string   `json:"contentMultiline,omitempty"`
	BottomLabel      string   `json:"bottomLabel,omitempty"`
	OnClick          *OnClick `json:"onClick,omitempty"`
	// either iconUrl of icon can be used
	IconURL string `json:"iconUrl,omitempty"`
	// either iconUrl of icon can be used
	Icon   string  `json:"icon,omitempty"`
	Button *Button `json:"button,omitempty"`
}
// ParseKeyValues parses a simpler KeyValue json array (the KeyValueInput
// schema, defined elsewhere in this package) into the KeyValue widgets used
// by the Chat API. An empty input string or an empty JSON array yields nil
// widgets without error; malformed entries yield a descriptive error.
func ParseKeyValues(raw string) (widgets []*Widget, err error) {
	// ignore empty string
	if raw == "" {
		return
	}
	var keyValueInput []KeyValueInput
	err = json.Unmarshal([]byte(raw), &keyValueInput)
	if err != nil {
		return
	}
	// An empty array is accepted deliberately: it is a way to remove the
	// keyValues without raising an error.
	if len(keyValueInput) == 0 {
		return
	}
	widgets = []*Widget{}
	for _, keyValue := range keyValueInput {
		// Content is the only mandatory field.
		if keyValue.Content == "" {
			err = fmt.Errorf("KeyValue Should have content")
			return
		}
		// either iconUrl of icon can be used
		if keyValue.IconURL != "" && keyValue.Icon != "" {
			err = fmt.Errorf("KeyValue object should have either an iconUrl, an icon, or neither, but not both")
			return
		}
		// A plain URL string becomes an open-link click action.
		var onClick *OnClick
		if keyValue.OnClick != "" {
			onClick = &OnClick{
				OpenLink: &OpenLink{
					URL: keyValue.OnClick,
				},
			}
		}
		// Optional trailing button: must have a click target and exactly one
		// of text, iconUrl or icon.
		var button *Button
		if keyValue.Button != nil {
			if keyValue.Button.OnClick == "" {
				err = fmt.Errorf("KeyValue button should have an onClick value")
				return
			}
			if (keyValue.Button.Text != "" && (keyValue.Button.IconURL != "" || keyValue.Button.Icon != "")) || (keyValue.Button.IconURL != "" && (keyValue.Button.Text != "" || keyValue.Button.Icon != "")) {
				err = fmt.Errorf("KeyValue button should have either a text, an iconUrl, or an icon field, not multiple")
				return
			}
			var buttonOnClick = &OnClick{
				OpenLink: &OpenLink{
					URL: keyValue.Button.OnClick,
				},
			}
			var textButton *TextButton
			if keyValue.Button.Text != "" {
				textButton = &TextButton{
					Text:    keyValue.Button.Text,
					OnClick: buttonOnClick,
				}
			}
			var imageButton *ImageButton
			if keyValue.Button.Icon != "" || keyValue.Button.IconURL != "" {
				imageButton = &ImageButton{
					IconURL: keyValue.Button.IconURL,
					Icon:    keyValue.Button.Icon,
					OnClick: buttonOnClick,
				}
			}
			if textButton == nil && imageButton == nil {
				err = fmt.Errorf("KeyValue button should have either an iconUrl, an icon or text field")
				return
			}
			button = &Button{
				TextButton:  textButton,
				ImageButton: imageButton,
			}
		}
		widgets = append(widgets, &Widget{
			KeyValue: &KeyValue{
				TopLabel: keyValue.TopLabel,
				Content:  keyValue.Content,
				// Bool flattened to "true"/"false"; see the note on KeyValue.
				ContentMultiline: strconv.FormatBool(keyValue.ContentMultiline),
				BottomLabel:      keyValue.BottomLabel,
				OnClick:          onClick,
				IconURL:          keyValue.IconURL,
				Icon:             keyValue.Icon,
				Button:           button,
			},
		})
	}
	return
}
// Image UI element.
type Image struct {
	ImageURL string   `json:"imageUrl,omitempty"`
	OnClick  *OnClick `json:"onClick,omitempty"`
}
// Button UI element. Can contain either a TextButton or an ImageButton,
// never both.
type Button struct {
	TextButton  *TextButton  `json:"textButton,omitempty"`
	ImageButton *ImageButton `json:"imageButton,omitempty"`
}
// TextButton UI element.
type TextButton struct {
	Text    string   `json:"text,omitempty"`
	OnClick *OnClick `json:"onClick,omitempty"`
}
// ImageButton UI element.
type ImageButton struct {
	// either iconUrl or icon can be used, not both
	IconURL string `json:"iconUrl,omitempty"`
	// either iconUrl or icon can be used, not both
	Icon    string   `json:"icon,omitempty"`
	OnClick *OnClick `json:"onClick,omitempty"`
}
// parseButtons parses a multi-line "type|label|url" declaration string into
// Button objects, one button per line. On error the buttons parsed so far are
// returned together with the error.
func parseButtons(s string) (buttons []*Button, err error) {
	var declarations [][3]string
	if declarations, err = triples(s); err != nil {
		return
	}
	for _, declaration := range declarations {
		var b *Button
		if b, err = parseButton(declaration); err != nil {
			return
		}
		buttons = append(buttons, b)
	}
	return
}
// parseButton converts one (type, label, url) triple into a Button.
// Supported types are "text", "builtin-icon" and "custom-icon"; any other
// type yields an error.
func parseButton(triple [3]string) (button *Button, err error) {
	onClick := &OnClick{
		OpenLink: &OpenLink{
			URL: triple[2],
		},
	}
	// A switch is the idiomatic form for dispatching on a small set of
	// string constants (replaces the original if/else-if chain).
	switch triple[0] {
	case "text":
		button = &Button{
			TextButton: &TextButton{
				Text:    triple[1],
				OnClick: onClick,
			},
		}
	case "builtin-icon":
		button = &Button{
			ImageButton: &ImageButton{
				Icon:    triple[1],
				OnClick: onClick,
			},
		}
	case "custom-icon":
		button = &Button{
			ImageButton: &ImageButton{
				IconURL: triple[1],
				OnClick: onClick,
			},
		}
	default:
		err = fmt.Errorf("Unknown button type %s", triple[0])
	}
	return
}
// triples slices every line in s into three substrings separated by the
// first two pipe characters and returns a slice of those triples.
// (The previous comment wrongly described pairs of two substrings.)
func triples(s string) (ps [][3]string, err error) {
	s = strings.TrimSpace(s)
	for _, line := range strings.Split(s, "\n") {
		var triple [3]string
		triple, err = splitTriple(line)
		if err != nil {
			return
		}
		ps = append(ps, triple)
	}
	return
}
// splitTriple splits s on its first two pipe characters and returns the
// three parts. An error is returned when fewer than three parts are present
// or when any part is empty.
func splitTriple(s string) (triple [3]string, err error) {
	parts := strings.SplitN(s, "|", 3)
	if len(parts) < 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" {
		err = fmt.Errorf("Could not parse button with declaration %s", s)
		return
	}
	copy(triple[:], parts)
	return
}
// OnClick handler object.
type OnClick struct {
	OpenLink *OpenLink `json:"openLink,omitempty"`
}
// OpenLink object holding the URL opened when the element is clicked.
type OpenLink struct {
	URL string `json:"url,omitempty"`
}
// ButtonInput defines the input format of the Button objects.
type ButtonInput struct {
	Text    string `json:"text,omitempty"`
	IconURL string `json:"iconUrl,omitempty"` // either iconUrl or icon can be used
	Icon    string `json:"icon,omitempty"`    // either iconUrl or icon can be used
	OnClick string `json:"onClick,omitempty"` // URL; converted into an OnClick/OpenLink object
}
// KeyValueInput defines the input format of the KeyValue object.
type KeyValueInput struct {
	TopLabel string `json:"topLabel,omitempty"`
	Content  string `json:"content,omitempty"`
	// ContentMultiline is a real bool here; ParseKeyValues converts it to
	// the string form the API object expects.
	ContentMultiline bool         `json:"contentMultiline,omitempty"`
	BottomLabel      string       `json:"bottomLabel,omitempty"`
	OnClick          string       `json:"onClick,omitempty"`
	IconURL          string       `json:"iconUrl,omitempty"` // either iconUrl or icon can be used
	Icon             string       `json:"icon,omitempty"`    // either iconUrl or icon can be used
	Button           *ButtonInput `json:"button,omitempty"`
}
// simpleToAdvancedFormat converts one of chats markdown-like simple formats
// to its corresponding html based advanced format, e.g. ("*", "b") turns
// "*x*" into "<b>x</b>".
//
// The marker is escaped with regexp.QuoteMeta so markers of any length (and
// any regex-special characters within them) are handled; the original only
// prepended a single backslash, escaping just the first character.
func simpleToAdvancedFormat(simpleFormat string, advancedFormat string, stringToFormat string) string {
	marker := regexp.QuoteMeta(simpleFormat)
	var re = regexp.MustCompile(marker + "(.+?)" + marker)
	return re.ReplaceAllString(stringToFormat, "<"+advancedFormat+">${1}</"+advancedFormat+">")
}
// advancedToSimpleFormat converts one of chats html based advanced formats to
// its corresponding markdown-like simple format, e.g. ("b", "*") turns
// "<b>x</b>" into "*x*".
func advancedToSimpleFormat(advancedFormat string, simpleFormat string, stringToFormat string) string {
	pattern := regexp.MustCompile("<" + advancedFormat + ">(.+?)</" + advancedFormat + ">")
	replacement := simpleFormat + "${1}" + simpleFormat
	return pattern.ReplaceAllString(stringToFormat, replacement)
}
// SimpleToAdvancedFormatting converts google chats simple formatting to the
// advanced formatting (as far as possible). Code strings and code blocks are
// not stripped.
func SimpleToAdvancedFormatting(simple string) (formatted string) {
	// Links first: the generic marker replacements below would otherwise
	// mangle the <url|text> syntax.
	linkPattern := regexp.MustCompile(`<(.+?)\|(.+?)>`)
	formatted = linkPattern.ReplaceAllString(simple, `<a href="$1">$2</a>`)
	// Marker/tag pairs, applied in a fixed order.
	for _, pair := range [][2]string{{"*", "b"}, {"_", "i"}, {"~", "strike"}} {
		formatted = simpleToAdvancedFormat(pair[0], pair[1], formatted)
	}
	return
}
// AdvancedToSimpleFormatting converts google chats advanced formatting to the
// simple formatting (as far as possible). Underline and font color have no
// simple equivalent and are stripped.
//
// The parameter was renamed from the misleading `simple`: the input is the
// advanced-format string.
func AdvancedToSimpleFormatting(advanced string) (formatted string) {
	// Replacing line break
	formatted = strings.ReplaceAll(advanced, "<br>", "\n")
	// General format replacing
	formatted = advancedToSimpleFormat("b", "*", formatted)
	formatted = advancedToSimpleFormat("i", "_", formatted)
	formatted = advancedToSimpleFormat("strike", "~", formatted)
	// Replacing links
	var linkRegexp = regexp.MustCompile(`<a href="(.+?)">(.+?)</a>`)
	formatted = linkRegexp.ReplaceAllString(formatted, `<$1|$2>`)
	// Stripping underline by replacing it with nothing
	formatted = advancedToSimpleFormat("u", "", formatted)
	// Stripping font color
	var fontColorRegexp = regexp.MustCompile(`<font color=".+?">(.+?)</font>`)
	formatted = fontColorRegexp.ReplaceAllString(formatted, `$1`)
	return
}
|
// FindbinFolder
package DaeseongLib
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
var (
	// directories collects the full paths of every matched folder; the bool
	// value is unused (always false), the map is used as a set.
	// NOTE(review): access is not synchronized — callers must not mutate it
	// from multiple goroutines concurrently.
	directories = make(map[string]bool)
)
func IsBinDir(path string) bool {
fileStat, err := os.Stat(path)
if err != nil {
return false
}
return fileStat.IsDir()
}
// getRootDrives probes drive letters A-Z and returns the letters whose root
// directory can be opened, i.e. the drives present on this (Windows) host.
func getRootDrives() (drives []string) {
	for _, val := range "ABCDEFGHIJKLMNOPQRSTUVWXYZ" {
		f, err := os.Open(string(val) + ":\\")
		if err != nil {
			continue
		}
		// Close immediately: the original leaked one open handle per drive.
		f.Close()
		drives = append(drives, string(val))
	}
	return
}
// getFoldername returns the last backslash-separated component of sPath.
// It deliberately splits on "\\" (Windows separators) rather than using
// filepath.Base, so behavior does not depend on the host OS.
func getFoldername(sPath string) string {
	if i := strings.LastIndex(sPath, "\\"); i >= 0 {
		return sPath[i+1:]
	}
	return sPath
}
func WriteFolderString(sPath, sText string) {
file, err := os.OpenFile(sPath, os.O_RDWR|os.O_APPEND, 0660)
if os.IsNotExist(err) {
file, err = os.Create(sPath)
}
defer file.Close()
if err != nil {
return
}
n, err := io.WriteString(file, sText)
if err != nil {
fmt.Println(n, err)
return
}
}
// findDirectory walks sDirectory and records every directory named "bin",
// "obj" or "svn" in the package-level directories map.
//
// NOTE(review): the map writes are unsynchronized; this function must not be
// invoked from multiple goroutines concurrently.
func findDirectory(sDirectory string) {
	err := filepath.Walk(sDirectory, func(filePath string, f os.FileInfo, err error) error {
		// Skip entries that reported a walk error (the original ignored the
		// error parameter entirely and processed them anyway).
		if err != nil {
			return nil
		}
		if IsBinDir(filePath) {
			switch getFoldername(filePath) {
			case "bin", "obj", "svn":
				directories[filePath] = false
			}
		}
		return nil
	})
	if err != nil {
		fmt.Println("Error:", err)
	}
}
// FindAllFolder scans every available drive for "bin"/"obj"/"svn" folders and
// appends each hit to folderlist.txt, printing a timestamp before and after.
//
// findDirectory writes to the shared, unsynchronized directories map, so the
// per-drive walks must not run concurrently. The original launched all walks
// at once, which was a data race; each walk is now awaited before the next
// drive is started.
func FindAllFolder() {
	fmt.Println(time.Now().Format("2006:01:02 15:04:05"))
	drives := getRootDrives()
	var wait sync.WaitGroup
	for _, dr := range drives {
		wait.Add(1)
		go func(root string) {
			defer wait.Done()
			findDirectory(root)
		}(fmt.Sprintf("%s:\\", dr))
		// Serialize: the unsynchronized global map forbids concurrent walks.
		wait.Wait()
	}
	for folder := range directories {
		WriteFolderString("folderlist.txt", folder+"\n")
	}
	fmt.Println(time.Now().Format("2006:01:02 15:04:05"))
}
/*
func main() {
FindAllFolder()
}
*/
|
package main
import (
"errors"
"net"
"os"
"os/signal"
"syscall"
"github.com/ermanimer/grpc-example/chat/server"
proto "github.com/ermanimer/grpc-example/proto/message"
logger "github.com/ermanimer/slog"
"google.golang.org/grpc"
)
const (
	// address is the TCP listen address of the gRPC server.
	address = "0.0.0.0:9000"
)
// main starts the gRPC message service and blocks until either the server
// fails or a SIGINT/SIGTERM arrives, then shuts down gracefully.
func main() {
	l := logger.NewLogger(os.Stdout)
	s := server.NewServer(l)
	gs := grpc.NewServer()
	proto.RegisterMessageServiceServer(gs, s)
	li, err := net.Listen("tcp4", address)
	if err != nil {
		_ = l.Log("error", err.Error())
		return
	}
	// Buffered (capacity 2, one slot per sender) so whichever goroutine
	// loses the race can still complete its send; the original unbuffered
	// channel left the losing sender blocked forever.
	errs := make(chan error, 2)
	go func() {
		// Forward the first termination signal as an error.
		sc := make(chan os.Signal, 1)
		signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM)
		errs <- errors.New((<-sc).String())
	}()
	go func() {
		_ = l.Log("transport", "grpc", "address", address)
		errs <- gs.Serve(li)
	}()
	// Block until a signal or server failure, then drain connections.
	_ = l.Log("error", (<-errs).Error())
	s.Stop()
	gs.GracefulStop()
}
|
package main
import (
"golang.org/x/tour/tree"
"fmt"
)
// Walk performs an in-order traversal of t, sending every value on ch.
// level tracks the recursion depth; the root call (level == 0) closes ch
// once the whole tree has been emitted.
func Walk(t *tree.Tree, ch chan int, level int) {
	if left := t.Left; left != nil {
		Walk(left, ch, level+1)
	}
	ch <- t.Value
	if right := t.Right; right != nil {
		Walk(right, ch, level+1)
	}
	if level == 0 {
		close(ch)
	}
}
// Same determines whether the trees
// t1 and t2 contain the same values.
//
// Both trees are walked concurrently in in-order sequence and the two value
// streams are compared pairwise.
//
// NOTE(review): an early `return false` leaves the two Walk goroutines
// blocked on their sends; harmless here because the program exits, but a
// leak in long-lived callers — confirm before reuse.
func Same(t1, t2 *tree.Tree) bool {
	ch1 := make(chan int)
	ch2 := make(chan int)
	go Walk(t1, ch1, 0)
	go Walk(t2, ch2, 0)
	for {
		// A closed channel yields (0, false).
		v1, ok1 := <- ch1
		v2, ok2 := <- ch2
		// Debug trace of every compared pair (matches the sample output below).
		fmt.Printf("(%+v,%+v) (%+v,%+v)\n", v1, ok1, v2, ok2)
		// Values are compared before the ok flags; this relies on the tree
		// values never being 0 (presumably tree.New yields positive values —
		// TODO confirm), otherwise a real 0 and a closed-channel 0 would tie.
		if v1 != v2 {return false}
		// Both streams ended together => equal; only one ended => different sizes.
		if !ok1 || !ok2 {return ok1 == ok2}
	}
}
// main builds two different trees, prints them, and demonstrates Same on a
// differing pair and on an identical pair.
func main() {
	first := tree.New(1)
	second := tree.New(2)
	fmt.Println("Tree 1:", first)
	fmt.Println("Tree 2:", second)
	fmt.Printf("Same(tree1,tree2): %+v\n", Same(first, second))
	fmt.Printf("Same(tree1,tree1): %+v\n", Same(first, first))
}
/*
Tree 1: ((((1 (2)) 3 (4)) 5 ((6) 7 ((8) 9))) 10)
Tree 2: ((((2) 4 (6)) 8 (10 (12))) 14 ((16) 18 (20)))
(1,true) (2,true)
Same(tree1,tree2): false
(1,true) (1,true)
(2,true) (2,true)
(3,true) (3,true)
(4,true) (4,true)
(5,true) (5,true)
(6,true) (6,true)
(7,true) (7,true)
(8,true) (8,true)
(9,true) (9,true)
(10,true) (10,true)
(0,false) (0,false)
Same(tree1,tree1): true
*/
|
package apis
import (
"project/app/admin/models/bo"
"project/app/admin/models/dto"
"project/app/admin/service"
"project/common/api"
"project/utils"
"project/utils/app"
"github.com/gin-gonic/gin"
)
// r is the shared Role service instance used by every handler in this file.
var r = new(service.Role)
// SelectRolesHandler queries roles by multiple criteria.
// NOTE(review): @Description/@Param below look copy-pasted from the login
// endpoint — verify against the real request/response types.
// @Summary Query roles by multiple criteria
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles [get]
func SelectRolesHandler(c *gin.Context) {
	// 1. Bind and validate the query parameters.
	var role dto.SelectRoleArrayDto
	if err := c.ShouldBindQuery(&role); err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// Convert the requested sort orders via utils.OrderJson.
	orderJsonData, err := utils.OrderJson(role.Orders)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Run the query.
	roleData, err := r.SelectRoles(role, orderJsonData)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 3. Return the data.
	app.ResponseSuccess(c, roleData)
}
// InsertRolesHandler creates a new role.
// @Summary Create a role
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles [post]
func InsertRolesHandler(c *gin.Context) {
	// 1. Bind and validate the request body. The original discarded this
	// error; malformed bodies are now rejected explicitly.
	var insertrole dto.InsertRoleDto
	if err := c.ShouldBind(&insertrole); err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Resolve the calling user and perform the insert.
	user, err := api.GetUserMessage(c)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	if err = r.InsertRole(insertrole, user.UserId); err != nil {
		// MySQL duplicate-key error (1062) => role name already exists.
		// Length-checked prefix test: err.Error()[0:10] panicked on messages
		// shorter than 10 characters.
		msg := err.Error()
		if len(msg) >= 10 && msg[0:10] == "Error 1062" {
			app.ResponseError(c, app.CodeRoleNameExist)
			return
		}
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 3. Return success.
	app.ResponseSuccess(c, nil)
}
// UpdateRolesHandler updates an existing role.
// @Summary Update a role
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles [put]
func UpdateRolesHandler(c *gin.Context) {
	// 1. Bind and validate the request body. The original discarded this
	// error; malformed bodies are now rejected explicitly.
	var updateRole dto.UpdateRoleDto
	if err := c.ShouldBind(&updateRole); err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Resolve the calling user and perform the update.
	user, err := api.GetUserMessage(c)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	if err = r.UpdateRole(updateRole, user.UserId); err != nil {
		// MySQL duplicate-key error (1062) => role name already exists.
		// Length-checked prefix test: err.Error()[0:10] panicked on messages
		// shorter than 10 characters.
		msg := err.Error()
		if len(msg) >= 10 && msg[0:10] == "Error 1062" {
			app.ResponseError(c, app.CodeRoleNameExist)
			return
		}
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 3. Return success.
	app.ResponseSuccess(c, nil)
}
// DeleteRolesHandler deletes the roles with the given ids.
// @Summary Delete roles
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles [delete]
func DeleteRolesHandler(c *gin.Context) {
	// 1. Bind the JSON array of role ids.
	var ids []int
	if err := c.ShouldBindJSON(&ids); err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Resolve the calling user and perform the deletion.
	user, err := api.GetUserMessage(c)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	err = r.DeleteRole(ids, user.UserId)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 3. Return success.
	app.ResponseSuccess(c, nil)
}
// MenuRolesHandler updates the menus assigned to a role.
// @Summary Update role menus
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles/menu [put]
func MenuRolesHandler(c *gin.Context) {
	// 1. Bind and validate the request body.
	var roleMenus dto.RoleMenus
	err := c.ShouldBind(&roleMenus)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// Resolve the calling user. The original used userMessage without
	// checking this error, risking use of an invalid value.
	userMessage, err := api.GetUserMessage(c)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Apply the menu assignment.
	err = r.UpdateRoleMenu(roleMenus.ID, roleMenus.Menus, userMessage.UserId)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 3. Return success.
	app.ResponseSuccess(c, nil)
}
// SelectRoleHandler returns a single role by id.
// NOTE(review): the extra `id` parameter makes this a non-standard gin
// handler (it must be wrapped by the router), and @Router declares [put]
// while the summary describes a read — verify both.
// @Summary Get a single role
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles/{id} [put]
func SelectRoleHandler(c *gin.Context, id int) {
	role, err := r.SelectRoleOne(id)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// Return the data.
	app.ResponseSuccess(c, role)
}
// SelectRolesAllHandler returns every role.
// @Summary Return all roles
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles/all [get]
func SelectRolesAllHandler(c *gin.Context) {
	// 1. Fetch the data.
	role, err := r.SelectRoleAll()
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Return the data.
	app.ResponseSuccess(c, role)
}
// DownRolesHandler exports role data as an xls download.
// @Summary Export role data
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles/download [get]
func DownRolesHandler(c *gin.Context) {
	// 1. Bind and validate the query parameters.
	var role dto.SelectRoleArrayDto
	if err := c.ShouldBindQuery(&role); err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	orderJsonData, err := utils.OrderJson(role.Orders)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 2. Fetch the rows to export.
	roleData, err := r.DownloadRoleInfoBo(role, orderJsonData)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// 3. Build the sheet rows. The loop variable is named `item` so it does
	// not shadow the `role` query dto declared above (the original shadowed it).
	var res []interface{}
	for _, item := range roleData {
		res = append(res, &bo.DownloadRoleInfoBo{
			Name:        item.Name,
			Level:       item.Level,
			Description: item.Description,
			CreateTime:  item.CreateTime,
		})
	}
	content := utils.ToExcel([]string{`角色名称`, `角色级别`, `描述`, `创建日期`}, res)
	utils.ResponseXls(c, content, "角色数据")
}
// LevelRolesHandler returns the highest role level of the logged-in user.
// @Summary Get the current user's level
// @Description Author:Ymq 2021/01/29 obtain identity token
// @Tags System: authorization API Role Controller
// @Accept application/json
// @Produce application/json
// @Param object body dto.UserLoginDto false "query parameters"
// @Security ApiKeyAuth
// @Success 200 {object} models._ResponseLogin
// @Router /api/roles/level [get]
func LevelRolesHandler(c *gin.Context) {
	user, err := api.GetUserData(c)
	if err != nil {
		app.ResponseError(c, app.CodeParamNotComplete)
		return
	}
	// Pick the maximum Level among the user's roles.
	// NOTE(review): *user.Roles is dereferenced without a nil check —
	// confirm GetUserData always populates Roles.
	var level bo.SelectCurrentUserLevel
	for _, values := range *user.Roles {
		if level.Level < values.Level {
			level.Level = values.Level
		}
	}
	// Return the data.
	app.ResponseSuccess(c, level)
}
|
package assgn3Models
// TripPostReq accepts the input request body for the trip POST operation.
type TripPostReq struct {
	StartLocationID string   `json:"starting_from_location_id"`
	DestLocationID  []string `json:"location_ids"`
}
// CountID keeps track of the "_id" sequence counter document in mongodb.
type CountID struct {
	ID  string `bson:"_id"`
	Seq int    `bson:"seq"`
}
// Counter keeps track of the PUT pointer position.
type Counter struct {
	ID    int `bson:"_id"`
	Count int `bson:"counter"`
}
// TripPostGetResp is the response body for the trip POST/GET operations.
type TripPostGetResp struct {
	ID                  int      `json:"id" bson:"_id"`
	Status              string   `json:"status" bson:"status"`
	StartLocationID     string   `json:"starting_from_location_id" bson:"starting_from_location_id"`
	BestRouteLocationID []string `json:"best_route_location_ids" bson:"best_route_location_ids"`
	TotalCost           int      `json:"total_uber_costs" bson:"total_uber_costs"`
	TotalDuration       int      `json:"total_uber_duration" bson:"total_uber_duration"`
	TotalDistance       float64  `json:"total_distance" bson:"total_distance"`
}
// NextDestination describes the trip state plus the next stop and ride ETA.
type NextDestination struct {
	ID                        int      `json:"id" bson:"_id"`
	Status                    string   `json:"status" bson:"status"`
	StartLocationID           string   `json:"starting_from_location_id" bson:"starting_from_location_id"`
	NextDestinationLocationID string   `json:"next_destination_location_id" bson:"next_destination_location_id"`
	BestRouteLocationID       []string `json:"best_route_location_ids" bson:"best_route_location_ids"`
	TotalCost                 int      `json:"total_uber_costs" bson:"total_uber_costs"`
	TotalDuration             int      `json:"total_uber_duration" bson:"total_uber_duration"`
	TotalDistance             float64  `json:"total_distance" bson:"total_distance"`
	// NOTE(review): the bson tag is "uber_wait_time_ets" while the json tag is
	// "uber_wait_time_eta" — likely a typo, but changing it would break
	// existing stored documents; confirm before fixing.
	ETA int `json:"uber_wait_time_eta" bson:"uber_wait_time_ets"`
}
// POSTReq is the request body sent to the Uber ride-request API.
type POSTReq struct {
	StartLatitude  string `json:"start_latitude"`
	StartLongitude string `json:"start_longitude"`
	EndLatitude    string `json:"end_latitude"`
	EndLongitude   string `json:"end_longitude"`
	ProdID         string `json:"product_id"`
}
// CoordinatesStruct stores a location in mongodb and is the json structure
// displayed in POSTMAN.
type CoordinatesStruct struct {
	ID      int    `bson:"_id" json:"id"`
	Name    string `bson:"name" json:"name"`
	Address string `bson:"address" json:"address"`
	City    string `bson:"city" json:"city"`
	State   string `bson:"state" json:"state"`
	Zip     string `bson:"zip" json:"zip"`
	// Embedded Coordinates serialize under the "coordinate" key.
	Coordinates `json:"coordinate"`
}
// Coordinates is a latitude/longitude pair.
type Coordinates struct {
	Latitude  float64 `bson:"lat" json:"lat"`
	Longitude float64 `bson:"long" json:"long"`
}
// Respsandbox is the Uber sandbox ride-request response.
type Respsandbox struct {
	Status   string `json:"status"`
	ReqID    string `json:"request_id"`
	Driver   string `json:"driver"`
	Eta      int    `json:"eta"`
	Location string `json:"location"`
	Vehicle  string `json:"vehicle"`
	Surge    int    `json:"surge_multiplier"`
}
// Response is the Uber price-estimates response (one entry per product).
type Response struct {
	Prices []struct {
		CurrencyCode         string  `json:"currency_code"`
		DisplayName          string  `json:"display_name"`
		Distance             float64 `json:"distance"`
		Duration             int     `json:"duration"`
		Estimate             string  `json:"estimate"`
		HighEstimate         int     `json:"high_estimate"`
		LocalizedDisplayName string  `json:"localized_display_name"`
		LowEstimate          int     `json:"low_estimate"`
		Minimum              int     `json:"minimum"`
		ProductID            string  `json:"product_id"`
		SurgeMultiplier      int     `json:"surge_multiplier"`
	} `json:"prices"`
}
// ProductResp is the Uber products response, including per-product pricing.
type ProductResp struct {
	Products []struct {
		Capacity     int    `json:"capacity"`
		Description  string `json:"description"`
		DisplayName  string `json:"display_name"`
		Image        string `json:"image"`
		PriceDetails struct {
			Base            float64 `json:"base"`
			CancellationFee int     `json:"cancellation_fee"`
			CostPerDistance float64 `json:"cost_per_distance"`
			CostPerMinute   float64 `json:"cost_per_minute"`
			CurrencyCode    string  `json:"currency_code"`
			DistanceUnit    string  `json:"distance_unit"`
			Minimum         float64 `json:"minimum"`
			ServiceFees     []struct {
				Fee  float64 `json:"fee"`
				Name string  `json:"name"`
			} `json:"service_fees"`
		} `json:"price_details"`
		ProductID string `json:"product_id"`
	} `json:"products"`
}
|
// BSD 3-Clause License
//
// Copyright (c) 2020, Kingsgroup
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package cmd
import (
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"github.com/kingsgroupos/archivist/cli/archivist/g"
"github.com/kingsgroupos/misc"
"github.com/spf13/cobra"
)
// Template names recognized by the tpls command.
const (
	TplStruct              = "struct"
	TplCollection          = "collection"
	TplCollectionExtension = "collectionExtension"
)
// TplMap maps each template name to its default template text.
var TplMap = map[string]string{
	TplStruct:              g.TemplateStruct,
	TplCollection:          g.TemplateCollection,
	TplCollectionExtension: g.TemplateCollectionExtension,
}
// tplsCmd holds the parsed flag values for the "tpls" subcommand.
var tplsCmd tplsCmdT

// tplsCmdCobra wires the "tpls" subcommand into cobra.
var tplsCmdCobra = &cobra.Command{
	Use:   "tpls",
	Short: "Output the default code templates",
	Run:   tplsCmd.execute,
}
// init registers the tpls subcommand and its flags with the root command.
func init() {
	rootCmd.AddCommand(tplsCmdCobra)
	cmd := tplsCmdCobra
	cmd.Flags().StringVarP(&tplsCmd.outputDir,
		"outputDir", "o", "", "the output directory")
	cmd.Flags().BoolVarP(&tplsCmd.force,
		"force", "f", false, "overwrite existing files")
}
// tplsCmdT carries the command-line options of the tpls subcommand.
type tplsCmdT struct {
	outputDir string // target directory; empty means print templates to stdout
	force     bool   // overwrite existing .tpl files
}
// execute prints the default code templates to stdout, or — when -o is set —
// writes one <name>.tpl file per template into the output directory,
// refusing to overwrite existing files unless -f is given.
func (c *tplsCmdT) execute(cmd *cobra.Command, args []string) {
	// Iterate in a fixed order so both stdout output and file creation are
	// deterministic (ranging over TplMap directly visits keys in random order).
	names := []string{TplStruct, TplCollection, TplCollectionExtension}
	if c.outputDir == "" {
		for _, name := range names {
			fmt.Printf("=== %s %s\n\n%s\n\n", name, strings.Repeat("=", 60-len(name)), TplMap[name])
		}
		return
	}
	if err := misc.FindDirectory(c.outputDir); err != nil {
		panic(fmt.Errorf("%s does not exist", c.outputDir))
	}
	// First pass: fail before writing anything if a target exists and -f is absent.
	for _, name := range names {
		fPath := filepath.Join(c.outputDir, name+".tpl")
		if err := misc.FindFile(fPath); err == nil && !c.force {
			panic(fmt.Errorf("%s.tpl already exists, add -f to overwrite it", name))
		}
	}
	// Second pass: write the templates.
	for _, name := range names {
		fPath := filepath.Join(c.outputDir, name+".tpl")
		trimmed := strings.TrimLeft(TplMap[name], "\r\n")
		if err := ioutil.WriteFile(fPath, []byte(trimmed), 0644); err != nil {
			panic(err)
		}
		fmt.Println(fPath)
	}
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package repository
import (
"context"
"net/url"
"strconv"
"testing"
. "github.com/smartystreets/goconvey/convey"
"github.com/diegobernardes/flare"
)
// TestResourceGenResourceSegments verifies that genResourceSegments expands
// each flare.Resource endpoint path into an id-prefixed slice of path
// segments.
func TestResourceGenResourceSegments(t *testing.T) {
	// NOTE(review): the first two fields hold Convey descriptions — `expect`
	// is the outer "when" text, `result` the inner assertion text.
	tests := []struct {
		expect      string
		result      string
		resources   []flare.Resource
		qtySegments int
		want        [][]string
	}{
		{
			"When the list is nil",
			"The result should be a empty list of flare.Resource",
			nil,
			0,
			[][]string{},
		},
		{
			"When the list is not nil",
			`The result should contain a list of list of strings with the flare.Resource id and each
			path segment`,
			[]flare.Resource{
				{ID: "1", Endpoint: url.URL{Path: "/product/123/stock/{*}"}},
				{ID: "2", Endpoint: url.URL{Path: "/product/{*}/stock/{*}"}},
				{ID: "3", Endpoint: url.URL{Path: "/product/456/stock/{*}"}},
			},
			5,
			[][]string{
				{"1", "", "product", "123", "stock", "{*}"},
				{"3", "", "product", "456", "stock", "{*}"},
				{"2", "", "product", "{*}", "stock", "{*}"},
			},
		},
	}
	Convey("Given a list of flare.Resource", t, func() {
		for _, tt := range tests {
			Convey(tt.expect, func() {
				var r Resource
				result := r.genResourceSegments(tt.resources, tt.qtySegments)
				Convey(tt.result, func() { So(result, ShouldResemble, tt.want) })
			})
		}
	})
}
// TestResourceFindOne checks FindByID behavior: a missing id yields a
// not-found repository error, and inserted resources are retrievable by id.
func TestResourceFindOne(t *testing.T) {
	Convey("Given a Resource", t, func() {
		c := NewClient()
		r := c.Resource()
		Convey("It should not find a flare.Resource with id 1", func() {
			resource, err := r.FindByID(context.Background(), "1")
			So(resource, ShouldBeNil)
			So(err, ShouldBeError)
			// The error must be a repository error flagged as not-found.
			nErr, ok := err.(flare.ResourceRepositoryError)
			So(ok, ShouldBeTrue)
			So(nErr.NotFound(), ShouldBeTrue)
		})
		Convey("When a list of flare.Resource is inserted", func() {
			// Distinct hosts avoid the duplicate-address constraint.
			for i := (int64)(0); i < 10; i++ {
				err := r.Create(context.Background(), &flare.Resource{
					ID:       strconv.FormatInt(i, 10),
					Endpoint: url.URL{Host: strconv.FormatInt(i, 10)},
				})
				So(err, ShouldBeNil)
			}
			Convey("It should find the each flare.Resource by id", func() {
				for i := (int64)(0); i < 10; i++ {
					id := strconv.FormatInt(i, 10)
					resource, err := r.FindByID(context.Background(), id)
					So(resource, ShouldNotBeNil)
					So(err, ShouldBeNil)
					So(resource.ID, ShouldEqual, id)
				}
			})
		})
	})
}
// TestResourceCreate checks the Create uniqueness constraints: duplicate ids
// and duplicate endpoint addresses are both rejected with already-exists
// repository errors.
func TestResourceCreate(t *testing.T) {
	Convey("Given a Resource", t, func() {
		c := NewClient()
		r := c.Resource()
		Convey("It should be possible to insert a flare.Resource with id 1", func() {
			err := r.Create(context.Background(), &flare.Resource{ID: "1"})
			So(err, ShouldBeNil)
			Convey("It should not be possible to insert another flare.Resource with id 1", func() {
				err := r.Create(context.Background(), &flare.Resource{ID: "1"})
				So(err, ShouldNotBeNil)
				nErr, ok := err.(flare.ResourceRepositoryError)
				So(ok, ShouldBeTrue)
				So(nErr.AlreadyExists(), ShouldBeTrue)
			})
		})
		Convey("It should be possible to insert a flare.Resource with app.com address", func() {
			err := r.Create(context.Background(), &flare.Resource{
				ID:       "1",
				Endpoint: url.URL{Scheme: "http", Host: "app.com"},
			})
			So(err, ShouldBeNil)
			msg := "It should not be possible to insert another flare.Resource at the same address"
			Convey(msg, func() {
				// Different id, same address: the address constraint must trip.
				err := r.Create(context.Background(), &flare.Resource{
					ID:       "2",
					Endpoint: url.URL{Scheme: "http", Host: "app.com"},
				})
				So(err, ShouldNotBeNil)
				nErr, ok := err.(flare.ResourceRepositoryError)
				So(ok, ShouldBeTrue)
				So(nErr.AlreadyExists(), ShouldBeTrue)
			})
		})
	})
}
// TestResourceDelete checks that deleting a nonexistent resource returns a
// not-found repository error.
func TestResourceDelete(t *testing.T) {
	Convey("Given a Resource", t, func() {
		c := NewClient()
		r := c.Resource()
		Convey("It should not be possible to delete a flare.Resource that does not exist", func() {
			err := r.Delete(context.Background(), "1")
			So(err, ShouldNotBeNil)
			nErr, ok := err.(flare.ResourceRepositoryError)
			So(ok, ShouldBeTrue)
			So(nErr.NotFound(), ShouldBeTrue)
		})
	})
}
|
package catalogsource
import (
"context"
"reflect"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
)
/*
UpdateStatus can be used to update the status of the provided catalog source. Note that
the caller is responsible for ensuring accurate status values in the catsrc argument (i.e.
the status is used as-is).
• logger: used to log errors only
• client: used to fetch / update catalog source status
• catsrc: the CatalogSource to use as a source for status updates. Callers are
responsible for updating the catalog source status values as necessary.
*/
func UpdateStatus(logger *logrus.Entry, client versioned.Interface, catsrc *v1alpha1.CatalogSource) error {
	// Push the status subresource exactly as provided by the caller.
	_, err := client.OperatorsV1alpha1().CatalogSources(catsrc.GetNamespace()).UpdateStatus(context.TODO(), catsrc, metav1.UpdateOptions{})
	if err != nil {
		logger.WithError(err).Error("UpdateStatus - error while setting CatalogSource status")
		return err
	}
	return nil
}
/*
UpdateStatusWithConditions can be used to update the status conditions for the provided catalog source.
This function will make no changes to the other status fields (those fields will be used as-is).
If the provided conditions do not result in any status condition changes, then the API server will not be updated.
Note that the caller is responsible for ensuring accurate status values for all other fields.
• logger: used to log errors only
• client: used to fetch / update catalog source status
• catsrc: the CatalogSource to use as a source for status updates.
• conditions: condition values to be updated
*/
func UpdateStatusWithConditions(logger *logrus.Entry, client versioned.Interface, catsrc *v1alpha1.CatalogSource, conditions ...metav1.Condition) error {
	// make a copy of the status before we make the change
	statusBefore := catsrc.Status.DeepCopy()
	// update the conditions
	for _, condition := range conditions {
		meta.SetStatusCondition(&catsrc.Status.Conditions, condition)
	}
	// don't bother updating if no changes were made
	if reflect.DeepEqual(catsrc.Status.Conditions, statusBefore.Conditions) {
		logger.Debug("UpdateStatusWithConditions - request to update status conditions did not result in any changes, so updates were not made")
		return nil
	}
	// make the update if possible
	if _, err := client.OperatorsV1alpha1().CatalogSources(catsrc.GetNamespace()).UpdateStatus(context.TODO(), catsrc, metav1.UpdateOptions{}); err != nil {
		// Log what actually failed: this function updates status conditions;
		// the previous "image reference" message was copy-pasted from elsewhere.
		logger.WithError(err).Error("UpdateStatusWithConditions - error while updating CatalogSource status conditions")
		return err
	}
	return nil
}
/*
UpdateSpecAndStatusConditions can be used to update the catalog source with the provided status conditions.
This will update the spec and status portions of the catalog source. Calls to the API server will occur
even if the provided conditions result in no changes.
• logger: used to log errors only
• client: used to fetch / update catalog source
• catsrc: the CatalogSource to use as a source for image and status condition updates.
• conditions: condition values to be updated
*/
func UpdateSpecAndStatusConditions(logger *logrus.Entry, client versioned.Interface, catsrc *v1alpha1.CatalogSource, conditions ...metav1.Condition) error {
	// Fold every requested condition into the status first.
	for _, cond := range conditions {
		meta.SetStatusCondition(&catsrc.Status.Conditions, cond)
	}
	// Update writes the whole object (spec included); unlike the
	// conditions-only helpers, no change detection is performed here.
	_, err := client.OperatorsV1alpha1().CatalogSources(catsrc.GetNamespace()).Update(context.TODO(), catsrc, metav1.UpdateOptions{})
	if err != nil {
		logger.WithError(err).Error("UpdateSpecAndStatusConditions - unable to update CatalogSource image reference")
	}
	return err
}
/*
RemoveStatusConditions can be used to remove the status conditions for the provided catalog source.
This function will make no changes to the other status fields (those fields will be used as-is).
If the provided conditions do not result in any status condition changes, then the API server will not be updated.
Note that the caller is responsible for ensuring accurate status values for all other fields.
• logger: used to log errors only
• client: used to fetch / update catalog source status
• catsrc: the CatalogSource to use as a source for status condition removal.
• conditionTypes: condition types to be removed
*/
func RemoveStatusConditions(logger *logrus.Entry, client versioned.Interface, catsrc *v1alpha1.CatalogSource, conditionTypes ...string) error {
	// make a copy of the status before we make the change
	statusBefore := catsrc.Status.DeepCopy()
	// remove the conditions
	for _, conditionType := range conditionTypes {
		meta.RemoveStatusCondition(&catsrc.Status.Conditions, conditionType)
	}
	// don't bother updating if no changes were made
	if reflect.DeepEqual(catsrc.Status.Conditions, statusBefore.Conditions) {
		logger.Debug("RemoveStatusConditions - request to remove status conditions did not result in any changes, so updates were not made")
		return nil
	}
	// make the update if possible
	if _, err := client.OperatorsV1alpha1().CatalogSources(catsrc.GetNamespace()).UpdateStatus(context.TODO(), catsrc, metav1.UpdateOptions{}); err != nil {
		// FIX: the previous log message referenced "image reference" (copy/paste
		// from another helper); this function only removes status conditions.
		logger.WithError(err).Error("RemoveStatusConditions - error while removing CatalogSource status conditions")
		return err
	}
	return nil
}
|
package main
import "github.com/Caik/go-stream-broadcast/internal/reader"
// main hands control to the reader package.
func main() {
	// reader.Serve presumably blocks for the lifetime of the process —
	// confirm in package reader.
	reader.Serve()
}
|
package start
import (
c "github.com/zond/godip/variants/classical/common"
dip "github.com/zond/godip/common"
"github.com/zond/godip/graph"
)
// SCs maps every province that the classical board marks as a starting supply
// center to the nation that owns it at game start.
func SCs() (result map[dip.Province]dip.Nation) {
	result = map[dip.Province]dip.Nation{}
	board := Graph()
	for _, province := range board.Provinces() {
		owner := board.SC(province)
		if owner == nil {
			continue
		}
		result[province] = *owner
	}
	return result
}
// Graph builds the adjacency graph of the classical Diplomacy board.
// Each Prov(name) call declares a province; Conn(name, flags...) adds an
// adjacency usable by the given movement types (c.Sea, c.Land or c.Coast...);
// Flag(...) sets the province's own terrain; SC(nation) marks a starting
// supply center for that nation (c.Neutral for unowned centers). Provinces
// with "/nc", "/sc", "/ec" suffixes are named coasts of a parent province.
func Graph() *graph.Graph {
	return graph.New().
		// nat
		Prov("nat").Conn("nrg", c.Sea).Conn("cly", c.Sea).Conn("lvp", c.Sea).Conn("iri", c.Sea).Conn("mid", c.Sea).Flag(c.Sea).
		// nrg
		Prov("nrg").Conn("nat", c.Sea).Conn("bar", c.Sea).Conn("nwy", c.Sea).Conn("nth", c.Sea).Conn("edi", c.Sea).Conn("cly", c.Sea).Flag(c.Sea).
		// bar
		Prov("bar").Conn("nrg", c.Sea).Conn("stp/nc", c.Sea).Conn("nwy", c.Sea).Conn("stp", c.Sea).Flag(c.Sea).
		// stp/nc
		Prov("stp/nc").Conn("bar", c.Sea).Conn("nwy", c.Sea).Flag(c.Sea).
		// stp
		Prov("stp").Conn("fin", c.Land).Conn("nwy", c.Land).Conn("mos", c.Land).Conn("lvn", c.Land).Flag(c.Land).Conn("bar", c.Sea).Conn("bot", c.Sea).SC(c.Russia).
		// mos
		Prov("mos").Conn("stp", c.Land).Conn("sev", c.Land).Conn("ukr", c.Land).Conn("war", c.Land).Conn("lvn", c.Land).Flag(c.Land).SC(c.Russia).
		// sev
		Prov("sev").Conn("ukr", c.Land).Conn("mos", c.Land).Conn("arm", c.Coast...).Conn("bla", c.Sea).Conn("rum", c.Coast...).Flag(c.Coast...).SC(c.Russia).
		// arm
		Prov("arm").Conn("ank", c.Coast...).Conn("bla", c.Sea).Conn("sev", c.Coast...).Conn("syr", c.Land).Conn("smy", c.Land).Flag(c.Coast...).
		// syr
		Prov("syr").Conn("eas", c.Sea).Conn("smy", c.Coast...).Conn("arm", c.Land).Flag(c.Coast...).
		// eas
		Prov("eas").Conn("ion", c.Sea).Conn("aeg", c.Sea).Conn("smy", c.Sea).Conn("syr", c.Sea).Flag(c.Sea).
		// ion
		Prov("ion").Conn("apu", c.Sea).Conn("adr", c.Sea).Conn("tun", c.Sea).Conn("tys", c.Sea).Conn("nap", c.Sea).Conn("alb", c.Sea).Conn("gre", c.Sea).Conn("aeg", c.Sea).Conn("eas", c.Sea).Flag(c.Sea).
		// tun
		Prov("tun").Conn("naf", c.Coast...).Conn("wes", c.Sea).Conn("tys", c.Sea).Conn("ion", c.Sea).Flag(c.Coast...).SC(c.Neutral).
		// naf
		Prov("naf").Conn("mid", c.Sea).Conn("wes", c.Sea).Conn("tun", c.Coast...).Flag(c.Coast...).
		// mid
		Prov("mid").Conn("wes", c.Sea).Conn("nat", c.Sea).Conn("iri", c.Sea).Conn("eng", c.Sea).Conn("bre", c.Sea).Conn("gas", c.Sea).Conn("spa/nc", c.Sea).Conn("por", c.Sea).Conn("spa/sc", c.Sea).Conn("naf", c.Sea).Conn("spa", c.Sea).Flag(c.Sea).
		// iri
		Prov("iri").Conn("nat", c.Sea).Conn("lvp", c.Sea).Conn("wal", c.Sea).Conn("eng", c.Sea).Conn("mid", c.Sea).Flag(c.Sea).
		// lvp
		Prov("lvp").Conn("iri", c.Sea).Conn("nat", c.Sea).Conn("cly", c.Coast...).Conn("edi", c.Land).Conn("yor", c.Land).Conn("wal", c.Coast...).Flag(c.Coast...).SC(c.England).
		// cly
		Prov("cly").Conn("nat", c.Sea).Conn("nrg", c.Sea).Conn("edi", c.Coast...).Conn("lvp", c.Coast...).Flag(c.Coast...).
		// edi
		Prov("edi").Conn("cly", c.Coast...).Conn("nrg", c.Sea).Conn("nth", c.Sea).Conn("yor", c.Coast...).Conn("lvp", c.Land).Flag(c.Coast...).SC(c.England).
		// nth
		Prov("nth").Conn("eng", c.Sea).Conn("edi", c.Sea).Conn("nrg", c.Sea).Conn("nwy", c.Sea).Conn("ska", c.Sea).Conn("den", c.Sea).Conn("hel", c.Sea).Conn("hol", c.Sea).Conn("bel", c.Sea).Conn("lon", c.Sea).Conn("yor", c.Sea).Flag(c.Sea).
		// nwy
		Prov("nwy").Conn("nth", c.Sea).Conn("nrg", c.Sea).Conn("bar", c.Sea).Conn("stp/nc", c.Sea).Conn("stp", c.Land).Conn("fin", c.Land).Conn("swe", c.Coast...).Conn("ska", c.Sea).Flag(c.Coast...).SC(c.Neutral).
		// stp/sc
		Prov("stp/sc").Conn("bot", c.Sea).Conn("fin", c.Sea).Conn("lvn", c.Sea).Flag(c.Sea).
		// lvn
		Prov("lvn").Conn("stp", c.Land).Conn("bal", c.Sea).Conn("bot", c.Sea).Conn("stp/sc", c.Sea).Conn("mos", c.Land).Conn("war", c.Land).Conn("pru", c.Coast...).Flag(c.Coast...).
		// war
		Prov("war").Conn("sil", c.Land).Conn("pru", c.Land).Conn("lvn", c.Land).Conn("mos", c.Land).Conn("ukr", c.Land).Conn("gal", c.Land).Flag(c.Land).SC(c.Russia).
		// ukr
		Prov("ukr").Conn("war", c.Land).Conn("mos", c.Land).Conn("sev", c.Land).Conn("rum", c.Land).Conn("gal", c.Land).Flag(c.Land).
		// bla
		Prov("bla").Conn("bul/ec", c.Sea).Conn("rum", c.Sea).Conn("sev", c.Sea).Conn("arm", c.Sea).Conn("ank", c.Sea).Conn("con", c.Sea).Conn("bul", c.Sea).Flag(c.Sea).
		// ank
		Prov("ank").Conn("con", c.Coast...).Conn("bla", c.Sea).Conn("arm", c.Coast...).Conn("smy", c.Land).Flag(c.Coast...).SC(c.Turkey).
		// smy
		Prov("smy").Conn("aeg", c.Sea).Conn("con", c.Coast...).Conn("ank", c.Land).Conn("arm", c.Land).Conn("syr", c.Coast...).Conn("eas", c.Sea).Flag(c.Coast...).SC(c.Turkey).
		// aeg
		Prov("aeg").Conn("eas", c.Sea).Conn("ion", c.Sea).Conn("gre", c.Sea).Conn("bul/sc", c.Sea).Conn("con", c.Sea).Conn("smy", c.Sea).Conn("bul", c.Sea).Flag(c.Sea).
		// gre
		Prov("gre").Conn("ion", c.Sea).Conn("alb", c.Coast...).Conn("ser", c.Land).Conn("bul", c.Land).Conn("bul/sc", c.Sea).Conn("aeg", c.Sea).Flag(c.Coast...).SC(c.Neutral).
		// nap
		Prov("nap").Conn("tys", c.Sea).Conn("rom", c.Coast...).Conn("apu", c.Coast...).Conn("ion", c.Sea).Flag(c.Coast...).SC(c.Italy).
		// tys
		Prov("tys").Conn("wes", c.Sea).Conn("gol", c.Sea).Conn("tus", c.Sea).Conn("rom", c.Sea).Conn("nap", c.Sea).Conn("ion", c.Sea).Conn("tun", c.Sea).Flag(c.Sea).
		// wes
		Prov("wes").Conn("mid", c.Sea).Conn("spa/sc", c.Sea).Conn("gol", c.Sea).Conn("tys", c.Sea).Conn("tun", c.Sea).Conn("naf", c.Sea).Conn("spa", c.Sea).Flag(c.Sea).
		// spa/sc
		Prov("spa/sc").Conn("mid", c.Sea).Conn("por", c.Sea).Conn("mar", c.Sea).Conn("gol", c.Sea).Conn("wes", c.Sea).Flag(c.Sea).
		// spa
		Prov("spa").Conn("por", c.Land).Conn("gas", c.Land).Conn("mar", c.Land).Conn("mid", c.Sea).Conn("gol", c.Sea).Conn("wes", c.Sea).Flag(c.Land).SC(c.Neutral).
		// spa/nc
		Prov("spa/nc").Conn("por", c.Sea).Conn("mid", c.Sea).Conn("gas", c.Sea).Flag(c.Sea).
		// por
		Prov("por").Conn("mid", c.Sea).Conn("spa/nc", c.Sea).Conn("spa", c.Land).Conn("spa/sc", c.Sea).Flag(c.Coast...).SC(c.Neutral).
		// gas
		Prov("gas").Conn("mid", c.Sea).Conn("bre", c.Coast...).Conn("par", c.Land).Conn("bur", c.Land).Conn("mar", c.Land).Conn("spa", c.Land).Conn("spa/nc", c.Sea).Flag(c.Coast...).
		// bre
		Prov("bre").Conn("mid", c.Sea).Conn("eng", c.Sea).Conn("pic", c.Coast...).Conn("par", c.Land).Conn("gas", c.Coast...).Flag(c.Coast...).SC(c.France).
		// eng
		Prov("eng").Conn("mid", c.Sea).Conn("iri", c.Sea).Conn("wal", c.Sea).Conn("lon", c.Sea).Conn("nth", c.Sea).Conn("bel", c.Sea).Conn("pic", c.Sea).Conn("bre", c.Sea).Flag(c.Sea).
		// wal
		Prov("wal").Conn("iri", c.Sea).Conn("lvp", c.Coast...).Conn("yor", c.Land).Conn("lon", c.Coast...).Conn("eng", c.Sea).Flag(c.Coast...).
		// yor
		Prov("yor").Conn("lvp", c.Land).Conn("edi", c.Coast...).Conn("nth", c.Sea).Conn("lon", c.Coast...).Conn("wal", c.Land).Flag(c.Coast...).
		// ska
		Prov("ska").Conn("nth", c.Sea).Conn("nwy", c.Sea).Conn("swe", c.Sea).Conn("den", c.Sea).Flag(c.Sea).
		// swe
		Prov("swe").Conn("ska", c.Sea).Conn("nwy", c.Coast...).Conn("fin", c.Coast...).Conn("bot", c.Sea).Conn("bal", c.Sea).Conn("den", c.Coast...).Flag(c.Coast...).SC(c.Neutral).
		// fin
		Prov("fin").Conn("nwy", c.Land).Conn("bot", c.Sea).Conn("swe", c.Coast...).Conn("stp", c.Land).Conn("stp/sc", c.Sea).Flag(c.Coast...).
		// bot
		Prov("bot").Conn("swe", c.Sea).Conn("fin", c.Sea).Conn("stp/sc", c.Sea).Conn("lvn", c.Sea).Conn("bal", c.Sea).Conn("stp", c.Sea).Flag(c.Sea).
		// bal
		Prov("bal").Conn("den", c.Sea).Conn("swe", c.Sea).Conn("bot", c.Sea).Conn("lvn", c.Sea).Conn("pru", c.Sea).Conn("ber", c.Sea).Conn("kie", c.Sea).Flag(c.Sea).
		// pru
		Prov("pru").Conn("ber", c.Coast...).Conn("bal", c.Sea).Conn("lvn", c.Coast...).Conn("war", c.Land).Conn("sil", c.Land).Flag(c.Coast...).
		// sil
		Prov("sil").Conn("mun", c.Land).Conn("ber", c.Land).Conn("pru", c.Land).Conn("war", c.Land).Conn("gal", c.Land).Conn("boh", c.Land).Flag(c.Land).
		// gal
		Prov("gal").Conn("boh", c.Land).Conn("sil", c.Land).Conn("war", c.Land).Conn("ukr", c.Land).Conn("rum", c.Land).Conn("bud", c.Land).Conn("vie", c.Land).Flag(c.Land).
		// rum
		Prov("rum").Conn("bla", c.Sea).Conn("bud", c.Land).Conn("gal", c.Land).Conn("ukr", c.Land).Conn("sev", c.Coast...).Conn("bul/ec", c.Sea).Conn("bul", c.Land).Conn("ser", c.Land).Flag(c.Coast...).SC(c.Neutral).
		// bul/ec
		Prov("bul/ec").Conn("rum", c.Sea).Conn("bla", c.Sea).Conn("con", c.Sea).Flag(c.Sea).
		// bul
		Prov("bul").Conn("ser", c.Land).Conn("rum", c.Land).Conn("con", c.Land).Conn("gre", c.Land).Flag(c.Land).Conn("aeg", c.Sea).Conn("bla", c.Sea).SC(c.Neutral).
		// con
		Prov("con").Conn("bul/sc", c.Sea).Conn("bul", c.Land).Conn("bul/ec", c.Sea).Conn("bla", c.Sea).Conn("ank", c.Coast...).Conn("smy", c.Coast...).Conn("aeg", c.Sea).Flag(c.Coast...).SC(c.Turkey).
		// bul/sc
		Prov("bul/sc").Conn("gre", c.Sea).Conn("con", c.Sea).Conn("aeg", c.Sea).Flag(c.Sea).
		// ser
		Prov("ser").Conn("tri", c.Land).Conn("bud", c.Land).Conn("rum", c.Land).Conn("bul", c.Land).Conn("gre", c.Land).Conn("alb", c.Land).Flag(c.Land).SC(c.Neutral).
		// alb
		Prov("alb").Conn("adr", c.Sea).Conn("tri", c.Coast...).Conn("ser", c.Land).Conn("gre", c.Coast...).Conn("ion", c.Sea).Flag(c.Coast...).
		// adr
		Prov("adr").Conn("ven", c.Sea).Conn("tri", c.Sea).Conn("alb", c.Sea).Conn("ion", c.Sea).Conn("apu", c.Sea).Flag(c.Sea).
		// apu
		Prov("apu").Conn("rom", c.Land).Conn("ven", c.Coast...).Conn("adr", c.Sea).Conn("ion", c.Sea).Conn("nap", c.Coast...).Flag(c.Coast...).
		// rom
		Prov("rom").Conn("tys", c.Sea).Conn("tus", c.Coast...).Conn("ven", c.Land).Conn("apu", c.Land).Conn("nap", c.Coast...).Flag(c.Coast...).SC(c.Italy).
		// tus
		Prov("tus").Conn("gol", c.Sea).Conn("pie", c.Coast...).Conn("ven", c.Land).Conn("rom", c.Coast...).Conn("tys", c.Sea).Flag(c.Coast...).
		// gol
		Prov("gol").Conn("spa/sc", c.Sea).Conn("mar", c.Sea).Conn("pie", c.Sea).Conn("tus", c.Sea).Conn("tys", c.Sea).Conn("wes", c.Sea).Conn("spa", c.Sea).Flag(c.Sea).
		// mar
		Prov("mar").Conn("spa", c.Land).Conn("gas", c.Land).Conn("bur", c.Land).Conn("pie", c.Coast...).Conn("gol", c.Sea).Conn("spa/sc", c.Sea).Flag(c.Coast...).SC(c.France).
		// bur
		Prov("bur").Conn("par", c.Land).Conn("pic", c.Land).Conn("bel", c.Land).Conn("ruh", c.Land).Conn("mun", c.Land).Conn("mar", c.Land).Conn("gas", c.Land).Flag(c.Land).
		// par
		Prov("par").Conn("bre", c.Land).Conn("pic", c.Land).Conn("bur", c.Land).Conn("gas", c.Land).Flag(c.Land).SC(c.France).
		// pic
		Prov("pic").Conn("bre", c.Coast...).Conn("eng", c.Sea).Conn("bel", c.Coast...).Conn("bur", c.Land).Conn("par", c.Land).Flag(c.Coast...).
		// lon
		Prov("lon").Conn("wal", c.Coast...).Conn("yor", c.Coast...).Conn("nth", c.Sea).Conn("eng", c.Sea).Flag(c.Coast...).SC(c.England).
		// bel
		Prov("bel").Conn("pic", c.Coast...).Conn("eng", c.Sea).Conn("nth", c.Sea).Conn("hol", c.Coast...).Conn("ruh", c.Land).Conn("bur", c.Land).Flag(c.Coast...).SC(c.Neutral).
		// hol
		Prov("hol").Conn("nth", c.Sea).Conn("hel", c.Sea).Conn("kie", c.Coast...).Conn("ruh", c.Land).Conn("bel", c.Coast...).Flag(c.Coast...).SC(c.Neutral).
		// hel
		Prov("hel").Conn("nth", c.Sea).Conn("den", c.Sea).Conn("kie", c.Sea).Conn("hol", c.Sea).Flag(c.Sea).
		// den
		Prov("den").Conn("hel", c.Sea).Conn("nth", c.Sea).Conn("ska", c.Sea).Conn("swe", c.Coast...).Conn("bal", c.Sea).Conn("kie", c.Coast...).Flag(c.Coast...).SC(c.Neutral).
		// ber
		Prov("ber").Conn("kie", c.Coast...).Conn("bal", c.Sea).Conn("pru", c.Coast...).Conn("sil", c.Land).Conn("mun", c.Land).Flag(c.Coast...).SC(c.Germany).
		// mun
		Prov("mun").Conn("bur", c.Land).Conn("ruh", c.Land).Conn("kie", c.Land).Conn("ber", c.Land).Conn("sil", c.Land).Conn("boh", c.Land).Conn("tyr", c.Land).Flag(c.Land).SC(c.Germany).
		// boh
		Prov("boh").Conn("mun", c.Land).Conn("sil", c.Land).Conn("gal", c.Land).Conn("vie", c.Land).Conn("tyr", c.Land).Flag(c.Land).
		// vie
		Prov("vie").Conn("tyr", c.Land).Conn("boh", c.Land).Conn("gal", c.Land).Conn("bud", c.Land).Conn("tri", c.Land).Flag(c.Land).SC(c.Austria).
		// bud
		Prov("bud").Conn("tri", c.Land).Conn("vie", c.Land).Conn("gal", c.Land).Conn("rum", c.Land).Conn("ser", c.Land).Flag(c.Land).SC(c.Austria).
		// tri
		Prov("tri").Conn("adr", c.Sea).Conn("ven", c.Coast...).Conn("tyr", c.Land).Conn("vie", c.Land).Conn("bud", c.Land).Conn("ser", c.Land).Conn("alb", c.Coast...).Flag(c.Coast...).SC(c.Austria).
		// ven
		Prov("ven").Conn("tus", c.Land).Conn("pie", c.Land).Conn("tyr", c.Land).Conn("tri", c.Coast...).Conn("adr", c.Sea).Conn("apu", c.Coast...).Conn("rom", c.Land).Flag(c.Coast...).SC(c.Italy).
		// pie
		Prov("pie").Conn("mar", c.Coast...).Conn("tyr", c.Land).Conn("ven", c.Land).Conn("tus", c.Coast...).Conn("gol", c.Sea).Flag(c.Coast...).
		// ruh
		Prov("ruh").Conn("bel", c.Land).Conn("hol", c.Land).Conn("kie", c.Land).Conn("mun", c.Land).Conn("bur", c.Land).Flag(c.Land).
		// tyr
		Prov("tyr").Conn("mun", c.Land).Conn("boh", c.Land).Conn("vie", c.Land).Conn("tri", c.Land).Conn("ven", c.Land).Conn("pie", c.Land).Flag(c.Land).
		// kie
		Prov("kie").Conn("hol", c.Coast...).Conn("hel", c.Sea).Conn("den", c.Coast...).Conn("bal", c.Sea).Conn("ber", c.Coast...).Conn("mun", c.Land).Conn("ruh", c.Land).Flag(c.Coast...).SC(c.Germany).
		Done()
}
|
package main
import "fmt"
// main demonstrates a runtime crash: integer division by zero panics in Go.
func main() {
	numerator := 1
	denominator := 0
	fmt.Println("Хуваахын өмнө...")
	numerator = numerator / denominator // deliberate integer divide-by-zero panic
	fmt.Println("Дараа нь")
}
|
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package app
// [START sample]
import (
"net/http"
"google.golang.org/appengine"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/log"
)
// logHandler stores one sample Post entity in the App Engine datastore and
// writes "ok!" on success, or a 500 response on failure, logging either way.
func logHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	post := &Post{Body: "sample post"}
	// An incomplete key lets the datastore assign the entity ID under the
	// "Posts" kind (no parent key).
	key := datastore.NewIncompleteKey(ctx, "Posts", nil)
	if _, err := datastore.Put(ctx, key, post); err != nil {
		log.Errorf(ctx, "could not put into datastore: %v", err)
		http.Error(w, "An error occurred. Try again.", http.StatusInternalServerError)
		return
	}
	log.Debugf(ctx, "Datastore put successful")
	w.Write([]byte("ok!"))
}
// [END sample]
// Post is the sample datastore entity written by logHandler.
type Post struct {
	// Body is the post's text content.
	Body string
}
// init registers logHandler on the /log route at program startup.
func init() {
	http.HandleFunc("/log", logHandler)
}
|
package ircserver
import (
"sort"
"strings"
"gopkg.in/sorcix/irc.v2"
)
// init registers the NAMES command handler in the server's command table.
func init() {
	Commands["NAMES"] = &ircCommand{
		Func: (*IRCServer).cmdNames,
	}
}
// cmdNames answers a NAMES request. For a known channel it sends the sorted
// member list (channel operators prefixed with "@") followed by
// RPL_ENDOFNAMES; with no parameter or an unknown channel it sends only an
// end-of-list reply for "*".
func (i *IRCServer) cmdNames(s *Session, reply *Replyctx, msg *irc.Message) {
	if len(msg.Params) > 0 {
		channelname := msg.Params[0]
		if c, ok := i.channels[ChanToLower(channelname)]; ok {
			names := make([]string, 0, len(c.nicks))
			for member, perms := range c.nicks {
				name := i.nicks[member].Nick
				if perms[chanop] {
					name = "@" + name
				}
				names = append(names, name)
			}
			sort.Strings(names)
			i.sendUser(s, reply, &irc.Message{
				Prefix:  i.ServerPrefix,
				Command: irc.RPL_NAMREPLY,
				Params:  []string{s.Nick, "=", channelname, strings.Join(names, " ")},
			})
			i.sendUser(s, reply, &irc.Message{
				Prefix:  i.ServerPrefix,
				Command: irc.RPL_ENDOFNAMES,
				Params:  []string{s.Nick, channelname, "End of /NAMES list."},
			})
			return
		}
	}
	i.sendUser(s, reply, &irc.Message{
		Prefix:  i.ServerPrefix,
		Command: irc.RPL_ENDOFNAMES,
		Params:  []string{s.Nick, "*", "End of /NAMES list."},
	})
}
|
package main
import (
"fmt"
// "net/url"
"github.com/kavenegar/kavenegar-go"
)
// main demonstrates the Kavenegar verify-lookup API: it asks the service to
// deliver a one-time token to a receptor using a named template, then prints
// either the resulting message ID/status or the error.
func main() {
	api := kavenegar.New(" your apikey ")
	//Verify.VerifyLookup
	// NOTE(review): receptor/template/token are intentionally left empty in
	// this sample — fill them in before running.
	receptor := ""
	template := ""
	token := ""
	params := &kavenegar.VerifyLookupParam{
	// Type: kavenegar.Type_VerifyLookup_Sms
	}
	if res, err := api.Verify.Lookup(receptor, template, token, params); err != nil {
		// All error kinds are just printed; the switch shows how API-level and
		// transport-level errors can be told apart.
		switch err := err.(type) {
		case *kavenegar.APIError:
			fmt.Println(err.Error())
		case *kavenegar.HTTPError:
			fmt.Println(err.Error())
		default:
			fmt.Println(err.Error())
		}
	} else {
		fmt.Println("MessageID = ", res.MessageID)
		fmt.Println("Status = ", res.Status)
		//...
	}
	//Verify.CreateLookup
	// v := url.Values{}
	// v.Set("receptor", "")
	// v.Add("template", "")
	// v.Add("token", "")
	// //v.Add("token2", "")
	// //v.Add("token3", "")
	// v.Add("token", kavenegar.Type_VerifyLookup_Sms.String())
	// if res, err := api.Verify.CreateLookup(v); err != nil {
	// switch err := err.(type) {
	// case *kavenegar.APIError:
	// fmt.Println(err.Error())
	// case *kavenegar.HTTPError:
	// fmt.Println(err.Error())
	// default:
	// fmt.Println(err.Error())
	// }
	// } else {
	// fmt.Println("MessageID = ", res.MessageID)
	// fmt.Println("Status = ", res.Status)
	// //...
	// }
}
|
/*
There are n rooms labeled from 0 to n - 1 and all the rooms are locked except for room 0. Your goal is to visit all the rooms. However, you cannot enter a locked room without having its key.
When you visit a room, you may find a set of distinct keys in it. Each key has a number on it, denoting which room it unlocks, and you can take all of them with you to unlock the other rooms.
Given an array rooms where rooms[i] is the set of keys that you can obtain if you visited room i, return true if you can visit all the rooms, or false otherwise.
Example 1:
Input: rooms = [[1],[2],[3],[]]
Output: true
Explanation:
We visit room 0 and pick up key 1.
We then visit room 1 and pick up key 2.
We then visit room 2 and pick up key 3.
We then visit room 3.
Since we were able to visit every room, we return true.
Example 2:
Input: rooms = [[1,3],[3,0,1],[2],[0]]
Output: false
Explanation: We can not enter room number 2 since the only key that unlocks it is in that room.
Constraints:
n == rooms.length
2 <= n <= 1000
0 <= rooms[i].length <= 1000
1 <= sum(rooms[i].length) <= 3000
0 <= rooms[i][j] < n
All the values of rooms[i] are unique.
*/
package main
// main exercises visitall on the two LeetCode examples plus the empty board.
func main() {
	cases := []struct {
		rooms [][]int
		want  bool
	}{
		{[][]int{{1}, {2}, {3}, {}}, true},
		{[][]int{{1, 3}, {3, 0, 1}, {2}, {0}}, false},
		{[][]int{}, true},
	}
	for _, tc := range cases {
		assert(visitall(tc.rooms) == tc.want)
	}
}
// assert panics when its argument is false; a tiny in-file test helper.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// visitall reports whether every room is reachable starting from room 0,
// where rooms[i] lists the keys (room indices) found in room i.
// An empty rooms slice trivially succeeds.
//
// Improvements over the previous version: the recursive helper is inlined as
// an iterative depth-first search (no recursion depth limit on adversarial
// input), making the function self-contained and independently testable.
func visitall(rooms [][]int) bool {
	if len(rooms) == 0 {
		return true
	}
	// Iterative DFS from room 0; visited doubles as the "key already held" set.
	visited := make(map[int]bool, len(rooms))
	visited[0] = true
	stack := []int{0}
	for len(stack) > 0 {
		room := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		for _, key := range rooms[room] {
			if !visited[key] {
				visited[key] = true
				stack = append(stack, key)
			}
		}
	}
	for i := range rooms {
		if !visited[i] {
			return false
		}
	}
	return true
}
// walk depth-first marks node and every room transitively reachable from it
// in seen, following the keys listed in rooms.
func walk(rooms [][]int, node int, seen map[int]bool) {
	seen[node] = true
	for _, next := range rooms[node] {
		if seen[next] {
			continue
		}
		walk(rooms, next, seen)
	}
}
|
package fluent
import (
"fmt"
"log"
"strings"
"sync"
)
// SQL keywords and fmt templates assembled by the build* query options below.
// "$%d" verbs produce PostgreSQL-style positional placeholders.
const (
	whereClause = "WHERE"
	andClause = "AND"
	orClause = "OR"
	isNullClause = "IS NULL"
	isNotNullClause = "IS NOT NULL"
	selectStatement = "SELECT %s FROM %s"
	insertStatement = "INSERT INTO %s (%s) VALUES (%s) RETURNING id"
	updateStatement = "UPDATE %s SET"
	joinStatement = " INNER JOIN %s ON %s = %s"
	leftJoinStatement = " LEFT JOIN %s ON %s = %s"
	whereStatement = " %s %s %s $%d"
	whereNullStatement = " %s %s %s"
	groupByStatement = " GROUP BY %s"
	orderByStatement = " ORDER BY %s"
	limitStatement = " LIMIT $%d"
	offsetStatement = " OFFSET $%d"
)
// query accumulates the fragments of one SQL statement together with the
// positional arguments bound to its placeholders.
type query struct {
	stmt string // statement text built so far
	columns []string
	table string
	join, leftJoin [][]interface{} // triples: (table, leftColumn, rightColumn)
	where, whereNull [][]interface{} // triples (col, op, value) / pairs (col, isNull)
	orderBy, groupBy []string
	limit, offset int
	args []interface{} // values for $1..$n placeholders
	argCounter int // next placeholder number to hand out (starts at 1)
	debug bool // when true, log() prints stmt and args
	mutex *sync.RWMutex // guards option application in builder()
}
// newQuery returns an empty builder; placeholder numbering starts at $1.
func newQuery() *query {
	q := &query{}
	q.argCounter = 1
	q.mutex = &sync.RWMutex{}
	return q
}
// log prints the built statement and its args when debug mode is enabled.
func (q *query) log() {
	if !q.debug {
		return
	}
	log.Println(q.stmt, q.args)
}
// queryOption mutates a query in place; builder applies a sequence of them.
type queryOption func(q *query)
// builder applies the given options in order while holding the write lock,
// so a statement can be assembled safely by concurrent callers.
func (q *query) builder(options ...queryOption) {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	for _, option := range options {
		option(q)
	}
}
// buildInsert produces "INSERT INTO <table> (<cols>) VALUES ($1..$n) RETURNING id",
// storing cols and args on the query.
func buildInsert(cols []string, args []interface{}) queryOption {
	return func(q *query) {
		q.columns = cols
		q.args = args
		placeholders := []string{}
		for n := 1; n <= len(q.args); n++ {
			placeholders = append(placeholders, fmt.Sprintf("$%d", n))
		}
		q.stmt = fmt.Sprintf(insertStatement, q.table, strings.Join(q.columns, ","), strings.Join(placeholders, ","))
	}
}
// buildUpdate produces "UPDATE <table> SET col = $n, ..." with one placeholder
// per column, advancing the query's placeholder counter as it goes.
func buildUpdate(cols []string, args []interface{}) queryOption {
	return func(q *query) {
		q.columns = cols
		q.args = args
		assignments := fmt.Sprintf(updateStatement, q.table)
		for _, column := range q.columns {
			assignments += fmt.Sprintf(" %s = $%d,", column, q.argCounter)
			q.argCounter++
		}
		// Drop the trailing character (the comma left by the loop).
		q.stmt = assignments[:len(assignments)-1]
	}
}
// buildSelect produces "SELECT <cols> FROM <table>" from the collected
// columns and table.
func buildSelect() queryOption {
	return func(q *query) {
		q.stmt = fmt.Sprintf(selectStatement, strings.Join(q.columns, ","), q.table)
	}
}
// buildWhere appends one " WHERE <col> <op> $n" clause per collected
// (column, operator, value) triple, switching to AND once a WHERE is already
// present in the statement. Tuples of the wrong arity are skipped.
func buildWhere() queryOption {
	return func(q *query) {
		if len(q.where) == 0 {
			return
		}
		for _, clause := range q.where {
			if len(clause) != 3 {
				continue
			}
			column, operator := clause[0].(string), clause[1].(string)
			q.args = append(q.args, clause[2])
			keyword := whereClause
			if strings.Contains(q.stmt, whereClause) {
				keyword = andClause
			}
			q.stmt += fmt.Sprintf(whereStatement, keyword, column, operator, q.argCounter)
			q.argCounter++
		}
	}
}
// buildWhereNull appends " WHERE <col> IS [NOT] NULL" clauses (AND-joined once
// a WHERE exists) for every collected (column, wantNull) pair; pairs of the
// wrong arity are skipped.
func buildWhereNull() queryOption {
	return func(q *query) {
		if len(q.whereNull) == 0 {
			return
		}
		for _, clause := range q.whereNull {
			if len(clause) != 2 {
				continue
			}
			column := clause[0].(string)
			wantNull := clause[1].(bool)
			keyword := whereClause
			if strings.Contains(q.stmt, whereClause) {
				keyword = andClause
			}
			predicate := isNotNullClause
			if wantNull {
				predicate = isNullClause
			}
			q.stmt += fmt.Sprintf(whereNullStatement, keyword, column, predicate)
		}
	}
}
// buildJoin appends an INNER JOIN clause for every valid
// (table, leftColumn, rightColumn) triple collected on the query.
func buildJoin() queryOption {
	return func(q *query) {
		for _, j := range q.join {
			if j == nil || len(j) != 3 {
				continue
			}
			q.stmt += fmt.Sprintf(joinStatement, j[0], j[1], j[2])
		}
	}
}
// buildLeftJoin appends a LEFT JOIN clause for every valid
// (table, leftColumn, rightColumn) triple collected on the query.
func buildLeftJoin() queryOption {
	return func(q *query) {
		for _, j := range q.leftJoin {
			if j == nil || len(j) != 3 {
				continue
			}
			q.stmt += fmt.Sprintf(leftJoinStatement, j[0], j[1], j[2])
		}
	}
}
// buildGroupBy appends " GROUP BY <cols>" when grouping columns were set.
func buildGroupBy() queryOption {
	return func(q *query) {
		if q.groupBy != nil {
			q.stmt += fmt.Sprintf(groupByStatement, strings.Join(q.groupBy, ","))
		}
	}
}
// buildOrderBy appends " ORDER BY <cols>" when ordering columns were set.
func buildOrderBy() queryOption {
	return func(q *query) {
		if q.orderBy != nil {
			q.stmt += fmt.Sprintf(orderByStatement, strings.Join(q.orderBy, ","))
		}
	}
}
// buildLimit appends " LIMIT $n" and binds the limit value, but only when a
// positive limit was requested.
func buildLimit() queryOption {
	return func(q *query) {
		if q.limit > 0 {
			q.args = append(q.args, q.limit)
			q.stmt += fmt.Sprintf(limitStatement, q.argCounter)
			q.argCounter++
		}
	}
}
// buildOffset appends " OFFSET $n" and binds the offset value.
// FIX: mirrors buildLimit by skipping an unset (zero) offset — previously a
// redundant "OFFSET $n" placeholder with value 0 was always emitted, making
// the generated statement inconsistent with how LIMIT is handled.
func buildOffset() queryOption {
	return func(q *query) {
		if q.offset > 0 {
			q.args = append(q.args, q.offset)
			q.stmt += fmt.Sprintf(offsetStatement, q.argCounter)
			q.argCounter++
		}
	}
}
// setDebug toggles statement/argument logging (see (*query).log).
func setDebug(s bool) queryOption {
	return func(q *query) {
		q.debug = s
	}
}
// setTable records the table name used by the build* statement options.
func setTable(t string) queryOption {
	return func(q *query) {
		q.table = t
	}
}
// setColumns records the column list used by SELECT/INSERT/UPDATE builders.
func setColumns(c []string) queryOption {
	return func(q *query) {
		q.columns = c
	}
}
// setWhere queues one (column, operator, value) triple for buildWhere.
func setWhere(w []interface{}) queryOption {
	return func(q *query) {
		q.where = append(q.where, w)
	}
}
// setWhereNull queues one (column, isNull) pair for buildWhereNull.
func setWhereNull(wn []interface{}) queryOption {
	return func(q *query) {
		q.whereNull = append(q.whereNull, wn)
	}
}
// setJoin queues one (table, leftColumn, rightColumn) triple for buildJoin.
func setJoin(j []interface{}) queryOption {
	return func(q *query) {
		q.join = append(q.join, j)
	}
}
// setLeftJoin queues one (table, leftColumn, rightColumn) triple for buildLeftJoin.
func setLeftJoin(lj []interface{}) queryOption {
	return func(q *query) {
		q.leftJoin = append(q.leftJoin, lj)
	}
}
// setGroupBy records the GROUP BY column list.
func setGroupBy(gb []string) queryOption {
	return func(q *query) {
		q.groupBy = gb
	}
}
// setOrderBy records the ORDER BY column list.
func setOrderBy(ob []string) queryOption {
	return func(q *query) {
		q.orderBy = ob
	}
}
// setLimit records the LIMIT value (0 means no limit; see buildLimit).
func setLimit(l int) queryOption {
	return func(q *query) {
		q.limit = l
	}
}
// setOffset records the OFFSET value.
func setOffset(o int) queryOption {
	return func(q *query) {
		q.offset = o
	}
}
|
// Longest Common Substring
// Dynamic Programming
//
// Time complexity: O(len(s1) * len(s2))
// Space complexity: O(len(s1) * len(s2))
//
// References:
// https://www.geeksforgeeks.org/longest-common-substring/
// https://github.com/mission-peace/interview/blob/master/src/com/interview/dynamic/LongestCommonSubstring.java
//
// Acknowledgements:
// Tushar Roy @mission-peace
// His video explaining this algorithm: https://www.youtube.com/watch?v=BysNXJHzCEs
package longcommsubstr
import "fmt"
// longestCommonSubstring returns the length of the longest common substring
// of s1 and s2 together with the DP table used to compute it, so a caller can
// reconstruct the substring itself (see printLongestCommonSubstring).
//
// T[i][j] holds the length of the longest common suffix of s1[:i] and s2[:j].
// Time and space complexity: O(len(s1) * len(s2)).
func longestCommonSubstring(s1, s2 string) (int, [][]int) {
	var max int
	// Create a 2D slice (len(s1)+1) x (len(s2)+1). Row and column 0 stay zero,
	// so the recurrence needs no special casing at the string boundaries.
	T := make([][]int, len(s1)+1)
	for i := range T {
		T[i] = make([]int, len(s2)+1)
	}
	for i := 1; i <= len(s1); i++ {
		for j := 1; j <= len(s2); j++ {
			if s1[i-1] == s2[j-1] {
				// Matching characters extend the common suffix ending at the
				// previous character pair.
				T[i][j] = T[i-1][j-1] + 1
				// Keep track of the max as we go, so that we can return it at the end.
				if T[i][j] > max {
					max = T[i][j]
				}
			} else {
				// BUG FIX: a mismatch terminates any common suffix, so this cell
				// must be 0. The previous code copied T[i-1][j-1], which let
				// counts leak diagonally across mismatches and overstated the
				// answer (e.g. "xaxb" vs "aab" reported 2 instead of 1).
				T[i][j] = 0
			}
		}
	}
	// Return T so that the caller can reconstruct the substring from it.
	return max, T
}
// printLongestCommonSubstring reconstructs one longest common substring of
// length max from the DP table T produced by longestCommonSubstring.
// s1 must be the same first string that was used to build T; max is consumed
// (decremented to zero) while filling in the result.
func printLongestCommonSubstring(s1 string, max int, T [][]int) string {
	// Allocate a slice of bytes that's equal to the answer.
	res := make([]byte, max)
	// Initialize i, j outside of the for loop so that we can use it another for loop.
	i, j := 1, 1
	// Setup a break label so that we can break out of both loops when we find the position of the end of the longest common substring.
FindPosition:
	for ; i < len(T); i++ {
		for ; j < len(T[0]); j++ {
			if T[i][j] == max {
				// (i, j) now addresses the cell where the longest common
				// suffix ends; the substring ends at s1[i-1].
				break FindPosition
			}
		}
		// Since i and j's scope is outside the above for loop, we need to set j back to 1 at the end of the loop.
		j = 1
	}
	// While max is not 0, put characters into our result slice from end to beginning of the longest common substring.
	for max > 0 {
		res[max-1] = s1[i-1]
		i--
		max--
	}
	return fmt.Sprintf("%s", res)
}
|
package alchemyapi
import (
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"testing"
)
// TestNewAnalyzer checks that a well-formed key is accepted and that the same
// key with an extra trailing character is rejected.
func TestNewAnalyzer(t *testing.T) {
	validKey := "foooooooooooooooooooooooooooooooooooobar"
	if _, err := NewAnalyzer(validKey); err != nil {
		t.Error("should not be error")
	}
	if _, err := NewAnalyzer(validKey + "!"); err == nil {
		t.Error("should be error")
	}
}
// TestAnalyzerSetBaseUrl checks that SetBaseUrl stores the given base URL.
func TestAnalyzerSetBaseUrl(t *testing.T) {
	a, _ := NewAnalyzer("foooooooooooooooooooooooooooooooooooobar")
	a.SetBaseUrl("baseuri")
	if a.baseUrl != "baseuri" {
		t.Error("Should be setted")
	}
}
// TestAnalyzerSentiment spins up a stub server that mimics the sentiment
// endpoints and checks both the success path and the API-error path for each
// request flavor (html, text, url).
//
// BUG FIX: the failure branches previously dereferenced resp.Status after
// resp == nil and err.Error() after err == nil, turning a clean test failure
// into a nil-pointer panic; both checks are now guarded with else-if.
func TestAnalyzerSentiment(t *testing.T) {
	apiKey := "foooooooooooooooooooooooooooooooooooobar"
	handler := func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "POST" {
			r.ParseForm()
			switch r.URL.String() {
			case entryPoints["sentiment"]["html"], entryPoints["sentiment"]["text"], entryPoints["sentiment"]["url"]:
				switch r.FormValue("test_eager") {
				case "":
					// Happy path: a canned negative-sentiment response echoing the input.
					resp := &SentimentResponse{}
					resp.DocSentiment.Mixed = 1
					resp.DocSentiment.Score = -0.5
					resp.DocSentiment.Type = "negative"
					resp.Language = "english"
					resp.Status = "OK"
					resp.Text = r.FormValue("html") + r.FormValue("text") + r.FormValue("url")
					resp.TotalTransactions = 1
					resp.Usage = "Usage information"
					data, _ := json.Marshal(&resp)
					w.Write(data)
				default:
					w.Write([]byte("{\"status\":\"ERROR\",\"statusInfo\":\"malfunction\"}"))
				}
			default:
				w.Write([]byte("{\"status\":\"ERROR\",\"statusInfo\":\"unsupport\"}"))
			}
		} else {
			w.Write([]byte("{\"status\":\"ERROR\",\"statusInfo\":\"unsupport\"}"))
		}
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()
	analyzer, _ := NewAnalyzer(apiKey)
	analyzer.SetBaseUrl(server.URL)
	// normal
	for _, v := range []string{"html", "text", "url"} {
		resp, err := analyzer.Sentiment(
			v,
			"Yesterday dumb Bob destroyed my fancy iPhone in beautiful Denver, Colorado. ",
			url.Values{},
		)
		if err != nil {
			t.Errorf("With sentiment flavor %s, should not raise exception", v)
		}
		if resp == nil {
			t.Errorf("With sentiment flavor %s, should not be nil", v)
		} else if resp.Status != "OK" {
			t.Errorf("With sentiment flavor %s, should be ok", v)
		}
	}
	// malfunction
	for _, v := range []string{"html", "text", "url"} {
		options := url.Values{}
		options.Add("test_eager", "malfunction")
		resp, err := analyzer.Sentiment(
			v,
			"foobar",
			options,
		)
		if err == nil {
			t.Errorf("With sentiment flavor %s, should raise exception", v)
		} else if err.Error() != "malfunction" {
			t.Errorf("With sentiment flavor %s, should raise malfunction message", v)
		}
		if resp != nil {
			t.Errorf("With sentiment flavor %s, should be nil", v)
		}
	}
}
|
package postal
import (
"log"
"math"
"strings"
"time"
"github.com/cloudfoundry-incubator/notifications/cf"
"github.com/cloudfoundry-incubator/notifications/gobble"
"github.com/cloudfoundry-incubator/notifications/mail"
"github.com/cloudfoundry-incubator/notifications/metrics"
"github.com/cloudfoundry-incubator/notifications/models"
"github.com/pivotal-cf/uaa-sso-golang/uaa"
"github.com/pivotal-golang/conceal"
)
// Delivery is the unit of work carried by a queued job: everything needed to
// send one notification email to one user.
type Delivery struct {
	// User is the UAA record of the recipient.
	User uaa.User
	// Options carries per-message settings (includes KindID, used for
	// unsubscribe checks — see ShouldDeliver).
	Options Options
	UserGUID string
	Space cf.CloudControllerSpace
	Organization cf.CloudControllerOrganization
	// ClientID identifies the sending client application.
	ClientID string
	Templates Templates
	MessageID string
}
// DeliveryWorker consumes Delivery jobs from the gobble queue and sends them
// as email, consulting the unsubscribe repositories before delivering.
type DeliveryWorker struct {
	logger *log.Logger
	mailClient mail.ClientInterface
	globalUnsubscribesRepo models.GlobalUnsubscribesRepoInterface
	unsubscribesRepo models.UnsubscribesRepoInterface
	kindsRepo models.KindsRepoInterface
	database models.DatabaseInterface
	sender string
	encryptionKey string
	// Embedded gobble.Worker provides the queue-polling loop; it is wired to
	// Deliver in NewDeliveryWorker.
	gobble.Worker
}
// NewDeliveryWorker wires a DeliveryWorker to its collaborators and
// registers it on the given queue so Deliver runs for every job.
func NewDeliveryWorker(id int, logger *log.Logger, mailClient mail.ClientInterface, queue gobble.QueueInterface,
 globalUnsubscribesRepo models.GlobalUnsubscribesRepoInterface, unsubscribesRepo models.UnsubscribesRepoInterface,
 kindsRepo models.KindsRepoInterface, database models.DatabaseInterface, sender, encryptionKey string) DeliveryWorker {
	w := DeliveryWorker{
		logger:                 logger,
		mailClient:             mailClient,
		globalUnsubscribesRepo: globalUnsubscribesRepo,
		unsubscribesRepo:       unsubscribesRepo,
		kindsRepo:              kindsRepo,
		database:               database,
		sender:                 sender,
		encryptionKey:          encryptionKey,
	}
	w.Worker = gobble.NewWorker(id, queue, w.Deliver)
	return w
}
// Deliver is the gobble callback for one notification job. It decodes the
// payload, consults unsubscribe preferences via ShouldDeliver, sends the
// email, and logs a metric describing the outcome. Undeliverable jobs are
// re-enqueued via Retry.
func (worker DeliveryWorker) Deliver(job *gobble.Job) {
	var delivery Delivery
	err := job.Unmarshal(&delivery)
	if err != nil {
		metrics.NewMetric("counter", map[string]interface{}{
			"name": "notifications.worker.panic.json",
		}).Log()
		worker.Retry(job)
		// Bug fix: without this return the zero-value Delivery flowed into
		// ShouldDeliver, which indexes User.Emails[0] while logging and
		// would panic.
		return
	}
	if worker.ShouldDeliver(delivery) {
		message := worker.pack(delivery)
		status := worker.SendMail(message)
		if status != StatusDelivered {
			worker.Retry(job)
			metrics.NewMetric("counter", map[string]interface{}{
				"name": "notifications.worker.retry",
			}).Log()
		} else {
			metrics.NewMetric("counter", map[string]interface{}{
				"name": "notifications.worker.delivered",
			}).Log()
		}
	} else {
		metrics.NewMetric("counter", map[string]interface{}{
			"name": "notifications.worker.unsubscribed",
		}).Log()
	}
}
// Retry re-enqueues the job with exponential backoff (2^retries minutes)
// and logs the scheduled retry time; it gives up silently after 10 attempts.
func (worker DeliveryWorker) Retry(job *gobble.Job) {
	if job.RetryCount >= 10 {
		return
	}
	backoff := time.Duration(int64(math.Pow(2, float64(job.RetryCount)))) * time.Minute
	job.Retry(backoff)
	const layout = "Jan 2, 2006 at 3:04pm (MST)"
	worker.logger.Printf("Message failed to send, retrying at: %s", job.ActiveAt.Format(layout))
}
// ShouldDeliver reports whether the notification may be sent: critical
// kinds always go out; otherwise the user must not be globally unsubscribed
// nor unsubscribed from this client/kind, and must have a plausible email
// address.
func (worker DeliveryWorker) ShouldDeliver(delivery Delivery) bool {
	conn := worker.database.Connection()
	if worker.isCritical(conn, delivery.Options.KindID, delivery.ClientID) {
		return true
	}
	// Bug fix: the log statements below used delivery.User.Emails[0]
	// unconditionally, panicking when the UAA record has no email.
	email := "<unknown>"
	if len(delivery.User.Emails) > 0 {
		email = delivery.User.Emails[0]
	}
	globallyUnsubscribed, err := worker.globalUnsubscribesRepo.Get(conn, delivery.UserGUID)
	if err != nil || globallyUnsubscribed {
		worker.logger.Printf("Not delivering because %s has unsubscribed", email)
		return false
	}
	_, err = worker.unsubscribesRepo.Find(conn, delivery.ClientID, delivery.Options.KindID, delivery.UserGUID)
	if err != nil {
		if (err == models.ErrRecordNotFound{}) {
			// No unsubscribe record: deliver only to a non-empty address
			// containing "@".
			return len(delivery.User.Emails) > 0 && strings.Contains(delivery.User.Emails[0], "@")
		}
		worker.logger.Printf("Not delivering because: %+v", err)
		return false
	}
	worker.logger.Printf("Not delivering because %s has unsubscribed", email)
	return false
}
// isCritical reports whether the kind is flagged critical; kinds that are
// not found are treated as non-critical.
func (worker DeliveryWorker) isCritical(conn models.ConnectionInterface, kindID, clientID string) bool {
	k, err := worker.kindsRepo.Find(conn, kindID, clientID)
	if (err == models.ErrRecordNotFound{}) {
		return false
	}
	return k.Critical
}
// pack renders the delivery into a ready-to-send mail.Message using the
// worker's sender address and encryption key. Failures here indicate a
// configuration/template bug and panic.
func (worker DeliveryWorker) pack(delivery Delivery) mail.Message {
	cloak, err := conceal.NewCloak([]byte(worker.encryptionKey))
	if err != nil {
		panic(err)
	}
	ctx := NewMessageContext(delivery, worker.sender, cloak)
	msg, err := NewPackager().Pack(ctx)
	if err != nil {
		panic(err)
	}
	return msg
}
// SendMail connects to the SMTP server and attempts delivery, returning one
// of the Status* constants: StatusUnavailable when the connection fails,
// StatusFailed on an SMTP send error, StatusDelivered on success.
func (worker DeliveryWorker) SendMail(message mail.Message) string {
	if err := worker.mailClient.Connect(); err != nil {
		worker.logger.Printf("Error Establishing SMTP Connection: %s", err.Error())
		return StatusUnavailable
	}
	worker.logger.Printf("Attempting to deliver message to %s", message.To)
	if err := worker.mailClient.Send(message); err != nil {
		worker.logger.Printf("Failed to deliver message due to SMTP error: %s", err.Error())
		return StatusFailed
	}
	worker.logger.Printf("Message was successfully sent to %s", message.To)
	return StatusDelivered
}
|
package isakura
import (
"fmt"
"net/http"
"net/url"
"time"
"io"
"io/ioutil"
"encoding/json"
"path/filepath"
"os"
"sync"
"regexp"
"log"
"strings"
uuid "github.com/satori/go.uuid"
)
// saveMutex serializes Save so the concurrently running goroutines (session
// keeper, guide refresher, downloader) never interleave config writes.
var saveMutex sync.Mutex
// refresh would renew the current session token; token refresh is not
// implemented, so this always errors and callers fall back to a full login.
func (isakura *Isakura) refresh() error {
	return fmt.Errorf("Unsupported")
}
// login authenticates against the auth host with the configured credentials
// and decodes the response into isakura.Session.
//
// Fixes relative to the original: the response body is now closed (it was
// leaked on every login), and failures are returned as errors instead of
// panicking — MaintainSession already contains backoff logic for login
// errors that could never trigger while this function panicked.
// NOTE(review): device_id and redirect_url are hard-coded; confirm intended.
func (isakura *Isakura) login() error {
	authHostUrl := fmt.Sprintf("%s/logon.sjs", isakura.AuthHostUrl)
	resp, err := http.PostForm(authHostUrl, url.Values{"device_id": {"3079898AA6E3EA6D0999F4DE748B5595"}, "cid": {isakura.Username}, "password": {isakura.Password}, "redirect_url": {"http://webtv.jptvpro.net/play.html"}})
	if err != nil {
		return fmt.Errorf("login request: %v", err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("login read body: %v", err)
	}
	if err := json.Unmarshal(body, &isakura.Session); err != nil {
		return fmt.Errorf("login decode session: %v", err)
	}
	return nil
}
// MaintainSession keeps a usable login session for the life of the process:
// it logs in once when no access token is saved, then loops forever,
// re-authenticating whenever the token is within 300s of expiry (refresh is
// unimplemented, so it always falls back to a full login). Consecutive
// login failures stretch the 30s poll interval by a backoff factor capped
// at 60. This function never returns; run it in its own goroutine.
func (isakura *Isakura) MaintainSession() {
 if isakura.Session.AccessToken == "" {
 isakura.login()
 } else {
 fmt.Printf("Using existing Session\n")
 }
 isakura.Save()
 refreshInXSeconds := isakura.Session.ExpireTime-isakura.Session.ServerTime-300
 fmt.Printf("Forced Token Refresh in %v seconds\n", refreshInXSeconds)
 backoff := 1
 for {
 // Recompute time-to-expiry each pass (intentionally shadows the outer
 // variable).
 refreshInXSeconds := isakura.Session.ExpireTime-isakura.Session.ServerTime-300
 if refreshInXSeconds < 0 || isakura.Session.Expired {
 err := isakura.refresh() // try to fresh
 if err != nil {
 err = isakura.login() // can't refresh... just re-login
 if err != nil {
 if backoff < 60 {
 backoff += 1 // if we can't login.. we should backoff
 }
 } else {
 backoff = 1
 }
 }
 }
 time.Sleep(time.Second * time.Duration(30 * backoff))
 }
}
// MaintainChannelGuide loops forever: every hour it re-downloads the channel
// guide, persists the configuration, and scans the guide for programs
// matching the configured monitors, queueing matches on downloads.
// This function never returns; run it in its own goroutine.
func (isakura *Isakura) MaintainChannelGuide(downloads chan Download) {
 for {
 fmt.Printf("Download Channels\n")
 isakura.retrieveChannelGuide()
 fmt.Printf("Downloaded Channels\n")
 isakura.Save()
 fmt.Printf("Scan for new Content\n")
 isakura.scan(downloads)
 fmt.Printf("Scanned for new Content\n")
 time.Sleep(time.Second * time.Duration(60 * 60))
 }
}
// scan walks every channel's program list and queues each not-yet-downloaded
// program whose title matches a monitor's regexp pattern.
func (isakura *Isakura) scan(downloads chan Download) {
	for _, mon := range isakura.Monitors {
		fmt.Printf("Scan Guide for %v\n", mon.Pattern)
		re, err := regexp.Compile(mon.Pattern)
		if err != nil {
			fmt.Printf("Regex Error:-( %v\n", err)
			continue
		}
		// TODO: FIX Channels.Channels
		for _, ch := range isakura.Channels.Channels {
			fmt.Printf("Scan Channel %v\n", ch.Name)
			for _, prog := range ch.Programs {
				if prog.Downloaded {
					continue
				}
				if re.MatchString(prog.Title) {
					fmt.Printf("%v matches\n", prog.Title)
					downloads <- Download{Program: prog, Channel: ch, Monitor: mon}
				}
			}
		}
	}
}
// retrieveChannelGuide blocks until a session token exists, then fetches the
// full channel/program listing from the VMS host into isakura.Channels and
// reverses the channel order (oldest first). Network/decoding failures
// panic, matching the file's existing style.
func (isakura *Isakura) retrieveChannelGuide() {
	for isakura.Session.AccessToken == "" { // we are not ready!
		time.Sleep(time.Second)
	}
	v := url.Values{"action": {"listLives"}, "cid": {"2E2FAA0BF6E84FE0C34955CA0DFB6AAD"}, "details": {"1"}, "page_size": {"200"}, "sort": {"created_time desc"}, "type": {"video"}, "uid": {"C2D9261F3D5753E74E97EB28FE2D8B26"}, "referer": {"http://isakura.tv"}}
	auth_host_url := fmt.Sprintf("%s/api?%v", isakura.Session.ProductConfig.VmsHost, v.Encode())
	resp, err := http.Get(auth_host_url)
	if err != nil {
		panic(err)
	}
	// Bug fix: the response body was never closed, leaking a connection on
	// every hourly guide refresh.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	err = json.Unmarshal(body, &isakura.Channels)
	if err != nil {
		panic(err)
	}
	// invert channel listing (oldie is goldie!)
	channels := isakura.Channels.Channels
	for i := len(channels)/2 - 1; i >= 0; i-- {
		opp := len(channels) - 1 - i
		channels[i], channels[opp] = channels[opp], channels[i]
	}
}
// Preview streams the program's FLV directly to w (typically an HTTP
// response), forwarding the client's Range header so seeking works.
func (isakura *Isakura) Preview(download Download, r *http.Request, w io.Writer) error {
	fmt.Printf("Download: %v from %v\n", download.Program.Title, download.Channel.Name)
	if download.Program.Path == "" {
		return fmt.Errorf("Empty Playpath, can't download\n")
	}
	pq, err := isakura.query(download)
	if err != nil {
		return fmt.Errorf("Error Querying Package: %v, hopefully we get it on the next scan\n", err)
	}
	v := url.Values{"__download": {"1"}, "sc_tk": {pq.ScTk}}
	downloadUrl := fmt.Sprintf("%v/video.fpvsegments?%v", pq.Substreams[0].HttpUrl, v.Encode())
	fmt.Printf("Download Url: %v\n", downloadUrl)
	req, _ := http.NewRequest("GET", downloadUrl, nil)
	req.Header.Add("Content-Type", "video/x-flv")
	if r.Header.Get("Range") != "" {
		log.Printf("Got Range Header")
		req.Header.Set("Range", r.Header.Get("Range"))
	}
	var client http.Client
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("Error - %v\n", err)
	}
	defer resp.Body.Close()
	// Bug fix: the io.Copy error was silently discarded; a mid-stream
	// failure now surfaces to the caller.
	if _, err := io.Copy(w, resp.Body); err != nil {
		return fmt.Errorf("Error - %v\n", err)
	}
	return nil
}
// FileName builds the on-disk base name for a downloaded program: optional
// date/time prefixes, the title with the monitor's search/replace modifiers
// applied, and optional date/time suffixes.
//
// Bug fixes relative to the original:
//   - the decorated name (finalName) is now returned; previously the bare
//     title was returned, silently discarding all Prepend*/Append*
//     decoration
//   - time.Format is given Go reference layouts ("01-02-2006", "15:04");
//     the previous "MM-DD-YYYY"/"HH:mm" strings are not valid Go layouts
//     and were emitted nearly verbatim
func (isakura *Isakura) FileName(download Download) string {
	program := download.Program
	title := program.Title
	monitor := download.Monitor
	log.Printf("Download - %+v", program)
	log.Printf("Starting Title - %v", title)
	// Renamed from "time" to avoid shadowing the time package.
	t := time.Unix(download.Program.Time, 0)
	finalName := ""
	if monitor.PrependDate {
		finalName += t.Format("01-02-2006") + " "
	}
	if monitor.PrependTime {
		finalName += t.Format("15:04") + " "
	}
	for _, modifier := range monitor.Modifiers {
		log.Printf("Modify %v\n", title)
		title = strings.Replace(title, modifier.Search, modifier.Replace, -1)
		log.Printf("Search: %v, Replace: %v, New Title: %v\n", modifier.Search, modifier.Replace, title)
	}
	finalName += title
	if monitor.AppendDate {
		finalName += " " + t.Format("01-02-2006")
	}
	if monitor.AppendTime {
		finalName += " " + t.Format("15:04")
	}
	log.Printf("Ending Title - %v", finalName)
	return finalName
}
// Delete removes the stored download with the given id: the file on disk is
// deleted first, then the entry is dropped from the in-memory list and the
// configuration is persisted. Returns "Not Found" when no download matches.
func (isakura *Isakura) Delete(contentId string) error {
	for idx := range isakura.Downloads {
		d := isakura.Downloads[idx]
		if d.ID != contentId {
			continue
		}
		if err := os.Remove(d.LocalPath); err != nil {
			return err
		}
		isakura.Downloads = append(isakura.Downloads[:idx], isakura.Downloads[idx+1:]...)
		isakura.Save()
		return nil
	}
	return fmt.Errorf("Not Found")
}
// Download consumes the downloads channel forever, saving each program to
// <home>/videos/<monitor folder>. The per-item work lives in downloadOne so
// that deferred closes run after each item — the original deferred inside
// the infinite loop, accumulating open response bodies and file handles for
// the life of the process.
func (isakura *Isakura) Download(downloads chan Download) {
	for {
		isakura.downloadOne(<-downloads)
	}
}

// downloadOne fetches one program to disk: it resolves the stream URL via
// query, streams it to a "._flv" temp name, renames to the final ".flv"
// path, records the download, and persists the config. Existing files and
// unrecoverable per-item errors cause an early return (the loop moves on).
func (isakura *Isakura) downloadOne(download Download) {
	filename := isakura.FileName(download)
	folder := download.Monitor.Folder
	folderPath := filepath.Join(isakura.Home, "videos", folder)
	if err := os.MkdirAll(folderPath, 0777); err != nil {
		log.Printf("%v", err)
		return
	}
	savePath := filepath.Join(folderPath, fmt.Sprintf("%v._flv", filename))
	finalPath := filepath.Join(folderPath, fmt.Sprintf("%v.flv", filename))
	fmt.Printf("Download: %v from %v\n", download.Program.Title, download.Channel.Name)
	if download.Program.Path == "" {
		fmt.Printf("Empty Playpath, can't download\n")
		return
	}
	// Skip when the finished file already exists.
	if _, err := os.Stat(finalPath); !os.IsNotExist(err) {
		fmt.Printf("Not Downloading since we already have it -> %v\n", finalPath)
		return
	}
	pq, err := isakura.query(download)
	if err != nil {
		fmt.Printf("Error Querying Package: %v, hopefully we get it on the next scan\n", err)
		return
	}
	v := url.Values{"__download": {"1"}, "sc_tk": {pq.ScTk}}
	downloadUrl := fmt.Sprintf("%v/video.fpvsegments?%v", pq.Substreams[0].HttpUrl, v.Encode())
	fmt.Printf("Download Url: %v\n", downloadUrl)
	resp, err := http.Get(downloadUrl)
	if err != nil {
		// Bug fix: the original fell through after the error and
		// dereferenced a nil resp.
		fmt.Printf("Error - %v\n", err)
		return
	}
	defer resp.Body.Close()
	out, err := os.Create(savePath)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	io.Copy(out, resp.Body)
	os.Rename(savePath, finalPath)
	newUuid, _ := uuid.NewV4()
	isakura.Downloads = append(isakura.Downloads, LocalProgram{ID: newUuid.String(), Program: download.Program, LocalPath: finalPath, Monitor: download.Monitor, DownloadDate: time.Now()})
	download.Program.Downloaded = true
	isakura.Save()
}
// query asks the VMS host for the playback/package metadata of one program,
// authenticating with the current session tokens.
//
// Fixes relative to the original: the response body is now closed (it was
// leaked on every call), and read/decode failures return an error instead
// of panicking — both call sites already handle the error return.
func (isakura *Isakura) query(download Download) (*ProgramQuery, error) {
	v := url.Values{"type": {"vod"}, "access_token": {isakura.Session.AccessToken}, "pageUrl": {"http://webtv.jptvpro.net/isakura/main.html"}, "refresh_token": {isakura.Session.RefreshToken}, "expires_in": {fmt.Sprintf("%v", isakura.Session.ExpireTime)}}
	queryPath := fmt.Sprintf("%s%s.json?%v", isakura.Session.ProductConfig.VmsHost, download.Program.Path, v.Encode())
	fmt.Printf("Download Package Info using: \n%v\n", queryPath)
	resp, err := http.Get(queryPath)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	fmt.Printf("ProgramQuery: %v\n", string(body))
	var programQuery ProgramQuery
	if err := json.Unmarshal(body, &programQuery); err != nil {
		return nil, err
	}
	fmt.Printf("ProgramQuery: %+v\n", programQuery)
	return &programQuery, nil
}
// Load restores the saved configuration from <home>/config into the
// receiver; missing or unreadable configuration is reported but not fatal
// (the process starts fresh).
func (isakura *Isakura) Load() {
	if _, err := os.Stat(isakura.Home); os.IsNotExist(err) {
		fmt.Printf("No existing Configuration found in %v\n", isakura.Home)
		return
	}
	savePath := filepath.Join(isakura.Home, "config")
	data, err := ioutil.ReadFile(savePath)
	if err != nil {
		fmt.Printf("Unable to read Configuration %v, Error: %v\n", savePath, err)
		// Bug fix: previously fell through and tried to unmarshal nil data,
		// producing a second, misleading error message.
		return
	}
	err = json.Unmarshal(data, &isakura.ISakuraSaveData)
	if err != nil {
		fmt.Printf("Unable to read Configuration %v, Error: %v\n", savePath, err)
	}
}
// Save atomically persists the configuration: it serializes the save data
// to <home>/config.tmp and renames over <home>/config. The home and videos
// directories are created on first use. Safe for concurrent callers via
// saveMutex.
func (isakura *Isakura) Save() {
	saveMutex.Lock()
	defer saveMutex.Unlock()
	videosPath := filepath.Join(isakura.Home, "videos")
	if _, err := os.Stat(isakura.Home); os.IsNotExist(err) {
		err = os.Mkdir(isakura.Home, 0700)
		if err != nil {
			panic(err)
		}
		err = os.Mkdir(videosPath, 0700)
		if err != nil {
			panic(err)
		}
	}
	savePath := filepath.Join(isakura.Home, "config.tmp")
	finalPath := filepath.Join(isakura.Home, "config")
	fmt.Printf("Saving Configuration to %v\n", savePath)
	b, err := json.MarshalIndent(isakura.ISakuraSaveData, "", "\t")
	if err != nil {
		// Bug fix: previously fell through on a marshal error and wrote the
		// nil buffer, clobbering the existing config with an empty file.
		fmt.Println("error saving:", err)
		return
	}
	err = ioutil.WriteFile(savePath, b, 0600)
	if err != nil {
		panic(fmt.Sprintf("Could not save to %v, Error: %v!\n", savePath, err))
	}
	// Bug fix: the rename result was silently discarded.
	if err := os.Rename(savePath, finalPath); err != nil {
		fmt.Printf("Could not rename %v to %v, Error: %v\n", savePath, finalPath, err)
	}
}
|
// Copyright 2014 The Sporting Exchange Limited. All rights reserved.
// Use of this source code is governed by a free license that can be
// found in the LICENSE file.
package tsdb
import (
"bytes"
"fmt"
"log"
"strconv"
"time"
)
// maxLineLength caps the serialized size of one point on the wire.
const maxLineLength = 1023 // limit in net.opentsdb.tsd.PipelineFactory
// unmarshalText parses one OpenTSDB line ("metric timestamp value
// tag=val ...") into p, resetting p first. Every failure returns an error
// quoting both the offending field and the complete input line.
func (p *Point) unmarshalText(buf []byte) error {
 switch {
 default:
 // ok
 case len(buf) == 0:
 return fmt.Errorf("tsdb: invalid point: empty string")
 case len(buf) > maxLineLength:
 return fmt.Errorf("tsdb: invalid point: line too long (%d>%d)", len(buf), maxLineLength)
 }
 p.reset()
 originalBuf := buf
 // Field 1: metric name.
 metric, buf := skipNonSpace(buf)
 if err := p.setMetric(metric); err != nil {
 return fmt.Errorf("tsdb: invalid metric: %v: %q, in %q", err, metric, originalBuf)
 }
 buf = skipSpace(buf)
 // Field 2: timestamp (seconds, or 13-digit milliseconds).
 timeText, buf := skipNonSpace(buf)
 t, err := parseTime(timeText)
 if err != nil {
 return fmt.Errorf("tsdb: invalid time: %v, in %q", err, originalBuf)
 }
 if err := p.setTime(t); err != nil {
 return fmt.Errorf("tsdb: invalid time: %v, in %q", err, originalBuf)
 }
 buf = skipSpace(buf)
 // Field 3: the value (integer or float).
 valueText, buf := skipNonSpace(buf)
 if err := p.setValueBytes(valueText); err != nil {
 return fmt.Errorf("tsdb: invalid value: %v, in %q", err, originalBuf)
 }
 // Remainder: whitespace-separated tag=value pairs.
 if err := p.setTags(buf, skipNonSpace); err != nil {
 return fmt.Errorf("tsdb: invalid tags: %v, in %q", err, originalBuf)
 }
 return nil
}
// append appends the marshalled version of the point to the end of the provided
// slice. If the slice has insufficient capacity, it is grown using the built-in
// append. In any case, append returns the updated slice.
//
// BUG(masiulaniecj): Millisecond resolution is accepted by Decode and
// NewPoint, but lost by Encode and Put.
func (p *Point) append(b []byte) []byte {
 b = append(b, p.metric...)
 b = append(b, ' ')
 // p.time appears to hold nanoseconds (divided down to whole seconds
 // here) — the wire format carries seconds only, hence the BUG note above.
 b = appendInt(b, p.time/1e9)
 b = append(b, ' ')
 if p.isFloat {
 b = appendFloat(b, p.valueFloat)
 } else {
 b = appendInt(b, p.valueInt)
 }
 // p.tags is already serialized, including its leading separator.
 b = append(b, p.tags...)
 return b
}
// appendSeries appends the point's series identity — metric name plus
// serialized tags, without timestamp or value — to buf.
func (p *Point) appendSeries(buf []byte) []byte {
	return append(append(buf, p.metric...), p.tags...)
}
// appendInt appends the base-10 ASCII representation of n to buf and
// returns the extended slice.
//
// Delegates to strconv.AppendInt (strconv is already imported by this
// file), which — unlike the hand-rolled digit loop it replaces — also
// handles math.MinInt64 correctly: the old code negated n, which overflows
// for MinInt64 and produced garbage digits.
func appendInt(buf []byte, n int64) []byte {
	return strconv.AppendInt(buf, n, 10)
}
// appendFloat appends n formatted at full float32 precision, guaranteeing a
// decimal point ("2" becomes "2.0") so readers can tell float series values
// from integer ones.
func appendFloat(buf []byte, n float32) []byte {
	s := strconv.AppendFloat(make([]byte, 0, 32), float64(n), 'f', -1, 32)
	if bytes.IndexByte(s, '.') < 0 {
		s = append(s, '.', '0')
	}
	return append(buf, s...)
}
// dot is the "." separator as a byte slice.
// NOTE(review): appears unused within this chunk of the file — confirm a
// caller exists before removing.
var dot = []byte(".")
// parseTime converts an ASCII decimal timestamp into a time.Time. It
// accepts 1-10 digits (Unix seconds) or exactly 13 digits (milliseconds);
// any other length or any non-digit byte is a syntax error.
func parseTime(b []byte) (time.Time, error) {
	isMillis := len(b) == 13
	if len(b) == 0 || (len(b) > 10 && !isMillis) {
		return time.Time{}, fmt.Errorf("invalid syntax: %q", b)
	}
	var n int64
	for _, c := range b {
		if c < '0' || c > '9' {
			return time.Time{}, fmt.Errorf("invalid syntax: %q", b)
		}
		n = n*10 + int64(c-'0')
	}
	// Scale to nanoseconds for time.Unix.
	if isMillis {
		n *= 1e6
	} else {
		n *= 1e9
	}
	return time.Unix(0, n), nil
}
// parseValue decodes b as an int64 when it contains no '.', as a float32
// otherwise; parse failures are wrapped with the offending input.
func parseValue(b []byte) (interface{}, error) {
	if bytes.IndexByte(b, '.') >= 0 {
		f, err := parseFloat(b)
		if err != nil {
			return f, fmt.Errorf("%v: %q", err, b)
		}
		return f, nil
	}
	i, err := parseInt(b)
	if err != nil {
		return i, fmt.Errorf("%v: %q", err, b)
	}
	return i, nil
}
// parseInt decodes an optionally '-'-signed ASCII decimal integer.
//
// Bug fix: a lone "-" previously parsed as (0, nil) because the emptiness
// check ran before the sign was stripped; it is now rejected.
// NOTE(review): overflow is still silently ignored, matching the original
// behaviour for in-range OpenTSDB values.
func parseInt(b []byte) (int64, error) {
	neg := false
	if len(b) > 0 && b[0] == '-' {
		neg = true
		b = b[1:]
	}
	if len(b) == 0 {
		return 0, fmt.Errorf("invalid syntax")
	}
	var n int64
	for _, c := range b {
		if c < '0' || c > '9' {
			return 0, fmt.Errorf("invalid syntax")
		}
		n = n*10 + int64(c-'0')
	}
	if neg {
		n = -n
	}
	return n, nil
}
// parseFloat decodes s as a float and narrows the result to float32. The
// input is parsed at 64-bit precision first, preserving the original
// double-rounding behaviour.
func parseFloat(s []byte) (float32, error) {
	f, err := strconv.ParseFloat(string(s), 64)
	return float32(f), err
}
// skipSpace returns b with its leading spaces/tabs removed; an all-blank
// input yields an empty slice at b's end.
func skipSpace(b []byte) []byte {
	i := 0
	for i < len(b) && isSpace(b[i]) {
		i++
	}
	return b[i:]
}
// skipSpaceNorm consumes exactly one leading space and returns the rest of
// the buffer; it panics when the buffer does not start with a space, as
// that indicates a caller bug.
//
// Bug fix: the original returned b[:1] — the space itself — instead of
// b[1:], contradicting both its name and its companion skipSpace.
func skipSpaceNorm(b []byte) []byte {
	if b[0] != ' ' {
		log.Panicf("invalid buf: %s", string(b))
	}
	return b[1:]
}
// skipNonSpace splits b at the first space/tab: word is the leading run of
// non-blank bytes and left still begins with the separator. Without any
// separator the whole buffer is the word and left is empty.
func skipNonSpace(b []byte) (word, left []byte) {
	for i := range b {
		if isSpace(b[i]) {
			return b[:i], b[i:]
		}
	}
	return b, b[:0]
}
// skipNonSpaceNorm is a fast-path variant of skipNonSpace that treats only
// ' ' as a separator and assumes the word is non-empty: a buffer starting
// with a space is returned whole as the word.
func skipNonSpaceNorm(b []byte) (word, left []byte) {
	if i := bytes.IndexByte(b, ' '); i > 0 {
		return b[:i], b[i:]
	}
	return b, b[:0]
}
// isSpace reports whether ch is one of the two separators accepted by the
// line format: a space or a tab.
func isSpace(ch byte) bool {
	switch ch {
	case ' ', '\t':
		return true
	}
	return false
}
|
package main
import (
"fmt"
)
// User is the behaviour shared by every team member: printing a name and a
// role-specific detail listing.
type User interface {
 PrintNama()
 PrintDetails()
}
// Person holds the identity fields (name, origin city, email) embedded by
// Admin and Member.
type Person struct{
 Nama string
 Asal string
 Email string
}
// PrintNama writes the person's name to stdout (no trailing newline).
func (p *Person) PrintNama() {
	fmt.Print("Nama :" + p.Nama)
}
// PrintDetails writes the person's origin and email to stdout (no trailing
// newline).
func (p *Person) PrintDetails() {
	fmt.Print("Asal : " + p.Asal + ", Email : " + p.Email)
}
// Admin is a Person with an administrator task list; it overrides
// PrintDetails to also list the tasks.
type Admin struct{
 Person
 Tugas []string
}
// PrintDetails prints the embedded person details followed by the admin's
// tasks, one per line.
func (a *Admin) PrintDetails() {
	a.Person.PrintDetails()
	fmt.Println("tugas :")
	for i := range a.Tugas {
		fmt.Println(a.Tugas[i])
	}
}
// Member is a Person with a member task list; it overrides PrintDetails to
// also list the tasks.
type Member struct{
 Person
 Tugas []string
}
// PrintDetails prints the embedded person details followed by the member's
// tasks, one per line.
func (m *Member) PrintDetails() {
	m.Person.PrintDetails()
	fmt.Println("tugas :")
	for i := range m.Tugas {
		fmt.Println(m.Tugas[i])
	}
}
// Team aggregates users; GetTeamDetails prints a combined report using
// interface dispatch (each role's own PrintDetails runs).
type Team struct{
 Users []User
}
// GetTeamDetails prints a header followed by each user's name and details.
func (t *Team) GetTeamDetails() {
	fmt.Println("Detail Team")
	for _, u := range t.Users {
		u.PrintNama()
		u.PrintDetails()
	}
}
// main builds a small team (one admin, one member) and prints its details,
// demonstrating interface dispatch over embedded structs.
func main() {
	amir := &Admin{
		Person: Person{
			Nama:  "Amir",
			Asal:  "Yogyakarta",
			Email: "amir@gmail.com",
		},
		Tugas: []string{"Manajemen User", "Manajemen tugas"},
	}
	udin := &Member{
		Person: Person{
			Nama:  "Udin",
			Asal:  "Jakarta",
			Email: "udin@gmail.com",
		},
		Tugas: []string{"Lihat data", "update profile"},
	}
	team := Team{Users: []User{amir, udin}}
	team.GetTeamDetails()
}
|
package main
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
// Coordination state shared between main and the worker goroutines.
var (
 // shutdown is a flag (0 = run, 1 = stop) to alert running goroutines to
 // shutdown; it is only read/written through sync/atomic so both workers
 // reliably observe the store made by main
 shutdown int64
 // wg is used to wait for the program to finish (one count per worker)
 wg sync.WaitGroup
)
// main starts two workers, lets them run for one second, then publishes the
// shutdown flag atomically and waits for both to exit.
func main() {
	// One WaitGroup count per worker goroutine.
	wg.Add(2)
	go doWork("A")
	go doWork("B")
	// Let the workers run for a while.
	time.Sleep(1 * time.Second)
	// Signal shutdown; workers poll the flag with atomic.LoadInt64.
	fmt.Println("Shutdown Now")
	atomic.StoreInt64(&shutdown, 1)
	wg.Wait()
}
// doWork simulates a worker goroutine: it performs a unit of "work", pauses
// briefly, and polls the shared shutdown flag, exiting once it is set.
//
// Bug fix: the pause was 250 * time.Second, which made each loop iteration
// (and therefore the post-signal shutdown) take minutes; the intended pause
// for this polling demo is 250ms.
func doWork(name string) {
	defer wg.Done()
	for {
		fmt.Printf("Doing %s Work", name)
		time.Sleep(250 * time.Millisecond)
		if atomic.LoadInt64(&shutdown) == 1 {
			fmt.Printf("Shutting %s down \n", name)
			break
		}
	}
}
|
package main
import "github.com/urfave/cli"
// These are the core settings and requirements for the plugin to run.
// Config carries the user-supplied plugin settings: update-service
// credentials (User/Key/Server), the target application (AppID), the
// package being published (File/Src) and the release channel to update
// (Channel/Publish).
type Config struct {
	User    string
	Key     string
	Server  string
	AppID   string
	File    string
	Src     string
	Channel string
	Publish string
}

// cliFlag renders one long-form option, e.g. cliFlag("key", "k") == "--key=k".
func cliFlag(name, value string) string {
	return "--" + name + "=" + value
}

// formatFile creates a file name like "awesome.2.0.0.tar"
func (c Config) formatFile(version string) string {
	return c.File + "." + version + ".tar"
}

// createPkgCMD builds the updateservicectl arguments that register a new
// package version pointing at <server>/packages/<file>.
func (c Config) createPkgCMD(version, file string) []string {
	return []string{
		"package",
		"create",
		cliFlag("app-id", c.AppID),
		cliFlag("version", version),
		cliFlag("file", file),
		cliFlag("url", c.Server+"/packages/"+file),
	}
}

// uploadPkgCMD builds the updateservicectl arguments that upload the
// package payload itself.
func (c Config) uploadPkgCMD(file string) []string {
	return []string{"package", "upload", cliFlag("file", file)}
}

// updateChanCMD builds the updateservicectl arguments that point the
// configured release channel at the given version, with the configured
// publish setting.
func (c Config) updateChanCMD(version string) []string {
	return []string{
		"channel",
		"update",
		cliFlag("app-id", c.AppID),
		cliFlag("channel", c.Channel),
		cliFlag("version", version),
		cliFlag("publish", c.Publish),
	}
}

// credFlags returns the credential options shared by every updateservicectl
// invocation.
func (c Config) credFlags() []string {
	return []string{
		cliFlag("key", c.Key),
		cliFlag("user", c.User),
		cliFlag("server", c.Server),
	}
}
// configArgs declares the CLI flags and their environment-variable sources
// (both the plain and the Drone PLUGIN_-prefixed forms) that populate
// Config.
var configArgs = []cli.Flag{
 cli.StringFlag{
 Name: "app.id",
 Usage: "updateservicectl --app-id",
 EnvVar: "APP_ID,PLUGIN_APP_ID",
 },
 cli.StringFlag{
 Name: "key",
 Usage: "updateservicectl --key",
 EnvVar: "KEY,PLUGIN_KEY",
 },
 cli.StringFlag{
 Name: "user",
 Usage: "updateservicectl --user",
 EnvVar: "CTL_USER,PLUGIN_USER",
 },
 cli.StringFlag{
 Name: "server",
 Usage: "updateservicectl --server",
 EnvVar: "SERVER,PLUGIN_SERVER",
 },
 cli.StringFlag{
 Name: "pkg.file",
 Usage: "updateservicectl package [create || upload] --file",
 EnvVar: "PKG_FILE,PLUGIN_PKG_FILE",
 },
 cli.StringFlag{
 Name: "pkg.src",
 Usage: "target directory to tarball",
 EnvVar: "PKG_SRC,PLUGIN_PKG_SRC",
 },
 cli.StringFlag{
 Name: "channel",
 Usage: "updateservicectl channel",
 EnvVar: "CHANNEL,PLUGIN_CHANNEL",
 },
 cli.StringFlag{
 Name: "publish",
 Usage: "updateservicectl channel --publish",
 EnvVar: "PUBLISH,PLUGIN_PUBLISH",
 },
}
|
package bindata
import (
"bytes"
"errors"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
// Sentinel errors distinguishing the two "wrong node type" failures: a path
// resolved to a directory where a file was expected, or vice versa.
var (
 errIsDirectory = errors.New("is a directory")
 errIsFile = errors.New("is a file")
)
// dir is an in-memory implementation of vfs.FileSystem
// It also implements http.File and os.FileInfo for itself.
type dir struct {
 name string // base name of this directory, not the full path
 files map[string]*file // child files keyed by base name; may be nil
 dirs map[string]*dir // child directories keyed by base name; may be nil
}
// FileSystem interface
// Open resolves path (cleaned, leading separators stripped) against the
// in-memory tree, returning the matching directory node or a fresh file
// handle, or os.ErrNotExist-wrapping errors when resolution fails.
func (d *dir) Open(path string) (http.File, error) {
 path = strings.TrimLeft(filepath.Clean(path), string([]rune{filepath.Separator}))
 //log.Printf("bindata: open %q", path)
 switch path {
 case "", "..":
 return nil, &os.PathError{"open", path, os.ErrNotExist}
 case ".":
 // "." names the root directory itself.
 return d, nil
 }
 // Walk the tree one path component at a time.
 components := strings.Split(path, string([]rune{os.PathSeparator}))
 current := d
 for i, c := range components {
 if i < len(components)-1 {
 // is a directory
 if current.dirs == nil {
 // current dir has no subdirs
 return nil, &os.PathError{"open", path, os.ErrNotExist}
 }
 if dd, ok := current.dirs[c]; ok {
 current = dd
 } else {
 // current dir has no such subdir
 return nil, &os.PathError{"open", path, os.ErrNotExist}
 }
 } else {
 // is the target file or directory
 if current.files != nil {
 if f := current.file(c); f != nil {
 return f, nil
 }
 }
 if current.dirs != nil {
 if d, ok := current.dirs[c]; ok {
 return d, nil
 }
 }
 return nil, &os.PathError{"open", path, os.ErrNotExist}
 }
 }
 return nil, os.ErrNotExist
}
// Walk runs fn over the subtree rooted at path, mirroring filepath.Walk for
// this in-memory filesystem; it fails when path cannot be opened or names a
// file rather than a directory.
func (d *dir) Walk(path string, fn filepath.WalkFunc) error {
	node, err := d.Open(path)
	if err != nil {
		return err
	}
	subdir, ok := node.(*dir)
	if !ok {
		return &os.PathError{"walk", path, errIsFile}
	}
	return subdir.walk(path, fn)
}
// walk recursively applies fn to this directory, then to each of its files
// and subdirectories; the path argument carries the accumulated path prefix
// down the stack. It never reports an error (fn's results are ignored).
func (d *dir) walk(path string, fn filepath.WalkFunc) error {
	fn(path, d, nil)
	// Ranging over a nil map is a no-op, so no nil guards are needed.
	for name := range d.files {
		fn(filepath.Join(path, name), d.file(name), nil)
	}
	for name, sub := range d.dirs {
		sub.walk(filepath.Join(path, name), fn)
	}
	return nil
}
// http.File interface
// Close is a no-op: in-memory directories hold no OS resources.
func (d *dir) Close() error {
 return nil
}
// Read always fails: directories have no byte content.
func (d *dir) Read(p []byte) (int, error) {
 return 0, &os.PathError{"read", d.name, errIsDirectory}
}
// Readdir lists every child (files first, then subdirectories).
// NOTE(review): the count argument is ignored — all entries are always
// returned.
func (d *dir) Readdir(count int) ([]os.FileInfo, error) {
 fis := make([]os.FileInfo, 0, len(d.files)+len(d.dirs))
 for name := range d.files {
 fis = append(fis, d.file(name))
 }
 for _, dir := range d.dirs {
 fis = append(fis, dir)
 }
 return fis, nil
}
// Seek always fails: directories are not seekable.
func (d *dir) Seek(offset int64, whence int) (int64, error) {
 return 0, &os.PathError{"seek", d.name, errIsDirectory}
}
// Stat returns the directory itself, which implements os.FileInfo.
func (d *dir) Stat() (os.FileInfo, error) {
 return d, nil
}
// os.FileInfo interface
func (d *dir) Name() string { return d.name }
func (d *dir) Size() int64 { return 0 } // directories report zero size
func (d *dir) Mode() os.FileMode { return os.ModeDir | 0400 } // read-only directory
func (d *dir) ModTime() time.Time { return startupTime } // fixed at process start
func (d *dir) IsDir() bool { return true }
func (d *dir) Sys() interface{} { return d }
// file returns a fresh, independently seekable handle for the named child
// file, or nil when no such file exists. Each call allocates its own
// bytes.Reader so concurrent readers never share a cursor.
func (d *dir) file(name string) *file {
	stored, ok := d.files[name]
	if !ok {
		return nil
	}
	return &file{
		name:   stored.name,
		mod:    stored.mod,
		Reader: bytes.NewReader(stored.data),
	}
}
// file is an in-memory file node implementing http.File and os.FileInfo.
// Stored instances carry data; opened handles (returned by dir.file) carry
// a live *bytes.Reader positioned at the start of that data.
type file struct {
 name string
 mod time.Time
 // sort of like a union (either Reader when opened for reading or []byte
 // for storage)
 data []byte
 *bytes.Reader
}
// http.File interface
// Seek(int64, int) (int64, error) implemented by *bytes.Reader
// Close is a no-op: in-memory files hold no OS resources.
func (f *file) Close() error {
 return nil
}
// Readdir always fails: a file has no directory entries.
func (f *file) Readdir(count int) ([]os.FileInfo, error) {
 return nil, &os.PathError{"readdir", f.name, errIsFile}
}
// Stat returns the file itself, which implements os.FileInfo.
func (f *file) Stat() (os.FileInfo, error) {
 return f, nil
}
// os.FileInfo interface
// Size() int64 is implemented in *bytes.Reader
func (f *file) Name() string { return f.name }
func (f *file) Mode() os.FileMode { return 0400 } // read-only regular file
func (f *file) ModTime() time.Time { return f.mod }
func (f *file) IsDir() bool { return false }
func (f *file) Sys() interface{} { return f }
|
package errutil
import (
"errors"
"fmt"
"testing"
)
// recovered runs f and converts any panic it raises into an ordinary error
// (formatted with %v); it returns nil when f completes normally.
func recovered(f func()) (err error) {
	defer func() {
		if v := recover(); v != nil {
			err = fmt.Errorf("%v", v)
		}
	}()
	f()
	return nil
}
// TestFirst verifies that First returns the first non-nil error in argument
// order, and nil for all-nil or empty input.
func TestFirst(t *testing.T) {
	errA := errors.New("first error")
	errB := errors.New("second error")
	cases := []struct {
		in   []error
		want error
	}{
		{[]error{nil, nil, nil, nil}, nil},
		{[]error{errA, nil, nil, nil}, errA},
		{[]error{nil, nil, errA, errB}, errA},
		{[]error{nil, errB, nil, errA}, errB},
		{[]error{}, nil},
	}
	for _, c := range cases {
		if got := First(c.in...); got != c.want {
			t.Errorf("First(%v) = '%v', expected '%v'", c.in, got, c.want)
		}
	}
}
// TestFatalIf checks both sides of FatalIf's contract: nil passes through
// silently, while a non-nil error panics with a "FATAL: <err>" message
// (observed via recovered).
func TestFatalIf(t *testing.T) {
	cases := []struct {
		in       error
		wantFail bool
	}{
		{errors.New("test error"), true},
		{nil, false},
	}
	for _, c := range cases {
		got := recovered(func() { FatalIf(c.in) })
		if !c.wantFail {
			if got != nil {
				t.Errorf("FatalIf(%v): expected to pass, but it hasn't", c.in)
			}
			continue
		}
		if got == nil {
			t.Errorf("FatalIf(%v): expected to fail, but it hasn't", c.in)
		}
		if want, have := fmt.Sprintf("FATAL: %v", c.in), got.Error(); want != have {
			t.Errorf("FatalIf(%v): unexpected error, want = %q, got = %q", c.in, want, have)
		}
	}
}
|
package random
import (
"math/rand"
"time"
)
// r is the package-wide pseudo-random source used by Generate.
// NOTE(review): math/rand is not cryptographically secure; do not use the
// output for secrets or tokens.
var r *rand.Rand
// init seeds r once from the wall clock so each process produces a
// different sequence.
func init() {
 r = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// Generate returns a random string of length strlen drawn from the 36
// lowercase alphanumeric characters; strlen <= 0 yields "".
//
// Improvements over the original: the result is assembled into a pre-sized
// byte slice instead of repeated string concatenation (which allocated a
// fresh string per character), and single bytes are indexed directly rather
// than sliced.
func Generate(strlen int) string {
	const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
	if strlen <= 0 {
		return ""
	}
	buf := make([]byte, strlen)
	for i := range buf {
		buf[i] = chars[r.Intn(len(chars))]
	}
	return string(buf)
}
|
package codec
import (
"fmt"
"os"
"time"
)
type Message struct {
data []byte
counter uint64
}
func (m *Message) Data() []byte {
return m.data
}
func MakeMsg(f *os.File, size uint64) (Message, error) {
data := make([]byte, size)
m := Message{data, 0}
_, e := f.Read(data)
return m, e
}
func MsToTime(ms uint64) time.Time {
//return time.Unix(0, int64(ms)*int64(time.Millisecond))
return time.Unix(int64(ms)/1000, 0)
}
// Get returns the next inc bytes of the message and advances the cursor.
// Requesting more bytes than remain panics with a diagnostic showing how
// many were still available.
func (m *Message) Get(inc uint64) []byte {
	end := m.counter + inc
	if end > uint64(len(m.data)) {
		remaining := uint64(len(m.data)) - m.counter
		panic(fmt.Sprintf("Only %v available but %v requested.", remaining, inc))
	}
	out := m.data[m.counter:end]
	m.counter = end
	return out
}
// ToInt decodes data as a little-endian unsigned integer. Note that Rev
// reverses data in place, so the caller's slice is mutated.
func ToInt(data []byte) uint64 {
	return Uvarint(Rev(data))
}
// Rev reverses buf in place and returns it to allow call chaining.
func Rev(buf []byte) []byte {
	for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
		buf[i], buf[j] = buf[j], buf[i]
	}
	return buf
}
// Uvarint decodes buf as a little-endian unsigned integer: byte i
// contributes bits 8i..8i+7 (bytes past the eighth shift out of range).
func Uvarint(buf []byte) uint64 {
	var out uint64
	for idx, b := range buf {
		out |= uint64(b) << (uint8(idx) * 8)
	}
	return out
}
|
package servers
import (
"github.com/s-matyukevich/centurylink_sdk/base"
"github.com/s-matyukevich/centurylink_sdk/models"
"time"
)
// GetServerRes is the deserialized response of the "get server" API call:
// the server's identity, status, detail block and HATEOAS links, plus the
// API connection used to resolve follow-up links.
type GetServerRes struct {
 Connection base.Connection
 Id string
 Name string
 Description string
 GroupId string
 IsTemplate bool
 LocationId string
 OsType string
 Status string
 Details DetailsDef
 Type string
 StorageType string
 ChangeInfo ChangeInfoDef
 Links []models.Link
}
// DetailsDef holds the hardware/runtime detail block of a server response.
// NOTE(review): CustomFields references element type CustomFieldDef while
// this file declares CustomFieldsDef (plural) — confirm the singular type
// is defined elsewhere in the package.
type DetailsDef struct {
 IpAddresses []IpAddressDef
 AlertPolicies []AlertPolicyDef
 Cpu int
 DiskCount int
 HostName string
 InMaintenanceMode bool
 MemoryMB int
 PowerState string
 StorageGB int
 Snapshots []SnapshotsDef
 CustomFields []CustomFieldDef
}
// IpAddressDef pairs a server's public address with its internal one.
type IpAddressDef struct {
 Public string
 Internal string
}
// AlertPolicyDef identifies an alert policy attached to the server.
type AlertPolicyDef struct {
 Id string
 Name string
 Links []models.Link
}
// SnapshotsDef names a server snapshot together with its related links.
type SnapshotsDef struct {
 Name string
 Links []models.Link
}
// CustomFieldsDef is one user-defined key/value attached to the server.
type CustomFieldsDef struct {
 Id string
 Name string
 Value string
 DisplayValue string
}
// ChangeInfoDef records audit metadata: who created and last modified the
// server, and when.
type ChangeInfoDef struct {
 CreatedDate time.Time
 CreatedBy string
 ModifiedDate time.Time
 ModifiedBy string
}
// Compile-time check that GetServerRes satisfies models.LinkModel.
var _ models.LinkModel = (*GetServerRes)(nil)
// GetLinks returns the HATEOAS links attached to this resource.
func (r *GetServerRes) GetLinks() []models.Link {
 return r.Links
}
// GetConnection returns the API connection used to fetch this resource.
func (r *GetServerRes) GetConnection() base.Connection {
 return r.Connection
}
// SetConnection stores the API connection so later link resolution can
// reuse it.
func (r *GetServerRes) SetConnection(connection base.Connection) {
 r.Connection = connection
}
// Self re-fetches this server resource by following its "self" link.
//
// Bug fix: the original passed the nil named-return pointer straight into
// models.ResolveLink, leaving the resolved document nowhere to be decoded;
// res is now allocated first.
// NOTE(review): this assumes ResolveLink decodes into its third argument —
// confirm against the models package.
func (r *GetServerRes) Self() (res *GetServerRes, err error) {
	res = &GetServerRes{}
	err = models.ResolveLink(r, "self", res)
	return
}
|
package controller
import (
"gopetstore/src/config"
"gopetstore/src/domain"
"gopetstore/src/service"
"gopetstore/src/util"
"log"
"net/http"
"path/filepath"
"strconv"
)
// Template file names for each page of the ordering flow.
const (
 viewOrderFile = "viewOrder.html"
 initOrderFile = "initOrder.html"
 confirmOrderFile = "confirmOrder.html"
 shipFormFile = "shipForm.html"
 listOrdersFile = "listOrders.html"
)
// Absolute template paths, resolved once at startup from the configured
// front-end directory layout.
var (
 viewOrderPath = filepath.Join(config.Front, config.Web, config.Order, viewOrderFile)
 initOrderPath = filepath.Join(config.Front, config.Web, config.Order, initOrderFile)
 confirmOrderPath = filepath.Join(config.Front, config.Web, config.Order, confirmOrderFile)
 shipFormPath = filepath.Join(config.Front, config.Web, config.Order, shipFormFile)
 listOrderPath = filepath.Join(config.Front, config.Web, config.Order, listOrdersFile)
)
// [ViewInitOrder] -> initOrder -> [ConfirmOrderStep1] -> shipForm -> [ConfirmShip] ->
// confirmOrder -> [ConfirmOrderStep2] -> viewOrder
// ViewOrderList -> viewOrder
// ViewInitOrder renders the order-initialization page: for a logged-in user
// with a cart in session it builds a fresh Order, stores it in the session,
// and renders the init template; without a login it hands off to the login
// flow.
func ViewInitOrder(w http.ResponseWriter, r *http.Request) {
	account := util.GetAccountFromSession(r)
	if account == nil {
		// Not logged in: redirect to the login page.
		ViewLoginOrPostLogin(w, r)
		return
	}
	cart := util.GetCartFromSession(w, r, nil)
	if cart == nil {
		log.Print("ViewInitOrder cart is nil")
		return
	}
	o := domain.NewOrder(account, cart)
	s, err := util.GetSession(r)
	if err != nil {
		log.Printf("ViewInitOrder GetSession error: %v", err.Error())
	}
	if s == nil {
		log.Print("ViewInitOrder session is nil")
		return
	}
	if err = s.Save(config.OrderKey, o, w, r); err != nil {
		log.Printf("ViewInitOrder GetSession error: %v", err.Error())
	}
	m := map[string]interface{}{
		"Order":           o,
		"CreditCardTypes": []string{o.CardType},
	}
	if err = util.RenderWithAccountAndCommonTem(w, r, m, initOrderPath); err != nil {
		log.Printf("ViewInitOrder RenderWithAccountAndCommonTem error: %v", err.Error())
	}
}
// ConfirmOrderStep1 handles the first confirm click: it copies the billing
// form fields onto the pending order held in the session, then renders
// either the shipping-address form (when requested) or the confirmation
// page.
func ConfirmOrderStep1(w http.ResponseWriter, r *http.Request) {
	// get order from session
	s, err := util.GetSession(r)
	if err != nil {
		log.Printf("ConfirmOrderStep1 GetSession error: %v", err.Error())
	}
	if s == nil {
		return
	}
	re, _ := s.Get(config.OrderKey)
	// Bug fix: the type assertion was unchecked and panicked whenever the
	// session held no order (e.g. direct navigation to this URL).
	o, ok := re.(*domain.Order)
	if !ok {
		log.Print("ConfirmOrderStep1 no order in session")
		return
	}
	if err := r.ParseForm(); err != nil {
		log.Printf("ConfirmOrderStep1 ParseForm error: %v", err.Error())
		return
	}
	// Billing details from the posted form.
	o.CardType = r.FormValue("cardType")
	o.CreditCard = r.FormValue("creditCard")
	o.ExpiryDate = r.FormValue("expiryDate")
	o.BillToFirstName = r.FormValue("firstName")
	o.BillToLastName = r.FormValue("lastName")
	o.BillAddress1 = r.FormValue("address1")
	o.BillAddress2 = r.FormValue("address2")
	o.BillCity = r.FormValue("city")
	o.BillState = r.FormValue("state")
	o.BillZip = r.FormValue("zip")
	o.BillCountry = r.FormValue("country")
	m := make(map[string]interface{})
	m["Order"] = o
	if len(r.FormValue("shippingAddressRequired")) > 0 {
		// view shipForm
		if err := util.RenderWithAccountAndCommonTem(w, r, m, shipFormPath); err != nil {
			log.Printf("ConfirmOrderStep1 RenderWithAccountAndCommonTem error: %v", err.Error())
		}
		return
	}
	// view confirmOrder
	if err := util.RenderWithAccountAndCommonTem(w, r, m, confirmOrderPath); err != nil {
		log.Printf("ConfirmOrderStep1 RenderWithAccountAndCommonTem error: %v", err.Error())
	}
}
// ConfirmShip applies the posted shipping address to the order stored in the
// session and renders the order-confirmation page.
func ConfirmShip(w http.ResponseWriter, r *http.Request) {
	// get order from session
	s, err := util.GetSession(r)
	if err != nil {
		log.Printf("ConfirmShip GetSession error: %v", err.Error())
	}
	if s == nil {
		return
	}
	re, _ := s.Get(config.OrderKey)
	// Guard the type assertion: a missing session order would otherwise panic.
	o, ok := re.(*domain.Order)
	if !ok {
		log.Print("ConfirmShip no order in session")
		return
	}
	if err := r.ParseForm(); err != nil {
		log.Printf("ConfirmShip ParseForm error: %v", err.Error())
		return
	}
	o.ShipToFirstName = r.FormValue("shipToFirstName")
	o.ShipToLastName = r.FormValue("shipToLastName")
	o.ShipAddress1 = r.FormValue("shipAddress1")
	o.ShipAddress2 = r.FormValue("shipAddress2")
	o.ShipCity = r.FormValue("shipCity")
	o.ShipState = r.FormValue("shipState")
	o.ShipZip = r.FormValue("shipZip")
	o.ShipCountry = r.FormValue("shipCountry")
	m := make(map[string]interface{})
	m["Order"] = o
	if err = util.RenderWithAccountAndCommonTem(w, r, m, confirmOrderPath); err != nil {
		log.Printf("ConfirmShip RenderWithAccountAndCommonTem error: %v", err.Error())
	}
}
// ConfirmOrderStep2 persists the order built in the previous steps, clears
// the cart from the session and renders the final order view.
func ConfirmOrderStep2(w http.ResponseWriter, r *http.Request) {
	// get order from session
	s, err := util.GetSession(r)
	if err != nil {
		// BUG FIX: this log line was mislabelled "ConfirmShip".
		log.Printf("ConfirmOrderStep2 GetSession error: %v", err.Error())
	}
	if s == nil {
		return
	}
	re, _ := s.Get(config.OrderKey)
	// Guard the type assertion: a missing session order would otherwise panic.
	o, ok := re.(*domain.Order)
	if !ok {
		log.Print("ConfirmOrderStep2 no order in session")
		return
	}
	if err := service.InsertOrder(o); err != nil {
		log.Printf("ConfirmOrderStep2 InsertOrder error: %v", err.Error())
		return
	}
	// Empty the cart now that the order is stored.
	if err = s.Del(config.CartKey, w, r); err != nil {
		log.Printf("ConfirmOrderStep2 session del cart error: %v", err.Error())
	}
	m := map[string]interface{}{
		"Order": o,
	}
	if err = util.RenderWithAccountAndCommonTem(w, r, m, viewOrderPath); err != nil {
		log.Printf("ConfirmOrderStep2 RenderWithAccountAndCommonTem error: %v", err.Error())
	}
}
// ListOrders renders the order list for the account stored in the session.
func ListOrders(w http.ResponseWriter, r *http.Request) {
	s, err := util.GetSession(r)
	if err != nil {
		log.Printf("ListOrders GetSession error: %v", err.Error())
	}
	if s == nil {
		return
	}
	re, ok := s.Get(config.AccountKey)
	if !ok {
		return
	}
	a, ok := re.(*domain.Account)
	if !ok {
		return
	}
	orders, err := service.GetOrdersByUserName(a.UserName)
	if err != nil {
		// BUG FIX: previously the page was rendered anyway with a nil list.
		log.Printf("ListOrders GetOrdersByUserName error: %v", err.Error())
		return
	}
	m := map[string]interface{}{
		"OrderList": orders,
	}
	if err = util.RenderWithAccountAndCommonTem(w, r, m, listOrderPath); err != nil {
		log.Printf("ListOrders RenderWithAccountAndCommonTem error: %v", err.Error())
	}
}
// CheckOrder shows the detail page for a single order identified by the
// "orderId" query parameter.
func CheckOrder(w http.ResponseWriter, r *http.Request) {
	params := util.GetParam(r, "orderId")
	if len(params) == 0 {
		// BUG FIX: indexing an empty parameter list would panic.
		log.Print("CheckOrder missing orderId parameter")
		return
	}
	orderId, err := strconv.Atoi(params[0])
	if err != nil {
		// BUG FIX: previously execution continued with orderId == 0.
		log.Printf("CheckOrder error: %v", err.Error())
		return
	}
	o, err := service.GetOrderByOrderId(orderId)
	if err != nil {
		// BUG FIX: this error was previously discarded.
		log.Printf("CheckOrder GetOrderByOrderId error: %v", err.Error())
		return
	}
	m := map[string]interface{}{
		"Order": o,
	}
	if err = util.RenderWithAccountAndCommonTem(w, r, m, viewOrderPath); err != nil {
		log.Printf("CheckOrder RenderWithAccountAndCommonTem error: %v", err.Error())
	}
}
|
package types
// Film is a catalogue entry as exposed over the JSON API.
type Film struct {
	ID      int    `json:"id"`
	Name    string `json:"name"`
	Year    int    `json:"year"`
	AddedAt string `json:"added_at"`
	// BUG FIX: the tag was `json:"genres, omitempty"` — the space makes
	// encoding/json ignore the omitempty option entirely.
	Genres []Genre `json:"genres,omitempty"`
}
// PostFilm is the request body for creating a film. The embedded Film
// supplies the scalar fields; the Genres field (genre IDs as ints)
// deliberately shadows Film.Genres for input.
type PostFilm struct {
	*Film
	Genres []int `json:"genres"`
}
// GetFilmParams holds the query-string filters and paging options for
// listing films.
type GetFilmParams struct {
	Limit  int    `query:"limit"`
	Offset int    `query:"offset"`
	Year   int    `query:"year"`
	Genre  string `query:"genre"`
}
|
package device
//go:generate go run gen/gen_streams.go
// #cgo CFLAGS: -g -Wall
// #cgo LDFLAGS: -lSoapySDR
// #include <stdlib.h>
// #include <stddef.h>
// #include <SoapySDR/Device.h>
// #include <SoapySDR/Formats.h>
// #include <SoapySDR/Types.h>
import "C"
import (
"errors"
"github.com/pothosware/go-soapy-sdr/pkg/sdrerror"
"unsafe"
)
// GetStreamFormats queries a list of the available stream formats.
//
// Format:
// The first character selects the number type:
// - "C" means complex
// - "F" means floating point
// - "S" means signed integer
// - "U" means unsigned integer
// The type character is followed by the number of bits per number (complex is 2x this size per sample)
// Example format strings:
// - "CF32" - complex float32 (8 bytes per element)
// - "CS16" - complex int16 (4 bytes per element)
// - "CS12" - complex int12 (3 bytes per element)
// - "CS4" - complex int4 (1 byte per element)
// - "S32" - int32 (4 bytes per element)
// - "U8" - uint8 (1 byte per element)
//
// Params:
// - direction the channel direction RX or TX
// - channel an available channel on the device
//
// Return a list of allowed format strings.
func (dev *SDRDevice) GetStreamFormats(direction Direction, channel uint) []string {
	// length is filled in by the C call with the array element count.
	length := C.size_t(0)
	info := C.SoapySDRDevice_getStreamFormats(dev.device, C.int(direction), C.size_t(channel), &length)
	// Copy into Go strings, then release the C-allocated array.
	defer stringArrayClear(info, length)
	return stringArray2Go(info, length)
}
// GetNativeStreamFormat gets the hardware's native stream format for this channel.
//
// This is the format used by the underlying transport layer, and the direct buffer access API calls (when available).
//
// Params:
// - direction: the channel direction RX or TX
// - channel: an available channel on the device
//
// Return the native stream buffer format string and the maximum possible value
func (dev *SDRDevice) GetNativeStreamFormat(direction Direction, channel uint) (format string, fullScale float64) {
	// scale receives the device's maximum possible value for the format.
	scale := C.double(0.0)
	val := (*C.char)(C.SoapySDRDevice_getNativeStreamFormat(dev.device, C.int(direction), C.size_t(channel), &scale))
	defer C.free(unsafe.Pointer(val))
	// BUG FIX: previously returned float64(fullScale) — the zero-valued
	// named result — so the scale reported by the driver was always 0.
	return C.GoString(val), float64(scale)
}
// GetStreamArgsInfo queries the argument info description for stream args.
//
// Params:
// - direction: the channel direction RX or TX
// - channel: an available channel on the device
//
// Return a list of argument info structures
func (dev *SDRDevice) GetStreamArgsInfo(direction Direction, channel uint) []SDRArgInfo {
	// length is filled in by the C call with the list element count.
	length := C.size_t(0)
	info := C.SoapySDRDevice_getStreamArgsInfo(dev.device, C.int(direction), C.size_t(channel), &length)
	// Convert to Go structs, then release the C-allocated list.
	defer argInfoListClear(info, length)
	return argInfoList2Go(info, length)
}
// ReadStreamStatus reads status information about a stream.
//
// This call is typically used on a transmit stream to report time errors, underflows, and burst completion.
//
// Client code may continually poll readStreamStatus() in a loop. Implementations of readStreamStatus() should wait in
// the call for a status change event or until the timeout expiration. When stream status is not implemented on a
// particular stream, readStreamStatus() should return SOAPY_SDR_NOT_SUPPORTED. Client code may use this indication to
// disable a polling loop.
//
// Params:
// - stream the stream from which to retrieve the status
// - chanMask to which channels this status applies
// - flags optional input flags and output flags
// - timeNs the buffer's timestamp in nanoseconds
// - timeoutUs the timeout in microseconds
//
// Return the buffer's timestamp in nanoseconds in case of success, an error otherwise
func readStreamStatus(stream SDRStream, chanMask []uint, flags []int, timeoutUs uint) (timeNs uint, err error) {
	// One flags slot is required per stream channel.
	if uint(len(flags)) != stream.getNbChannels() {
		// BUG FIX: the message previously read "the same number of chanMask
		// as the stream", which did not describe the check performed.
		return 0, errors.New("the flags buffer must have the same number of entries as the stream has channels")
	}
	cFlags := (*C.int)(unsafe.Pointer(&flags[0]))
	// Convert the requested chanMask to a C size_t list; freed below.
	channelMasks, _ := go2SizeTList(chanMask)
	defer C.free(unsafe.Pointer(channelMasks))
	cTimeNs := C.longlong(0)
	result := int(
		C.SoapySDRDevice_readStreamStatus(
			stream.getDevice(),
			stream.getStream(),
			channelMasks,
			cFlags,
			&cTimeNs,
			C.long(timeoutUs)))
	if result < 0 {
		// Negative return codes map to SoapySDR error values.
		return 0, sdrerror.Err(result)
	}
	return uint(cTimeNs), nil
}
// getNumDirectAccessBuffers returns How many direct access buffers can the stream provide.
//
// This is the number of times the user can call acquire() on a stream without making subsequent calls to
// release(). A return value of 0 means that direct access is not supported.
//
// Params:
// - stream the stream from which to retrieve the status
//
// Return the number of direct access buffers or 0
func getNumDirectAccessBuffers(stream SDRStream) uint {
	// Thin wrapper: 0 means direct buffer access is not supported.
	return uint(C.SoapySDRDevice_getNumDirectAccessBuffers(stream.getDevice(), stream.getStream()))
}
|
// This file should be auto generated
package selectionsort
import (
"testing"
"github.com/seifer/go-dsa/sort/internal/testutil"
)
// TestInts sorts the shared integer fixture in place and fails the test if
// the result is not sorted.
func TestInts(t *testing.T) {
	numbers := testutil.InputInts()
	Ints(numbers)
	sorted := testutil.IsSortedInts(numbers)
	if !sorted {
		t.Fail()
	}
}
|
package main
import (
"crypto/ecdsa"
"fmt"
"log"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
//"github.com/ethereum/go-ethereum/common"
"encoding/hex"
"github.com/sanguohot/medichain/util"
)
// main demonstrates ECDSA key handling with go-ethereum: it derives the
// public key and address from a fixed private key, signs a Keccak-256 hash
// of "hello", splits the signature into r/s/v, and verifies it.
func main() {
	// Fixed demo key — never use a hard-coded private key in production.
	privateKeyStr := "7aaf3e2786ff4b38f4aceb6f86ff4a3670206376087d4bd0f041f91e61412e66"
	privateKey, err := crypto.HexToECDSA(privateKeyStr)
	if err != nil {
		log.Fatal(err)
	}
	publicKey := privateKey.Public()
	publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
	if !ok {
		log.Fatal("error casting public key to ECDSA")
	}
	// Uncompressed public key bytes (0x04-prefixed).
	publicKeyBytes := crypto.FromECDSAPub(publicKeyECDSA)
	fmt.Println("publicKey ===>", hexutil.Encode(publicKeyBytes))
	data := []byte("hello")
	hash := crypto.Keccak256Hash(data)
	fmt.Println("data ===>", string(data))
	fmt.Println("keccak256Hash ===>", hash.Hex())
	// Sign the 32-byte hash; the result is 65 bytes: r || s || recovery id.
	signature, err := crypto.Sign(hash.Bytes(), privateKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hexutil.Encode(signature)[2:]) // 85c4b5350fd0c3ff39d6f0984fe32cf92fb946a2639fd54b26765528809b7c702a9ff35d165b0f1e5345a111b92f3892b0af0a7ba1770248ef1e602272ec812700
	// Round-trip through hex to obtain the raw signature bytes again.
	signBytes, err := hex.DecodeString(hexutil.Encode(signature)[2:])
	if err != nil {
		log.Fatal(err)
	}
	// Split the signature into its r, s and v components.
	r, s, v := util.SigRSV(signBytes)
	fmt.Println("private key ===>", privateKeyStr)
	fmt.Println("public key ===>", hexutil.Encode(publicKeyBytes)[4:])
	fmt.Println("address ===>", crypto.PubkeyToAddress(*publicKeyECDSA).Hex())
	fmt.Println("r ===>", hexutil.Encode(r[:])[:])
	fmt.Println("s ===>",hexutil.Encode(s[:])[:])
	fmt.Println("v ===>", v)
	// VerifySignature expects the 64-byte signature without the recovery id.
	signatureNoRecoverID := signature[:len(signature)-1] // remove recovery id
	verified := crypto.VerifySignature(publicKeyBytes, hash.Bytes(), signatureNoRecoverID)
	fmt.Println(verified) // true
}
|
package models
import ()
// Lyric is the text of a track by an artist.
type Lyric struct {
	Id      int     // primary key
	Artist  *Artist // performing artist
	Track   *Track  // track the lyric belongs to
	Content string  // lyric text
}
|
package main
import (
"encoding/binary"
"encoding/json"
"github.com/boltdb/bolt"
"log"
)
// Queue is a persistent task queue backed by a bolt database.
type Queue struct {
	DB *bolt.DB
}

// QUEUE is the name of the bolt bucket that stores tasks.
var QUEUE = []byte("QUEUE")
// OpenQueue opens (or creates) queued.db and makes sure the QUEUE bucket
// exists. It terminates the process on any failure.
func OpenQueue() Queue {
	db, err := bolt.Open("queued.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the Update error was previously dropped; a failed bucket
	// creation would only surface later as a nil-bucket panic.
	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(QUEUE)
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
	return Queue{DB: db}
}
// insertNewTask assigns the Task a fresh bolt sequence ID and persists it.
// The task is returned for chaining; failures are logged.
func (q *Queue) insertNewTask(t *Task) *Task {
	// BUG FIX: the Update error (and NextSequence error) were silently
	// discarded; they are now surfaced in the log.
	err := q.DB.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(QUEUE)
		id, err := b.NextSequence()
		if err != nil {
			return err
		}
		t.ID = int(id)
		return b.Put(itob(t.ID), t.toJSON())
	})
	if err != nil {
		log.Println("insertNewTask:", err)
	}
	return t
}
// insertTask stores an updated Task under its existing ID; failures are
// logged rather than silently discarded as before.
func (q *Queue) insertTask(t *Task) *Task {
	if err := q.DB.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(QUEUE)
		return b.Put(itob(t.ID), t.toJSON())
	}); err != nil {
		log.Println("insertTask:", err)
	}
	return t
}
// readTask loads the Task with the given id. The zero Task is returned when
// the id is unknown or the stored JSON cannot be decoded.
func (q *Queue) readTask(id int) Task {
	t := Task{}
	err := q.DB.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(QUEUE)
		raw := b.Get(itob(id))
		if raw == nil {
			// Unknown id: leave t as the zero value (previous behavior).
			return nil
		}
		// BUG FIX: the Unmarshal error was previously swallowed.
		return json.Unmarshal(raw, &t)
	})
	if err != nil {
		log.Println("readTask:", err)
	}
	return t
}
// itob encodes v as an 8-byte big-endian slice, the key format used by the
// bolt bucket.
func itob(v int) []byte {
	var key [8]byte
	binary.BigEndian.PutUint64(key[:], uint64(v))
	return key[:]
}
// Close closes the Queue's DB. Should be deferred by main.
// NOTE(review): the Close error is discarded — confirm that is acceptable.
func (q *Queue) Close() {
	q.DB.Close()
}
|
package types
// PersonList is a slice of Person pointers with collection helpers.
type PersonList []*Person
// PersonToBool is a predicate over a single Person.
type PersonToBool func(*Person) bool
// Filter returns a new PersonList containing the elements of al for which
// the predicate f reports true. al itself is not modified.
func (al PersonList) Filter(f PersonToBool) PersonList {
	var kept PersonList
	for _, person := range al {
		if !f(person) {
			continue
		}
		kept = append(kept, person)
	}
	return kept
}
|
package main
import (
"fmt"
)
// searchMatrix reports whether target occurs in matrix, where each row is
// sorted left-to-right and each column top-to-bottom. It runs a staircase
// search from the bottom-left corner in O(rows+cols).
func searchMatrix(matrix [][]int, target int) bool {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return false
	}
	row, col := len(matrix)-1, 0
	for row >= 0 && col < len(matrix[0]) {
		switch v := matrix[row][col]; {
		case v < target:
			// Everything above in this column is smaller: move right.
			col++
		case v > target:
			// Everything right in this row is larger: move up.
			row--
		default:
			return true
		}
	}
	return false
}
// main is intentionally empty; searchMatrix is the unit under test.
func main() {
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"github.com/kataras/iris"
"github.com/kataras/iris/config"
"github.com/kataras/iris/websocket"
)
// clientPage holds the template data for the chat client page.
type clientPage struct {
	Title string
	Host  string
}

// Message is a chat message exchanged over the websocket.
type Message struct {
	Message string `json:"message"`
	User    string `json:"user"`
}

// Resource is a generic JSON payload wrapper.
type Resource struct {
	Data string `json:"data"`
}
// main starts an iris HTTP server with a websocket endpoint at /ws that
// implements a single chat room: every connection joins "room:1", incoming
// "message" events are broadcast to the room, and a system notice is sent on
// connect.
func main() {
	api := iris.New()
	// Serve the prebuilt client assets.
	api.Static("/static", "./templates/dist", 1)
	wsConf := config.DefaultWebsocket()
	wsConf.Endpoint = "/ws"
	ws := websocket.New(api, wsConf, api.Logger)
	var room = "room:1"
	ws.OnConnection(func(c websocket.Connection) {
		c.Join(room)
		// Relay each chat message to everyone in the room.
		c.On("message", func(message string) {
			c.To(room).Emit("message", toJson(&Message{
				Message: message,
				User:    c.ID(),
			}))
		})
		// Announce the new connection with a system message.
		c.To(room).Emit("message", toJson(&Message{
			Message: "System Message",
			User:    "system",
		}))
		c.OnDisconnect(func() {
			fmt.Println("Connection closed: " + c.ID())
		})
	})
	api.Listen(":9090")
}
// toJson marshals message to JSON. On failure it logs the error and returns
// the (nil) marshal result unchanged.
func toJson(message *Message) []byte {
	encoded, err := json.Marshal(message)
	if err != nil {
		log.Println(err)
	}
	return encoded
}
|
package engine
import (
chess "github.com/Yoshi-Exeler/chesslib"
opening "github.com/Yoshi-Exeler/chesslib/opening"
)
// byMVVLVA sorts candidate nodes for search-order heuristics (PV first,
// killers, promotions, MVV-LVA captures, checks). It also carries the search
// parameters of the worker doing the ordering.
type byMVVLVA struct {
	Nodes          []*Node     // the nodes being ordered
	Worker         *Worker     // worker supplying killer-move and capture info
	TPV            *chess.Move // previously established principal-variation move
	Depth          int
	DepthRemaining int
	Alpha          int16
	Beta           int16
	Max            bool
	Quiescence     bool // true when ordering inside quiescence search
}
// Len and Swap implement sort.Interface over the node list.
func (a byMVVLVA) Len() int      { return len(a.Nodes) }
func (a byMVVLVA) Swap(i, j int) { a.Nodes[i], a.Nodes[j] = a.Nodes[j], a.Nodes[i] }
// Less orders moves for the search: the transposition PV move first, then
// killer moves, promotions, MVV-LVA among captures, then checks, then
// captures over quiet moves.
// NOTE(review): each comparison is one-sided (it returns true when i has the
// property and j lacks it but never false in the mirrored case), so this is
// not a strict weak ordering — sort results may be inconsistent; confirm
// this is intended.
func (a byMVVLVA) Less(i, j int) bool {
	// Check for previously established pvs
	if !a.Quiescence && SameMove(a.Nodes[i].Value, a.TPV) && !SameMove(a.Nodes[j].Value, a.TPV) {
		return true
	}
	// Killer Moves have second highest priority
	if a.Worker.IsKillerMove(a.Nodes[i].Value, a.Nodes[i].Depth) && !a.Worker.IsKillerMove(a.Nodes[j].Value, a.Nodes[j].Depth) {
		return true
	}
	// Promotions will be searched next
	if a.Nodes[i].Value.Promo() != chess.NoPieceType && a.Nodes[j].Value.Promo() == chess.NoPieceType {
		return true
	}
	// If both moves are captures, sort them using MVVLVA
	if a.Nodes[i].Value.HasTag(chess.Capture) && a.Nodes[j].Value.HasTag(chess.Capture) {
		return a.Worker.captureValue(a.Nodes[i].Value) > a.Worker.captureValue(a.Nodes[j].Value)
	}
	// If a move is a check, search it before positionals
	// (NOTE: the original comments on these last two tests were swapped.)
	if a.Nodes[i].Value.HasTag(chess.Check) && !a.Nodes[j].Value.HasTag(chess.Check) {
		return true
	}
	// If one move is a capture, search it first
	if a.Nodes[i].Value.HasTag(chess.Capture) && !a.Nodes[j].Value.HasTag(chess.Capture) {
		return true
	}
	return false
}
// captureValue returns the material change caused by the capture:
// victim value minus attacker value (higher is a more attractive capture
// under MVV-LVA ordering).
func (w *Worker) captureValue(move *chess.Move) int16 {
	// Get the Victim and Attacker Pieces from the simulated board.
	victim := w.Simulation.Board().Piece(move.S2)
	attacker := w.Simulation.Board().Piece(move.S1)
	// Get the absolute material values for both pieces.
	victimValue := abs(pieceValues[victim])
	attackerValue := abs(pieceValues[attacker])
	// Calculate the Capture Differential and return it
	return victimValue - attackerValue
}
// byOpeningLength sorts openings by descending PGN length, so the longest
// (most specific) opening lines come first.
type byOpeningLength []*opening.Opening

func (a byOpeningLength) Len() int           { return len(a) }
func (a byOpeningLength) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byOpeningLength) Less(i, j int) bool { return len(a[i].PGN()) > len(a[j].PGN()) }
|
package router
import (
"testing"
"github.com/AsynkronIT/protoactor-go/actor"
"github.com/stretchr/testify/mock"
)
// TestPoolRouterActor_Receive_AddRoute verifies that an AddRoutee message
// makes the router watch the new PID and add it to the routee set.
func TestPoolRouterActor_Receive_AddRoute(t *testing.T) {
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1 := actor.NewLocalPID("p1")
	c := new(mockContext)
	c.On("Message").Return(&AddRoutee{p1})
	// The router must start watching the added routee exactly once.
	c.On("Watch", p1).Once()
	state.On("GetRoutees").Return(&actor.PIDSet{})
	state.On("SetRoutees", actor.NewPIDSet(p1)).Once()
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c)
}
// TestPoolRouterActor_Receive_AddRoute_NoDuplicates verifies that adding a
// PID which is already a routee neither watches it nor rewrites the set
// (no Watch/SetRoutees expectations are registered).
func TestPoolRouterActor_Receive_AddRoute_NoDuplicates(t *testing.T) {
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1 := actor.NewLocalPID("p1")
	c := new(mockContext)
	c.On("Message").Return(&AddRoutee{p1})
	// p1 is already present in the routee set.
	state.On("GetRoutees").Return(actor.NewPIDSet(p1))
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c)
}
// TestPoolRouterActor_Receive_RemoveRoute verifies that a RemoveRoutee
// message unwatches the PID, poisons it and removes it from the routee set.
func TestPoolRouterActor_Receive_RemoveRoute(t *testing.T) {
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1, pr1 := spawnMockProcess("p1")
	defer removeMockProcess(p1)
	// The removed routee must receive a PoisonPill exactly once.
	pr1.On("SendUserMessage", p1, &actor.PoisonPill{}, nilPID).Once()
	p2 := actor.NewLocalPID("p2")
	c := new(mockContext)
	c.On("Message").Return(&RemoveRoutee{p1})
	c.On("Unwatch", p1).Once()
	state.On("GetRoutees").Return(actor.NewPIDSet(p1, p2))
	// Only p2 remains after removal.
	state.On("SetRoutees", actor.NewPIDSet(p2)).Once()
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c)
}
// TestPoolRouterActor_Receive_BroadcastMessage verifies that a
// BroadcastMessage is forwarded to every routee (two sends for two PIDs).
func TestPoolRouterActor_Receive_BroadcastMessage(t *testing.T) {
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1 := actor.NewLocalPID("p1")
	p2 := actor.NewLocalPID("p2")
	child := new(mockProcess)
	// One mock process backs both PIDs, so it expects two sends in total.
	child.On("SendUserMessage", mock.Anything, mock.Anything, mock.Anything).Times(2)
	actor.ProcessRegistry.Add(child, "p1")
	actor.ProcessRegistry.Add(child, "p2")
	defer func() {
		actor.ProcessRegistry.Remove(&actor.PID{Id: "p1"})
		actor.ProcessRegistry.Remove(&actor.PID{Id: "p2"})
	}()
	c := new(mockContext)
	c.On("Message").Return(&BroadcastMessage{"hi"})
	c.On("Sender").Return((*actor.PID)(nil))
	state.On("GetRoutees").Return(actor.NewPIDSet(p1, p2))
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c, child)
}
|
package bt
import (
"bytes"
"context"
"crypto/sha1"
"errors"
"bufio"
"github.com/neoql/btlet/bencode"
"github.com/neoql/btlet/tools"
)
// FetchMetadata fetches the torrent metadata (info dictionary) identified by
// infoHash from a single peer at host, using the BitTorrent extension
// protocol (ut_metadata). It returns the raw metadata bytes or an error.
// The ctx only cancels between messages, not inside a blocking read.
func FetchMetadata(ctx context.Context, infoHash string, host string) (RawMeta, error) {
	// connect to peer with the extension-protocol bit set in reserved
	var reserved uint64
	SetExtReserved(&reserved)
	stream, err := DialUseTCP(host, infoHash, tools.RandomString(20), reserved)
	if err != nil {
		return nil, err
	}
	defer stream.Close()
	// peer send different info_hash
	if stream.InfoHash() != infoHash {
		return nil, errors.New("handshake failed: different info_hash")
	}
	if !CheckExtReserved(stream.Reserved()) {
		// not support extensions
		return nil, errors.New("not support extensions")
	}
	proto := NewExtProtocol()
	r := bufio.NewReader(stream)
	w := bufio.NewWriter(stream)
	fmExt := NewFetchMetaExt(infoHash)
	proto.RegistExt(fmExt)
	// Send our extended handshake advertising ut_metadata.
	err = proto.WriteHandshake(w)
	if err != nil {
		return nil, err
	}
	err = w.Flush()
	if err != nil {
		return nil, err
	}
	// Read loop: handle extension messages until all pieces arrive.
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
		}
		// 99999 bytes comfortably covers a 16 KiB metadata piece + headers.
		message, err := ReadMessageWithLimit(r, 99999)
		if err != nil {
			return nil, err
		}
		// Ignore keep-alives and non-extension messages.
		if len(message) == 0 || message[0] != ExtID {
			continue
		}
		err = proto.HandlePayload(message[1:], w)
		if err != nil {
			return nil, err
		}
		err = w.Flush()
		if err != nil {
			return nil, err
		}
		// NOTE(review): this support check runs after every extension
		// message; if a non-handshake extension message arrives first,
		// the fetch aborts even though the peer may support ut_metadata —
		// confirm message ordering guarantees.
		if !fmExt.IsSupoort() {
			return nil, errors.New("not support Extension for Peers to Send Metadata Files")
		}
		if fmExt.CheckDone() {
			meta, err := fmExt.FetchRawMeta()
			// On a hash mismatch keep listening rather than failing.
			if err == nil {
				return meta, nil
			}
		}
	}
}
// ut_metadata message types as defined by BEP 9.
const (
	request = iota
	data
	reject
)

const (
	// maxPieceSize is the fixed metadata piece size from BEP 9 (16 KiB).
	maxPieceSize = 16 * 1024
)
// FetchMetaExt is an extension-protocol handler that downloads the torrent
// metadata piece by piece and verifies it against infoHash.
type FetchMetaExt struct {
	infoHash string   // expected SHA-1 of the assembled metadata
	pieces   [][]byte // received pieces, indexed by piece number
	support  bool     // whether the peer advertised ut_metadata
}
// NewFetchMetaExt returns a new FetchMetaExt for the given info hash.
func NewFetchMetaExt(infoHash string) *FetchMetaExt {
	ext := new(FetchMetaExt)
	ext.infoHash = infoHash
	return ext
}
// MapKey implements Extension.MapKey; "ut_metadata" is the BEP 9 key.
func (fm *FetchMetaExt) MapKey() string {
	return "ut_metadata"
}

// IsSupoort returns true if peer support this extension other false.
// Should use it after extesion handshake.
// NOTE(review): the name misspells "IsSupport" but is part of the exported
// API and is kept for compatibility.
func (fm *FetchMetaExt) IsSupoort() bool {
	return fm.support
}

// BeforeHandshake implements Extension.BeforeHandshake; nothing to add.
func (fm *FetchMetaExt) BeforeHandshake(hs ExtHSPutter) {}
// AfterHandshake implements Extension.AfterHandshake. It reads the peer's
// advertised metadata_size, sizes the piece table accordingly and requests
// every piece up front.
func (fm *FetchMetaExt) AfterHandshake(hs ExtHSGetter, sender *ExtMsgSender) error {
	fm.support = true
	var size int64
	ok := hs.Get("metadata_size", &size)
	if !ok {
		return errors.New("don't known metadata size")
	}
	if size <= 0 {
		return errors.New("wrong size")
	}
	piecesNum := getPiecesNum(size)
	fm.pieces = make([][]byte, piecesNum)
	// Request all pieces immediately; responses arrive in HandleMessage.
	for i := 0; i < piecesNum; i++ {
		m := map[string]int{
			"msg_type": request,
			"piece":    i,
		}
		b, err := bencode.Marshal(m)
		if err != nil {
			return err
		}
		err = sender.SendBytes(b)
		if err != nil {
			return err
		}
	}
	return nil
}
// Unsupport implements Extension.Unsupport: marks that the peer did not
// advertise ut_metadata.
func (fm *FetchMetaExt) Unsupport() {
	fm.support = false
}
// HandleMessage implements Extension.HandleMessage. It decodes a bencoded
// ut_metadata message; for data messages, the bytes following the bencoded
// dictionary are the piece payload. Unknown message types are ignored.
func (fm *FetchMetaExt) HandleMessage(content []byte, sender *ExtMsgSender) error {
	var msg map[string]int
	dec := bencode.NewDecoder(bytes.NewReader(content))
	err := dec.Decode(&msg)
	if err != nil {
		return err
	}
	switch msg["msg_type"] {
	case reject:
		return errors.New("peer reject out request")
	case data:
		no := msg["piece"]
		// BUG FIX: bounds-check the peer-supplied piece index — a malicious
		// or buggy peer could otherwise crash us with an index-out-of-range
		// panic.
		if no < 0 || no >= len(fm.pieces) {
			return errors.New("peer sent invalid piece index")
		}
		fm.pieces[no] = content[dec.BytesParsed():]
	}
	return nil
}
// CheckDone reports whether every metadata piece has been received.
func (fm *FetchMetaExt) CheckDone() bool {
	for i := range fm.pieces {
		if len(fm.pieces[i]) == 0 {
			return false
		}
	}
	return true
}
// FetchRawMeta assembles the received pieces and verifies their SHA-1
// against the expected info hash before returning the raw metadata.
func (fm *FetchMetaExt) FetchRawMeta() (RawMeta, error) {
	metadata := bytes.Join(fm.pieces, nil)
	hash := sha1.Sum(metadata)
	// infoHash holds the raw 20-byte digest, so a direct byte compare works.
	if bytes.Equal(hash[:], []byte(fm.infoHash)) {
		return metadata, nil
	}
	return nil, errors.New("metadata's sha1 hash is different from info_hash")
}
// getPiecesNum returns how many 16 KiB pieces are needed to hold size bytes.
func getPiecesNum(size int64) int {
	n := size / maxPieceSize
	if size%maxPieceSize != 0 {
		// Partial trailing piece.
		n++
	}
	return int(n)
}
|
package models
import (
"encoding/json"
"io/ioutil"
"testing"
)
// BenchmarkCreateSubmission measures JSON decoding of a sample submission
// fixture into the Submission model.
func BenchmarkCreateSubmission(b *testing.B) {
	data, err := ioutil.ReadFile("./tests/submission.json")
	if err != nil {
		// BUG FIX: the read error was discarded, so a missing fixture made
		// the benchmark silently decode an empty string.
		b.Fatalf("reading fixture: %v", err)
	}
	submissionExampleJson := string(data)
	// Exclude fixture loading from the timed region.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sub := Submission{}
		json.Unmarshal([]byte(submissionExampleJson), &sub)
	}
}
|
package main
import "fmt"
// main is a tutorial walking through Go array basics: declaration, default
// zero values, literal initialization, length, value-copy semantics,
// iteration and two-dimensional arrays. (Comments translated to English.)
func main() {
	// declare an array
	var myArray [4]int
	fmt.Println(myArray) // [0 0 0 0]
	// elements that were not assigned default to 0
	myArray[0] = 11
	myArray[1] = 23
	fmt.Println(myArray) // [11 23 0 0]
	// declaration with initial values
	arrays := [3]int{1, 2, 3}
	// var arrays = [3]int {1,2,3}
	fmt.Println(arrays)
	// elements beyond the declared size cannot be used, e.g. arrays[3]
	// check the array length
	fmt.Println(len(myArray))
	// initialization without declaring the size
	arrays1 := [...]string{"nguyen", "thai", "duong"}
	fmt.Println(arrays1)
	// arrays can be assigned to one another
	copyArrays1 := arrays1
	fmt.Println(copyArrays1)
	// copyArrays1 and arrays1 are completely independent of each other,
	// unlike C++ where both would refer to the same memory
	// iterate over an array with for
	// way 1
	for i := 0; i < len(arrays); i++ {
		//
	}
	// way 2
	for index, value := range arrays1 {
		fmt.Printf("i = %d value = %s", index, value)
		fmt.Println()
	}
	// if only one of index/value is needed, replace the other with '_'
	for _, value := range arrays1 {
		fmt.Printf("value = %s", value)
		fmt.Println()
	}
	// two-dimensional array
	matrix := [4][2]int{
		{1, 2},
		{3, 4},
		{5, 6},
		{7, 8},
	}
	fmt.Println(matrix)
	for i := 0; i < 4; i++ {
		for j := 0; j < 2; j++ {
			fmt.Print(matrix[i][j], " ")
		}
		fmt.Println()
	}
}
|
package main
import (
"context"
"fmt"
"net/http"
"os"
"github.com/DataDog/datadog-go/statsd"
"github.com/google/uuid"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
"github.com/jrxfive/superman-detector/handlers/healthz"
v1 "github.com/jrxfive/superman-detector/handlers/v1"
"github.com/jrxfive/superman-detector/internal/pkg/settings"
"github.com/jrxfive/superman-detector/internal/pkg/signals"
customMiddleware "github.com/jrxfive/superman-detector/middleware"
"github.com/jrxfive/superman-detector/models"
"github.com/jrxfive/superman-detector/pkg/geoip"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
// signalMonitor blocks until a shutdown signal arrives, attempts a graceful
// shutdown of the echo server, then exits the process.
func signalMonitor(signalChannel chan os.Signal, e *echo.Echo) {
	s := <-signalChannel
	e.Logger.Warnf("signal interrupt detected:%s attempting graceful shutdown", s.String())
	err := e.Shutdown(context.Background())
	if err != nil {
		// BUG FIX: the format string had no verb, so the signal name was
		// never interpolated (and the shutdown error was never logged).
		e.Logger.Warnf("failed to gracefully shutdown after %s: %v", s.String(), err)
	}
	os.Exit(0)
}
// main wires up configuration, statsd telemetry, middleware, the GeoIP and
// SQL databases and the HTTP handlers, then runs the echo server until a
// signal triggers graceful shutdown.
func main() {
	//Configuration
	s := settings.NewSettings()
	//Echo
	e := echo.New()
	//Statsd Telemetry
	statsdClient, err := statsd.New(s.StatsdAddress, func(options *statsd.Options) error {
		options.Namespace = s.StatsdNamespace
		options.Tags = []string{fmt.Sprintf("app:%s", s.StatsdNamespace)}
		options.BufferPoolSize = s.StatsdBufferPoolSize
		options.Telemetry = false
		return nil
	})
	// BUG FIX: this error was previously ignored; a failed statsd setup
	// left statsdClient nil and panicked in the middleware below.
	if err != nil {
		e.Logger.Fatalf("failed to create statsd client err:%s", err.Error())
	}
	defer func() {
		err = statsdClient.Close()
		e.Logger.Error(err)
	}()
	//Middleware
	e.Use(middleware.Logger())
	e.Use(middleware.RequestIDWithConfig(middleware.RequestIDConfig{
		Generator: func() string {
			return uuid.New().String()
		},
	}))
	e.Use(middleware.GzipWithConfig(middleware.GzipConfig{
		Level: 5,
	}))
	e.Use(middleware.SecureWithConfig(middleware.SecureConfig{
		XSSProtection:         "",
		ContentTypeNosniff:    "",
		XFrameOptions:         "",
		HSTSMaxAge:            3600,
		ContentSecurityPolicy: "default-src 'self'",
	}))
	e.Use(middleware.BodyLimit(s.RequestBodyLimit))
	e.Use(customMiddleware.NewStats(statsdClient).Process)
	//Signal Monitor
	smc := signals.NewSignalMonitoringChannel()
	go signalMonitor(smc, e)
	//Databases
	geoDB, err := geoip.NewDefaultLocator(s)
	if err != nil {
		e.Logger.Fatalf("failed to open geo database err:%s", err.Error())
	}
	defer func() {
		err = geoDB.Close()
		e.Logger.Error(err)
	}()
	db, err := gorm.Open(s.SqlDialect, s.SqlConnectionString)
	if err != nil {
		e.Logger.Fatalf("failed to open database err:%s", err.Error())
	}
	defer func() {
		err = db.Close()
		e.Logger.Error(err)
	}()
	//Database create if missing, no-op if created
	if !db.HasTable(&models.LoginEvent{}) {
		db.CreateTable(&models.LoginEvent{})
	}
	//Handler Creation
	health := healthz.NewHealthz(db.DB(), statsdClient)
	//v1 Handler Creation
	login := v1.NewLogin(db, geoDB, statsdClient, s)
	//Handlers Registration
	e.GET("/healthz", health.GetHealthz)
	e.HEAD("/healthz", health.HeadHealthz)
	v1Group := e.Group("/v1")
	v1Group.POST("", login.PostLogin)
	server := &http.Server{
		Addr:              fmt.Sprintf(":%d", s.ServicePort),
		ReadTimeout:       s.ServerReadTimeoutSeconds,
		ReadHeaderTimeout: s.ServerReadTimeoutSeconds,
		WriteTimeout:      s.ServerWriteTimeoutSeconds,
	}
	e.Logger.Fatal(e.StartServer(server))
}
|
// Package calculator provides a library for simple calculations in Go.
package calculator
import (
"errors"
"fmt"
"math"
)
// Add returns the sum of a and b.
func Add(a, b float64) float64 {
	sum := a + b
	return sum
}
// Subtract returns the difference a - b.
func Subtract(a, b float64) float64 {
	diff := a - b
	return diff
}
// Multiply returns the product of a and b.
func Multiply(a, b float64) float64 {
	product := a * b
	return product
}
// Divide returns a divided by b; dividing by zero yields an error.
func Divide(a, b float64) (float64, error) {
	if b != 0 {
		return a / b, nil
	}
	return 0, errors.New("division by zero is undefined")
}
// Sqrt returns the square root of a; negative input yields an error.
func Sqrt(a float64) (float64, error) {
	if a >= 0 {
		return math.Sqrt(a), nil
	}
	return 0, fmt.Errorf("%f: Sqrt of negative number is undefined", a)
}
|
package controllers
import (
"fmt"
"math/rand"
"strings"
"time"
aliyunsmsclient "github.com/KenmyZhang/aliyun-communicate"
"github.com/astaxie/beego/config"
)
/*UtilsController is a utility controller (SMS verification helpers). */
type UtilsController struct {
	MainController
}
/*GenValidateCode returns a random numeric verification code of the given length. */
func (c *UtilsController) GenValidateCode(length int) string {
	// Use a locally seeded source instead of reseeding the deprecated
	// package-global generator via rand.Seed on every call (which made
	// concurrent callers race on the global state).
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	var code strings.Builder
	code.Grow(length)
	for i := 0; i < length; i++ {
		// Append one random decimal digit.
		code.WriteByte(byte('0') + byte(rng.Intn(10)))
	}
	return code.String()
}
/*SendMessage sends an SMS verification code to the given phone number via
the Aliyun SMS gateway. It returns true only when the gateway reports
success. Credentials come from conf/aliyun.conf. */
func (c *UtilsController) SendMessage(phone string) bool {
	iniconf, err := config.NewConfig("ini", "conf/aliyun.conf")
	if err != nil {
		return false
	}
	code := c.GenValidateCode(4)
	// SMS rate limiting / code bookkeeping.
	// NOTE(review): the code is stored in the session here AND again below
	// on success — confirm whether the early store is intentional.
	c.SetSession("code", code)
	fmt.Println("code", code)
	var (
		gatewayURL      = "http://dysmsapi.aliyuncs.com/"
		accessKeyID     = iniconf.String("AccessKeyID")
		accessKeySecret = iniconf.String("AccessKeySecret")
		phoneNumbers    = phone
		signName        = "chatApp"
		templateCode    = iniconf.String("templateCode")
		templateParam   = "{\"code\":\"" + code + "\"}"
	)
	smsClient := aliyunsmsclient.New(gatewayURL)
	// NOTE(review): the Execute error is discarded; a transport failure is
	// indistinguishable from a gateway rejection here.
	result, _ := smsClient.Execute(accessKeyID, accessKeySecret, phoneNumbers, signName, templateCode, templateParam)
	if result.IsSuccessful() {
		// Persist the verification code for the later check.
		c.SetSession("code", code)
		return true
	}
	return false
}
|
package server
// type serverContext struct {
// *sqlx.DB
// }
//
// type server struct {
// context serverContext
// }
//
// func New(dbFile string) (server, error) {
// tdb, err := db.OpenDatabase(dbFile)
// if err != nil {
// return server{}, err
// }
// return server{serverContext{tdb}}, nil
// }
//
// func (s server) Run() error {
// r := mux.NewRouter()
// //r.Methods("GET").Path("/locations").Handler(Logger(s.handler.GetLocations, "GetLocations"))
// //r.Methods("GET").Path("/logs").Handler(Logger(s.handler.GetLogs, "GetLogs"))
// //r.Methods("GET").PathPrefix("/").Handler(http.FileServer("./public-html"))
// return http.ListenAndServe(":8080", r)
// }
//
// func (s server) Close() error {
// return s.handler.tdb.Close()
// }
|
package gbm
import (
"bytes"
"encoding/json"
"os"
)
// BUFSIZE is the chunk size used when reading a model file from disk.
const BUFSIZE = 1024

// GbLeaf is one node of a gradient-boosted tree in xgboost's JSON dump
// format. Interior nodes set Split/SplitCondition/Yes/No/Missing; terminal
// nodes set only Leaf. Pointers distinguish "absent" from zero.
type GbLeaf struct {
	Split          *int     `json:"split"`
	SplitCondition *float64 `json:"split_condition"`
	Yes            *int     `json:"yes"`
	No             *int     `json:"no"`
	Missing        *int     `json:"missing"`
	Leaf           *float64 `json:"leaf"`
}

// GbTree is a single boosted tree stored as a flat node array; child links
// (Yes/No) are indices into this slice.
type GbTree []GbLeaf

// Len returns the number of nodes in the tree.
func (gbt GbTree) Len() int {
	return len(gbt)
}

// Model is an ensemble of boosted trees.
type Model []GbTree

// Len returns the number of trees in the ensemble.
func (m Model) Len() int {
	return len(m)
}
// Predict returns the ensemble prediction for feature vector d: the sum of
// every tree's leaf value plus 0.5.
// NOTE(review): 0.5 appears to be xgboost's default base_score — confirm it
// matches the training configuration of the loaded model.
func (m Model) Predict(d []float64) float64 {
	n_tree := m.Len()
	sum_ret := float64(0)
	for i := 0; i < n_tree; i++ {
		sum_ret += predict(m[i], d, 0)
	}
	return sum_ret + 0.5
}
// predict walks tree gbt from node index leaf down to a terminal node and
// returns that node's leaf weight. At each interior node it goes to the
// "yes" child when the split feature is below the split condition.
func predict(gbt GbTree, d []float64, leaf int) float64 {
	node := leaf
	for gbt[node].Leaf == nil {
		if d[*(gbt[node].Split)] < *(gbt[node].SplitCondition) {
			node = *(gbt[node].Yes)
		} else {
			node = *(gbt[node].No)
		}
	}
	return *(gbt[node].Leaf)
}
// NewModel loads a JSON-encoded boosted-tree model dump from path.
func NewModel(path string) (*Model, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	// BUG FIX: the previous manual chunked-read loop checked `n == 0`
	// before the read error, silently truncating the file on an error that
	// returned no bytes. bytes.Buffer.ReadFrom reads to EOF and reports any
	// real error.
	var jbuf bytes.Buffer
	if _, err := jbuf.ReadFrom(file); err != nil {
		return nil, err
	}
	model := new(Model)
	if err := json.Unmarshal(jbuf.Bytes(), model); err != nil {
		return nil, err
	}
	return model, nil
}
|
package utils
import (
"bytes"
"encoding/binary"
"fmt"
"sort"
)
/*
creating a tablet for insertion
for example, considering device: root.sg1.d1
timestamps, m1, m2, m3
1, 125.3, True, text1
2, 111.6, False, text2
3, 688.6, True, text3
Notice: The tablet should not have empty cell
The tablet will be sorted at the initialization by timestamps
:param deviceId: String, IoTDB time series path to device layer (without sensor).
:param measurements: List, sensors.
:param dataTypes: TSDataType List, specify value types for sensors.
:param values: 2-D List, the values of each row should be the outer list element.
:param timestamps: List.
*/
// Tablet is a batch of sensor values for one device, row-aligned with the
// timestamps slice (see the package comment above for the layout).
type Tablet struct {
	deviceId     string          // IoTDB path to the device layer (no sensor)
	measurements []string        // sensor names, one per column
	dataTypes    []int32         // TSDataType per column
	values       [][]interface{} // row-major cell values; no empty cells
	timestamps   []int64         // one timestamp per row, sorted ascending
	columnNumber int             // == len(measurements)
	rowNumber    int             // == len(timestamps)
}
// NewTablet builds a Tablet after validating that values pairs with
// timestamps and dataTypes pairs with measurements; it returns nil on a
// length mismatch. Rows are sorted by timestamp when not already sorted.
func NewTablet(deviceId string, measurements []string, dataTypes []int32, values [][]interface{}, timestamps []int64) (t_ *Tablet) {
	if len(timestamps) != len(values) || len(dataTypes) != len(measurements) {
		// NOTE(review): the error is only printed; callers must nil-check.
		fmt.Println("NewTablet: Length not match!")
		return nil
	}
	if !checkSorted(timestamps) {
		// Sort rows by timestamp, keeping values bound to their rows.
		timestamps, values = sortBinding(timestamps, values)
	}
	// sorted
	return &Tablet{deviceId: deviceId, measurements: measurements, dataTypes: dataTypes, values: values, timestamps: timestamps, columnNumber: len(measurements), rowNumber: len(timestamps)}
}
// GetMeasurements returns the sensor names of the tablet's columns.
func (t_ *Tablet) GetMeasurements() []string {
	return t_.measurements
}

// GetDataTypes returns the TSDataType code of each column.
func (t_ *Tablet) GetDataTypes() []int32 {
	return t_.dataTypes
}

// GetRowNumber returns the number of rows in the tablet.
func (t_ *Tablet) GetRowNumber() int {
	return t_.rowNumber
}

// GetColumnNumber returns the number of columns in the tablet.
func (t_ *Tablet) GetColumnNumber() int {
	return t_.columnNumber
}

// GetDeviceId returns the device path this tablet inserts into.
func (t_ *Tablet) GetDeviceId() string {
	return t_.deviceId
}
// GetTimestampsBinary serializes the tablet's timestamps as consecutive
// big-endian int64 values. It returns nil (after printing a diagnostic)
// if encoding fails.
func (t_ *Tablet) GetTimestampsBinary() []byte {
	var buf bytes.Buffer
	for _, ts := range t_.timestamps {
		if err := binary.Write(&buf, binary.BigEndian, ts); err != nil {
			fmt.Println("Tablet binary.Write TimeStamp failed:", err)
			return nil
		}
	}
	return buf.Bytes()
}
// GetValuesBinary serializes the tablet's values column by column in
// big-endian order. TEXT columns are written as an int32 byte length
// followed by the UTF-8 bytes; all other supported types are written
// directly via binary.Write. It panics when a cell's dynamic type does
// not match the declared column type or when encoding fails.
//
// This is a behavior-preserving cleanup: the original had an unreachable
// `return nil` after every panic (and a dead `break`), which is removed.
func (t_ *Tablet) GetValuesBinary() []byte {
	buf := new(bytes.Buffer)
	for j := 0; j < len(t_.measurements); j++ {
		if t_.dataTypes[j] == TSDataType.TEXT {
			for i := 0; i < len(t_.timestamps); i++ {
				v_str, ok := t_.values[i][j].(string)
				if !ok {
					panic(fmt.Sprintf("value is not type string, i[%v] j[%v]\n", i, j))
				}
				v_bytes := []byte(v_str)
				// Length prefix, then raw bytes.
				if err := binary.Write(buf, binary.BigEndian, int32(len(v_bytes))); err != nil {
					panic(fmt.Sprintln("Tablet binary.Write TEXT failed1:", err))
				}
				if err := binary.Write(buf, binary.BigEndian, v_bytes); err != nil {
					panic(fmt.Sprintln("Tablet binary.Write TEXT failed2:", err))
				}
			}
		} else {
			for i := 0; i < len(t_.timestamps); i++ {
				// Verify the cell's dynamic type matches the declared column type.
				switch t_.dataTypes[j] {
				case TSDataType.BOOLEAN:
					if _, ok := t_.values[i][j].(bool); !ok {
						panic("value is not type bool")
					}
				case TSDataType.INT32:
					if _, ok := t_.values[i][j].(int32); !ok {
						panic("value is not type int32")
					}
				case TSDataType.INT64:
					if _, ok := t_.values[i][j].(int64); !ok {
						panic("value is not type int64")
					}
				case TSDataType.FLOAT:
					if _, ok := t_.values[i][j].(float32); !ok {
						panic("value is not type float32")
					}
				case TSDataType.DOUBLE:
					if _, ok := t_.values[i][j].(float64); !ok {
						panic("value is not type float64")
					}
				default:
					panic("Unsupported DataType!!!")
				}
				if err := binary.Write(buf, binary.BigEndian, t_.values[i][j]); err != nil {
					panic(fmt.Sprintf("binary.Write failed:{%v}\n", err))
				}
			}
		}
	}
	return buf.Bytes()
}
// sortBinding sorts timestamps ascending and permutes values with the
// same ordering, keeping each row attached to its timestamp. Both input
// slices are rearranged in place and returned.
func sortBinding(timestamps []int64, values [][]interface{}) ([]int64, [][]interface{}) {
	// Sort a permutation of indices instead of a slice of pairs.
	order := make([]int, len(timestamps))
	for i := range order {
		order[i] = i
	}
	sort.Slice(order, func(a, b int) bool {
		return timestamps[order[a]] < timestamps[order[b]]
	})
	sortedTs := make([]int64, len(timestamps))
	sortedVals := make([][]interface{}, len(values))
	for pos, src := range order {
		sortedTs[pos] = timestamps[src]
		sortedVals[pos] = values[src]
	}
	// Write the sorted data back into the caller's slices.
	copy(timestamps, sortedTs)
	copy(values, sortedVals)
	return timestamps, values
}
// checkSorted reports whether timestamps are in non-decreasing order.
// Empty and single-element slices are considered sorted.
func checkSorted(timestamps []int64) bool {
	for i := 1; i < len(timestamps); i++ {
		if timestamps[i-1] > timestamps[i] {
			return false
		}
	}
	return true
}
|
package nfs
import (
"context"
"fmt"
nfsstoragev1alpha1 "github.com/johandry/nfs-operator/api/v1alpha1"
"github.com/johandry/nfs-operator/resources"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Compile-time assertion that Deployment satisfies resources.Reconcilable.
var _ resources.Reconcilable = &Deployment{}

// Deployment is the Deployment resource used by the Nfs controller
type Deployment struct {
	// Owner is the Nfs custom resource that owns this Deployment; its
	// namespace and backing-storage claim name are used when building it.
	Owner *nfsstoragev1alpha1.Nfs
}
// yamlDeployment is the manifest equivalent of the Deployment built
// programmatically in new(). The indentation is restored to valid YAML
// nesting (the literal's indentation had been flattened, which cannot
// parse as YAML).
// NOTE(review): this variable is not referenced anywhere in this file —
// confirm whether it is used elsewhere or can be removed.
var yamlDeployment = []byte(`
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      serviceAccount: nfs-provisioner
      containers:
        - name: nfs-provisioner
          image: quay.io/kubernetes_incubator/nfs-provisioner:latest
          ports:
            - name: nfs
              containerPort: 2049
            - name: nfs-udp
              containerPort: 2049
              protocol: UDP
            - name: nlockmgr
              containerPort: 32803
            - name: nlockmgr-udp
              containerPort: 32803
              protocol: UDP
            - name: mountd
              containerPort: 20048
            - name: mountd-udp
              containerPort: 20048
              protocol: UDP
            - name: rquotad
              containerPort: 875
            - name: rquotad-udp
              containerPort: 875
              protocol: UDP
            - name: rpcbind
              containerPort: 111
            - name: rpcbind-udp
              containerPort: 111
              protocol: UDP
            - name: statd
              containerPort: 662
            - name: statd-udp
              containerPort: 662
              protocol: UDP
          securityContext:
            capabilities:
              add:
                - DAC_READ_SEARCH
                - SYS_RESOURCE
          args:
            - "-provisioner=ibmcloud/nfs"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: SERVICE_NAME
              value: nfs-provisioner
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: export-volume
              mountPath: /export
      volumes:
        - name: export-volume
          persistentVolumeClaim:
            claimName: nfs-block-custom
`)
// new returns the NFS provisioner workload as an apps/v1.Deployment,
// built programmatically. It runs one replica with the Recreate strategy,
// exposes the TCP and UDP ports of the NFS-related services, and mounts
// the owner's backing-storage PVC at /export.
func (r *Deployment) new() *appsv1.Deployment {
	replicas := int32(1)
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      appName,
			Namespace: r.Owner.Namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": appName,
				},
			},
			Replicas: &replicas,
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.RecreateDeploymentStrategyType,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": appName,
					},
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: appName,
					Containers: []corev1.Container{
						{
							Name:  appName,
							Image: imageName,
							// One TCP and one UDP entry per NFS-related service
							// (nfs, nlockmgr, mountd, rquotad, rpcbind, statd).
							Ports: []corev1.ContainerPort{
								{
									Name:          "nfs",
									ContainerPort: 2049,
								},
								{
									Name:          "nfs-udp",
									ContainerPort: 2049,
									Protocol:      corev1.ProtocolUDP,
								},
								{
									Name:          "nlockmgr",
									ContainerPort: 32803,
								},
								{
									Name:          "nlockmgr-udp",
									ContainerPort: 32803,
									Protocol:      corev1.ProtocolUDP,
								},
								{
									Name:          "mountd",
									ContainerPort: 20048,
								},
								{
									Name:          "mountd-udp",
									ContainerPort: 20048,
									Protocol:      corev1.ProtocolUDP,
								},
								{
									Name:          "rquotad",
									ContainerPort: 875,
								},
								{
									Name:          "rquotad-udp",
									ContainerPort: 875,
									Protocol:      corev1.ProtocolUDP,
								},
								{
									Name:          "rpcbind",
									ContainerPort: 111,
								},
								{
									Name:          "rpcbind-udp",
									ContainerPort: 111,
									Protocol:      corev1.ProtocolUDP,
								},
								{
									Name:          "statd",
									ContainerPort: 662,
								},
								{
									Name:          "statd-udp",
									ContainerPort: 662,
									Protocol:      corev1.ProtocolUDP,
								},
							},
							// Capabilities required by the NFS provisioner image.
							SecurityContext: &corev1.SecurityContext{
								Capabilities: &corev1.Capabilities{
									Add: []corev1.Capability{
										"DAC_READ_SEARCH",
										"SYS_RESOURCE",
									},
								},
							},
							Args: []string{
								fmt.Sprintf("-provisioner=%s", provisionerName),
							},
							Env: []corev1.EnvVar{
								{
									Name: "POD_IP",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "status.podIP",
										},
									},
								},
								{
									Name:  "SERVICE_NAME",
									Value: appName,
								},
								{
									Name: "POD_NAMESPACE",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											FieldPath: "metadata.namespace",
										},
									},
								},
							},
							ImagePullPolicy: corev1.PullIfNotPresent,
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "export-volume",
									MountPath: "/export",
								},
							},
						},
					},
					// Export directory backed by the owner's storage claim.
					Volumes: []corev1.Volume{
						{
							Name: "export-volume",
							VolumeSource: corev1.VolumeSource{
								PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
									ClaimName: r.Owner.Spec.BackingStorage.Name,
								},
							},
						},
					},
				},
			},
		},
	}
}
// toResource casts the given runtime.Object to *appsv1.Deployment, or
// returns an error when the object is of a different kind.
func (r *Deployment) toResource(ro runtime.Object) (*appsv1.Deployment, error) {
	v, ok := ro.(*appsv1.Deployment)
	if !ok {
		return nil, fmt.Errorf("the received object is not a apps/v1.Deployment")
	}
	return v, nil
}
// isValid returns true if the given object is valid. If it's valid it won't be updated.
// NOTE(review): currently a stub — every existing Deployment is accepted
// as-is, so drift from the desired spec is never corrected.
func (r *Deployment) isValid(o *appsv1.Deployment) bool {
	return true
}
// Object implements the Object method of the Reconcilable interface.
// It returns a freshly-built desired-state Deployment.
func (r *Deployment) Object() runtime.Object {
	return r.new()
}
// SetControllerReference implements the SetControllerReference method of
// the Reconcilable interface. It marks the owner Nfs resource as the
// controller of the Deployment; a nil scheme is a no-op.
func (r *Deployment) SetControllerReference(scheme *runtime.Scheme) error {
	if scheme == nil {
		return nil
	}
	return ctrl.SetControllerReference(r.Owner, r.new(), scheme)
}
// Get implements the Get method of the Reconcilable interface. It looks
// up the currently-deployed Deployment by the desired name/namespace.
// Returns (nil, nil) when the object does not exist — NotFound errors are
// swallowed by client.IgnoreNotFound, so callers must handle a nil object.
func (r *Deployment) Get(ctx context.Context, c client.Client) (runtime.Object, error) {
	found := &appsv1.Deployment{}
	obj := r.new()
	err := c.Get(ctx, types.NamespacedName{Name: obj.Name, Namespace: obj.Namespace}, found)
	if err == nil {
		return found, nil
	}
	return nil, client.IgnoreNotFound(err)
}
// Validate implements the Validate method of the Reconcilable interface.
// It reports false when the object is not an apps/v1.Deployment, and
// otherwise delegates to isValid.
func (r *Deployment) Validate(ro runtime.Object) bool {
	current, err := r.toResource(ro)
	if err == nil {
		return r.isValid(current)
	}
	return false
}
|
package groupsimilar
import (
"fmt"
"hash"
"unicode/utf8"
)
var (
	// splits lists the delimiter runes used to segment input strings.
	// NOTE(review): the first and last entries both appear to be a comma —
	// confirm the final one is an intended fullwidth CJK comma ',' rather
	// than an accidental ASCII duplicate.
	splits = []rune{',', '.', '!', ' ', ','}
)
// StringVector hashes the segments of a string into 64-bit values using
// a caller-supplied hash function.
type StringVector struct {
	hashfn hash.Hash64 // reused (Reset between segments) by ToVecotor
}

// NewStringVector returns a StringVector backed by the given hash function.
func NewStringVector(fn hash.Hash64) *StringVector {
	return &StringVector{hashfn: fn}
}
// toRunes decodes group into its sequence of runes.
// The debug fmt.Printf/Println calls that printed every decoded rune on
// each invocation were removed — they were development leftovers that
// spammed stdout on every conversion.
func toRunes(group string) []rune {
	ret := make([]rune, 0, utf8.RuneCountInString(group))
	for len(group) > 0 {
		r, size := utf8.DecodeRuneInString(group)
		ret = append(ret, r)
		group = group[size:]
	}
	return ret
}
// ToVecotor splits group on the runes in splits and returns one 64-bit
// hash per non-empty segment, computed with sv.hashfn over the segment's
// UTF-8 bytes. (The exported name is kept as-is; it appears to be a typo
// for "ToVector".)
// NOTE(review): the fmt.Println/Printf calls below look like debug
// leftovers that print on every call — consider removing them.
func (sv *StringVector) ToVecotor(group string) []uint64 {
	runes := toRunes(group)
	matrix := make([][]rune, 0)
	temp_matrix := make([]rune, 0)
	// start_index/end_index delimit the current non-delimiter run.
	start_index := 0
	end_index := 0
	for index, value := range runes {
		// Check whether the current rune is one of the delimiters.
		is_in := false
		for _, r := range splits {
			fmt.Println(r, value)
			if r == value {
				is_in = true
				break
			}
		}
		if is_in {
			// Close the current segment (if non-empty) and restart after
			// the delimiter.
			if start_index != end_index {
				temp_matrix = runes[start_index:end_index]
				matrix = append(matrix, temp_matrix)
				temp_matrix = make([]rune, len(runes)-start_index)
			}
			start_index = index + 1
			end_index = index + 1
		} else {
			end_index = end_index + 1
		}
	}
	// Append the trailing segment, if any.
	if start_index != len(runes) {
		matrix = append(matrix, runes[start_index:])
		fmt.Println(start_index, end_index)
	}
	fmt.Println("matrix", len(matrix), matrix)
	// Hash each segment's UTF-8 encoding into a 64-bit value.
	ret := make([]uint64, len(matrix))
	for index, mx := range matrix {
		buf := make([]byte, 0)
		for _, x := range mx {
			temp_buf := make([]byte, utf8.RuneLen(x))
			utf8.EncodeRune(temp_buf, x)
			buf = addSlice(buf, temp_buf)
		}
		sv.hashfn.Reset()
		sv.hashfn.Write(buf)
		ret[index] = sv.hashfn.Sum64()
	}
	return ret
}
// addSlice returns a new slice holding the bytes of a followed by the
// bytes of b; neither input is modified.
func addSlice(a, b []byte) []byte {
	out := make([]byte, 0, len(a)+len(b))
	out = append(out, a...)
	return append(out, b...)
}
|
package mock
import (
"github.com/10gen/realm-cli/internal/telemetry"
)
// TelemetryService is a mocked telemetry service
type TelemetryService struct {
	// Service is the fallback implementation used when a mock function
	// below is not provided; it may be left nil at the caller's risk.
	telemetry.Service
	// TrackEventFn, when set, replaces Service.TrackEvent.
	TrackEventFn func(eventType telemetry.EventType, data ...telemetry.EventData)
	// CloseFn, when set, replaces Service.Close.
	CloseFn func()
}
// TrackEvent calls the mocked TrackEvent implementation if provided,
// otherwise the call falls back to the underlying telemetry.Service implementation.
// NOTE: this may panic if the underlying telemetry.Service is left undefined
func (s TelemetryService) TrackEvent(eventType telemetry.EventType, data ...telemetry.EventData) {
	if fn := s.TrackEventFn; fn != nil {
		fn(eventType, data...)
		return
	}
	s.Service.TrackEvent(eventType, data...)
}
// Close calls the mocked Close implementation if provided,
// otherwise the call falls back to the underlying telemetry.Service implementation.
// NOTE: this may panic if the underlying telemetry.Service is left undefined
func (s TelemetryService) Close() {
	if fn := s.CloseFn; fn != nil {
		fn()
		return
	}
	s.Service.Close()
}
|
package replay
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/url"
"runtime"
)
// Filter function is used to determine if a given http.Request should be replayed or not.
type Filter func(*http.Request) bool

// Modifier function is used to modify a given http.Request before replaying it.
type Modifier func(*http.Request)

// Handler function is used to handle the outcome of a replayed request:
// it receives the transport error (if any), the response (if any) and the
// replayed request.
type Handler func(error, *http.Response, *http.Request)
// Replayer replays incoming HTTP traffic to one or multiple servers via isolated goroutine.
type Replayer struct {
	// Targets stores the replay URL targets.
	Targets []string
	// Filters stores the replay filter functions; all must pass for a
	// request to be replayed.
	Filters []Filter
	// Modifiers stores the replay request modifier functions.
	Modifiers []Modifier
	// Handler stores the optional replay response/error handler.
	Handler Handler
	// Client stores the http.Client to be used to replay the requests.
	// Defaults to http.DefaultClient.
	Client *http.Client
}
// New creates a new replayer ready to be attached as middleware. The
// replayer uses http.DefaultClient unless Client is overridden.
func New(targets ...string) *Replayer {
	r := new(Replayer)
	r.Targets = targets
	r.Client = http.DefaultClient
	return r
}
// Modify attaches a new modifier function to the current replayer who
// is responsible to modify the http.Request to be replayed before the replay.
// It returns the replayer to allow chaining.
func (x *Replayer) Modify(fn ...Modifier) *Replayer {
	x.Modifiers = append(x.Modifiers, fn...)
	return x
}

// Filter attaches a new filter function to the current replayer who
// determines if a given request should be replayed or not.
// It returns the replayer to allow chaining.
func (x *Replayer) Filter(fn ...Filter) *Replayer {
	x.Filters = append(x.Filters, fn...)
	return x
}

// SetHandler is used to set a replay request handler, allowing the developer
// to deal with the replay response or error accordingly.
// It returns the replayer to allow chaining.
func (x *Replayer) SetHandler(fn Handler) *Replayer {
	x.Handler = fn
	return x
}
// HandleHTTP handles an incoming HTTP request received by the proxy.
// If all filters accept the request, its body is buffered and the request
// is replayed to every target in its own goroutine; the original request
// is always forwarded to the next handler (deferred ServeHTTP).
//
// Fix: the original code built ONE bytes.Reader and shared it between the
// restored r.Body and every replay goroutine — concurrent reads of a
// single-use reader, so at most one consumer saw the body. Each consumer
// now gets its own reader over the buffered bytes.
func (x *Replayer) HandleHTTP(w http.ResponseWriter, r *http.Request, h http.Handler) {
	defer h.ServeHTTP(w, r)
	if len(x.Targets) == 0 {
		return
	}
	// Filter request to determine if should be replayed.
	for _, filter := range x.Filters {
		if !filter(r) {
			return
		}
	}
	// Buffer the body once; restore it so subsequent layers can consume it.
	var buf []byte
	if r.Body != nil {
		// Best effort: on a read error we replay whatever was read.
		buf, _ = ioutil.ReadAll(r.Body)
		r.Body = ioutil.NopCloser(bytes.NewReader(buf))
	}
	for _, target := range x.Targets {
		var body io.Reader
		if buf != nil {
			body = bytes.NewReader(buf)
		}
		go x.Replay(r, target, body)
	}
}
// Replay replays the given http.Request to the given target hostname.
// The request is first cloned and retargeted, then run through the
// modifiers and sent with x.Client. The optional Handler receives the
// result (error and/or response) and owns the response body.
//
// Fix: when no Handler is set, the response body was never drained or
// closed, leaking the connection; it is now drained and closed so the
// transport can reuse it.
func (x *Replayer) Replay(r *http.Request, target string, body io.Reader) {
	// Create the replay request.
	req, err := NewReplayRequest(r, target, body)
	if err != nil {
		if x.Handler != nil {
			x.Handler(err, nil, req)
		}
		return
	}
	// Trigger the request modifiers.
	for _, modifier := range x.Modifiers {
		modifier(req)
	}
	res, err := x.Client.Do(req)
	if transport, ok := x.Client.Transport.(*http.Transport); ok {
		EnsureTransporterFinalized(transport)
	}
	if x.Handler != nil {
		// The handler takes ownership of res (including closing its body).
		x.Handler(err, res, req)
		return
	}
	if res != nil {
		// Drain and close so the underlying connection can be reused.
		io.Copy(ioutil.Discard, res.Body)
		res.Body.Close()
	}
}
// NewReplayRequest creates a new http.Request cloning on the given one replacing the target URL host.
func NewReplayRequest(req *http.Request, host string, body io.Reader) (*http.Request, error) {
target, err := url.Parse(host)
if err != nil {
return nil, err
}
// Clone request
r, err := http.NewRequest(req.Method, req.URL.String(), body)
r.URL.Host = target.Host
r.URL.Scheme = target.Scheme
r.Header = req.Header
r.TLS = req.TLS
r.Host = req.Host
r.RemoteAddr = req.RemoteAddr
return r, nil
}
// EnsureTransporterFinalized will ensure that when the HTTP client is GCed
// the runtime will close the idle connections (so that they won't leak)
// this function was adopted from Hashicorp's go-cleanhttp package.
// NOTE(review): the finalizer is attached to the address of the local
// parameter variable, not to the transport value itself, so it fires when
// that pointer becomes unreachable. This mirrors the upstream
// go-cleanhttp code, but confirm it behaves as intended before relying
// on it — and note it is currently invoked once per replayed request.
func EnsureTransporterFinalized(httpTransport *http.Transport) {
	runtime.SetFinalizer(&httpTransport, func(transportInt **http.Transport) {
		(*transportInt).CloseIdleConnections()
	})
}
|
package config
// constantValue holds process-wide constants keyed by name.
// The map is lazily allocated on first write.
var constantValue map[string]interface{}

// SetConstantValue stores value under key, allocating the backing map on
// first use.
func SetConstantValue(key string, value interface{}) {
	if constantValue == nil {
		constantValue = map[string]interface{}{}
	}
	constantValue[key] = value
}

// GetConstantValue returns the value stored under key, or nil when the
// key is absent.
func GetConstantValue(key string) interface{} {
	// Indexing a nil map or a missing key yields the zero value (a nil
	// interface), which is exactly the explicit comma-ok behavior.
	return constantValue[key]
}
|
package _862_Shortest_Subarray_with_Sum_at_Least_K
// mem is one entry of the monotonic prefix-sum deque: the index a prefix
// sum was taken at, and its value.
type mem struct {
	idx int
	sum int
}

// shortestSubarray returns the length of the shortest non-empty subarray
// of nums whose sum is at least k, or -1 when no such subarray exists.
// It keeps a deque of prefix sums that is increasing in sum.
func shortestSubarray(nums []int, k int) int {
	prefix := 0
	window := []mem{{idx: -1, sum: 0}}
	best := len(nums) + 1
	for i, v := range nums {
		prefix += v
		// Shrink from the front while the subarray ending at i qualifies.
		for len(window) > 0 && prefix-window[0].sum >= k {
			if d := i - window[0].idx; d < best {
				best = d
			}
			window = window[1:]
		}
		// Drop dominated entries: any prefix >= current can never yield a
		// shorter qualifying subarray later.
		for len(window) > 0 && window[len(window)-1].sum >= prefix {
			window = window[:len(window)-1]
		}
		window = append(window, mem{idx: i, sum: prefix})
	}
	if best > len(nums) {
		return -1
	}
	return best
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
package hikoqiuclient
import (
"eureka/vars"
"fmt"
"time"
"github.com/HikoQiu/go-eureka-client/eureka"
)
// NOTE: the instance info has no meta field.
// HikoQiuClient wraps the HikoQiu eureka client: it registers this
// service instance, keeps it alive with heartbeats, and deregisters it.
type HikoQiuClient struct {
	instanceID string                     // id assigned by the eureka server on registration
	vo         *eureka.InstanceVo         // instance description sent to the server
	config     *eureka.EurekaClientConfig // client configuration built in Register
	cli        *eureka.Client
	api        *eureka.EurekaServerApi
	ticker     *time.Ticker // drives the heartbeat loop
	done       chan bool    // closed to stop the heartbeat goroutine
}
// NewClient builds a HikoQiuClient whose instance description is filled
// from the vars package (app name, hostname, local port) and marked UP.
func NewClient() *HikoQiuClient {
	vo := eureka.DefaultInstanceVo()
	vo.App = vars.AppName
	vo.Hostname = vars.Hostname
	vo.Status = eureka.STATUS_UP
	vo.Port.Value = vars.LocalPort
	vo.Port.Enabled = "true"
	return &HikoQiuClient{vo: vo}
}
// Register configures the eureka client against the static service URL
// from vars, registers this instance, and starts the heartbeat loop.
// NOTE(review): the errors from Api() and RegisterInstanceWithVo() are
// discarded; if either fails, c.api/c.instanceID may be unusable and the
// heartbeat goroutine will still call into them — consider handling.
func (c *HikoQiuClient) Register() {
	c.config = eureka.GetDefaultEurekaClientConfig()
	// Use a static service URL instead of DNS-based discovery.
	c.config.UseDnsForFetchingServiceUrls = false
	c.config.AutoUpdateDnsServiceUrls = false
	c.config.Region = eureka.DEFAULT_REGION
	c.config.AvailabilityZones = map[string]string{
		eureka.DEFAULT_REGION: eureka.DEFAULT_ZONE,
	}
	c.config.ServiceUrl = map[string]string{
		eureka.DEFAULT_ZONE: vars.EurekaServerURL,
	}
	c.cli = eureka.DefaultClient.Config(c.config)
	c.api, _ = c.cli.Api()
	c.instanceID, _ = c.api.RegisterInstanceWithVo(c.vo)
	c.sendHeartBeat()
}
// sendHeartBeat starts a goroutine that sends a heartbeat to the eureka
// server every vars.HeartBeatInterval seconds until c.done is closed
// (see DeRegister). The ticker is stopped when the goroutine exits.
func (c *HikoQiuClient) sendHeartBeat() {
	c.ticker = time.NewTicker(time.Duration(vars.HeartBeatInterval) * time.Second)
	c.done = make(chan bool, 1)
	go func() {
		defer func() {
			fmt.Println("stop SendHeartBeat")
			c.ticker.Stop()
		}()
		for {
			select {
			case <-c.ticker.C:
				// Errors from SendHeartbeat are ignored (best effort).
				c.api.SendHeartbeat(vars.AppName, c.instanceID)
			case <-c.done:
				return
			}
		}
	}()
}
// DeRegister stops the heartbeat loop, re-registers the instance with
// STATUS_DOWN, and then removes it from the eureka server.
// NOTE(review): close(c.done) panics if DeRegister is called twice or
// before Register; guard if that can happen. Registration errors are
// discarded, as in Register.
func (c *HikoQiuClient) DeRegister() {
	close(c.done)
	c.vo.Status = eureka.STATUS_DOWN
	c.instanceID, _ = c.api.RegisterInstanceWithVo(c.vo)
	c.api.DeRegisterInstance(vars.AppName, c.instanceID)
}
|
// This file was generated for SObject ApexTestRunResult, API Version v43.0 at 2018-07-30 03:48:06.346527851 -0400 EDT m=+52.691111397
package sobjects
import (
"fmt"
"strings"
)
// ApexTestRunResult is the generated Go mapping of the Salesforce
// ApexTestRunResult SObject (API v43.0). The force tags drive
// (de)serialization; ",omitempty" skips zero-valued fields.
type ApexTestRunResult struct {
	BaseSObject
	AsyncApexJobId   string `force:",omitempty"`
	ClassesCompleted int    `force:",omitempty"`
	ClassesEnqueued  int    `force:",omitempty"`
	CreatedById      string `force:",omitempty"`
	CreatedDate      string `force:",omitempty"`
	EndTime          string `force:",omitempty"`
	Id               string `force:",omitempty"`
	IsAllTests       bool   `force:",omitempty"`
	IsDeleted        bool   `force:",omitempty"`
	JobName          string `force:",omitempty"`
	LastModifiedById string `force:",omitempty"`
	LastModifiedDate string `force:",omitempty"`
	MethodsCompleted int    `force:",omitempty"`
	MethodsEnqueued  int    `force:",omitempty"`
	MethodsFailed    int    `force:",omitempty"`
	Source           string `force:",omitempty"`
	StartTime        string `force:",omitempty"`
	Status           string `force:",omitempty"`
	SystemModstamp   string `force:",omitempty"`
	TestTime         int    `force:",omitempty"`
	UserId           string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject.
func (t *ApexTestRunResult) ApiName() string {
	return "ApexTestRunResult"
}
// String renders the record as a multi-line, human-readable dump of all
// mapped fields.
// NOTE(review): t.Name is not declared on this struct — presumably it is
// promoted from the embedded BaseSObject; confirm.
func (t *ApexTestRunResult) String() string {
	builder := strings.Builder{}
	builder.WriteString(fmt.Sprintf("ApexTestRunResult #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tAsyncApexJobId: %v\n", t.AsyncApexJobId))
	builder.WriteString(fmt.Sprintf("\tClassesCompleted: %v\n", t.ClassesCompleted))
	builder.WriteString(fmt.Sprintf("\tClassesEnqueued: %v\n", t.ClassesEnqueued))
	builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById))
	builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate))
	builder.WriteString(fmt.Sprintf("\tEndTime: %v\n", t.EndTime))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tIsAllTests: %v\n", t.IsAllTests))
	builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted))
	builder.WriteString(fmt.Sprintf("\tJobName: %v\n", t.JobName))
	builder.WriteString(fmt.Sprintf("\tLastModifiedById: %v\n", t.LastModifiedById))
	builder.WriteString(fmt.Sprintf("\tLastModifiedDate: %v\n", t.LastModifiedDate))
	builder.WriteString(fmt.Sprintf("\tMethodsCompleted: %v\n", t.MethodsCompleted))
	builder.WriteString(fmt.Sprintf("\tMethodsEnqueued: %v\n", t.MethodsEnqueued))
	builder.WriteString(fmt.Sprintf("\tMethodsFailed: %v\n", t.MethodsFailed))
	builder.WriteString(fmt.Sprintf("\tSource: %v\n", t.Source))
	builder.WriteString(fmt.Sprintf("\tStartTime: %v\n", t.StartTime))
	builder.WriteString(fmt.Sprintf("\tStatus: %v\n", t.Status))
	builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp))
	builder.WriteString(fmt.Sprintf("\tTestTime: %v\n", t.TestTime))
	builder.WriteString(fmt.Sprintf("\tUserId: %v\n", t.UserId))
	return builder.String()
}
// ApexTestRunResultQueryResponse is the SOQL query-response envelope for
// ApexTestRunResult records.
type ApexTestRunResultQueryResponse struct {
	BaseQuery
	Records []ApexTestRunResult `json:"Records" force:"records"`
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importer
import (
"context"
"slices"
"testing"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/lightning/duplicate"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/dbutil"
"github.com/pingcap/tidb/util/extsort"
"github.com/stretchr/testify/require"
)
var (
	// exampleHandleKey is an encoded row key for table 121, int handle 22.
	exampleHandleKey = tablecodec.EncodeRowKeyWithHandle(121, kv.IntHandle(22))
	// exampleIndexID / exampleIndexKey identify an index seek key on table 122.
	exampleIndexID  = int64(23)
	exampleIndexKey = tablecodec.EncodeIndexSeekKey(122, exampleIndexID, nil)
)
// TestErrorOnDup checks that errorOnDup reports ErrDuplicateKey for both
// handle-key and index-key conflicts, carrying the conflict kind and the
// conflicting row IDs as error args.
func TestErrorOnDup(t *testing.T) {
	// Handle-key conflict: args are (conflictOnHandle, rowIDs).
	h := &errorOnDup{}
	require.NoError(t, h.Begin(exampleHandleKey))
	require.NoError(t, h.Append([]byte{1}))
	require.NoError(t, h.Append([]byte{2}))
	err := h.End()
	require.ErrorIs(t, err, ErrDuplicateKey)
	dupErr := errors.Cause(err).(*errors.Error)
	require.Equal(t, conflictOnHandle, dupErr.Args()[0])
	require.Equal(t, [][]byte{{1}, {2}}, dupErr.Args()[1])
	require.NoError(t, h.Close())
	// Index-key conflict: args are (indexID, rowIDs).
	h = &errorOnDup{}
	require.NoError(t, h.Begin(exampleIndexKey))
	require.NoError(t, h.Append([]byte{11}))
	require.NoError(t, h.Append([]byte{12}))
	err = h.End()
	require.ErrorIs(t, err, ErrDuplicateKey)
	dupErr = errors.Cause(err).(*errors.Error)
	require.Equal(t, int64(23), dupErr.Args()[0])
	require.Equal(t, [][]byte{{11}, {12}}, dupErr.Args()[1])
	require.NoError(t, h.Close())
}
// TestReplaceOnDup checks that replaceOnDup keeps the LAST row of each
// duplicate group and records all earlier rows as ignored.
func TestReplaceOnDup(t *testing.T) {
	runDupHandlerTest(t,
		func(w extsort.Writer) duplicate.Handler { return &replaceOnDup{w: w} },
		[]dupRecord{{
			exampleHandleKey, [][]byte{[]byte("01"), []byte("02"), []byte("03")}},
			{exampleIndexKey, [][]byte{[]byte("11"), []byte("12"), []byte("13")}}},
		map[int64][][]byte{
			conflictOnHandle: {[]byte("01"), []byte("02")},
			exampleIndexID:   {[]byte("11"), []byte("12")},
		},
	)
}
// TestIgnoreOnDup checks that ignoreOnDup keeps the FIRST row of each
// duplicate group and records all later rows as ignored.
func TestIgnoreOnDup(t *testing.T) {
	runDupHandlerTest(t,
		func(w extsort.Writer) duplicate.Handler { return &ignoreOnDup{w: w} },
		[]dupRecord{{
			exampleHandleKey, [][]byte{[]byte("01"), []byte("02"), []byte("03")}},
			{exampleIndexKey, [][]byte{[]byte("11"), []byte("12"), []byte("13")}}},
		map[int64][][]byte{
			conflictOnHandle: {[]byte("02"), []byte("03")},
			exampleIndexID:   {[]byte("12"), []byte("13")},
		},
	)
}
// dupRecord is one duplicate group fed to a duplicate.Handler: a
// conflicting key and the row IDs that share it.
type dupRecord struct {
	key    []byte
	rowIDs [][]byte
}
// runDupHandlerTest feeds input through the handler produced by
// makeHandler, collecting the row IDs it writes to the ignore-rows sorter,
// and asserts they match ignoredRowIDs (grouped by index ID, with
// conflictOnHandle for handle-key conflicts).
func runDupHandlerTest(
	t *testing.T,
	makeHandler func(w extsort.Writer) duplicate.Handler,
	input []dupRecord,
	ignoredRowIDs map[int64][][]byte,
) {
	ignoreRows, err := extsort.OpenDiskSorter(t.TempDir(), nil)
	require.NoError(t, err)
	defer ignoreRows.Close()
	ctx := context.Background()
	w, err := ignoreRows.NewWriter(ctx)
	require.NoError(t, err)
	h := makeHandler(w)
	// Replay each duplicate group through the handler.
	for _, r := range input {
		require.NoError(t, h.Begin(r.key))
		for _, rowID := range r.rowIDs {
			require.NoError(t, h.Append(rowID))
		}
		require.NoError(t, h.End())
	}
	require.NoError(t, h.Close())
	require.NoError(t, ignoreRows.Sort(ctx))
	it, err := ignoreRows.NewIterator(ctx)
	require.NoError(t, err)
	// Collect the ignored rows: key = row ID, value = varint index ID.
	rowIDs := map[int64][][]byte{}
	for it.First(); it.Valid(); it.Next() {
		_, idxID, err := codec.DecodeVarint(it.UnsafeValue())
		require.NoError(t, err)
		rowIDs[idxID] = append(rowIDs[idxID], slices.Clone(it.UnsafeKey()))
	}
	require.NoError(t, it.Error())
	require.NoError(t, it.Close())
	require.Equal(t, ignoredRowIDs, rowIDs)
}
// TestSimplifyTable checks that simplifyTable keeps only the columns and
// unique indexes relevant for duplicate detection, remaps the column
// permutation accordingly, and leaves the original table info unchanged.
func TestSimplifyTable(t *testing.T) {
	testCases := []struct {
		table             string
		colPerm           []int
		expTable          string
		expTableHasNoCols bool
		expColPerm        []int
	}{
		{
			// No PK/unique index: every column is dropped.
			table:             "CREATE TABLE t(a int, b int, c int)",
			colPerm:           []int{0, 1, 2, -1},
			expTableHasNoCols: true,
			expColPerm:        []int{-1},
		},
		{
			table:      "CREATE TABLE t(a int PRIMARY KEY, b int, c int)",
			colPerm:    []int{2, 0, 1},
			expTable:   "CREATE TABLE t(a int PRIMARY KEY)",
			expColPerm: []int{2},
		},
		{
			// Non-unique indexes are dropped; unique ones are kept.
			table:      "CREATE TABLE t(a int UNIQUE KEY, b int, c int, d int, INDEX idx_b(b), INDEX idx_c(c), UNIQUE INDEX idx_bc(b, c))",
			colPerm:    []int{0, 1, 2, 3, 10},
			expTable:   "CREATE TABLE t(a int UNIQUE KEY, b int, c int, UNIQUE INDEX idx_bc(b, c))",
			expColPerm: []int{0, 1, 2, 10},
		},
		{
			table:      "CREATE TABLE t(a int, b int, c int, d int, INDEX idx_b(b), INDEX idx_c(c), UNIQUE INDEX idx_cd(c, d))",
			colPerm:    []int{0, 1, 2, 3, 10},
			expTable:   "CREATE TABLE t(c int, d int, UNIQUE INDEX idx_cd(c, d))",
			expColPerm: []int{2, 3, 10},
		},
	}
	for _, tc := range testCases {
		p := parser.New()
		originalTblInfo, err := dbutil.GetTableInfoBySQL(tc.table, p)
		require.NoError(t, err)
		// run twice to make sure originalTblInfo is not changed
		for i := 0; i < 2; i++ {
			actualTblInfo, actualColPerm := simplifyTable(originalTblInfo, tc.colPerm)
			if tc.expTableHasNoCols {
				require.Empty(t, actualTblInfo.Columns)
			} else {
				expTblInfo, err := dbutil.GetTableInfoBySQL(tc.expTable, p)
				require.NoError(t, err)
				require.Equal(t, len(expTblInfo.Columns), len(actualTblInfo.Columns))
				for i, col := range actualTblInfo.Columns {
					require.Equal(t, expTblInfo.Columns[i].Name, col.Name)
					require.Equal(t, expTblInfo.Columns[i].Offset, col.Offset)
				}
				require.Equal(t, len(expTblInfo.Indices), len(actualTblInfo.Indices))
				for i, idxInfo := range actualTblInfo.Indices {
					require.Equal(t, expTblInfo.Indices[i].Name, idxInfo.Name)
					require.Equal(t, expTblInfo.Indices[i].Columns, idxInfo.Columns)
				}
			}
			require.Equal(t, tc.expColPerm, actualColPerm)
		}
	}
}
|
package blc
import (
"flag"
"fmt"
"log"
"os"
)
// Command-line management for the blockchain.
// CLI is the command-line entry point object.
type CLI struct {
}
// PrintUsage prints the usage of every supported subcommand.
// Fixes: "descrition" typo and the missing space in "AMOUNT --" in the
// printed help text.
func PrintUsage() {
	fmt.Println("Usage:")
	// initialize the blockchain
	fmt.Printf("\tcreateblockchain -address address --create a blockchain\n")
	// add a block
	fmt.Printf("\taddblock -data DATA --add block to blockchain\n")
	// print the full blockchain
	fmt.Printf("\tprintblockchain --print the information of blockchain\n")
	// transfer via the command line
	fmt.Printf("\tsend -from FROM -to TO -amount AMOUNT -- transfer AMOUNT from FROM to TO\n")
	fmt.Printf("\t\tthe description of transfer function\n")
	fmt.Printf("\t\t\t-from FROM -- the source address of this transaction\n")
	fmt.Printf("\t\t\t-to TO -- the destination address of this transaction\n")
	fmt.Printf("\t\t\t-amount AMOUNT -- the value of this transaction\n")
}
// createBlockchain initializes the blockchain with a genesis block,
// crediting the given address.
func (cli *CLI) createBlockchain(address string) {
	CreateBlockChainWithGenesis(address)
}
// addBlock appends a block holding txs to the chain. It exits the process
// when no blockchain database exists yet.
func (cli *CLI) addBlock(txs []*Transaction) {
	// Check that the database exists.
	if !dbExist() {
		fmt.Println("there is no blockchain....please call createblockchain command first")
		os.Exit(1)
	}
	blockchain := BlockChainObject()
	blockchain.AddBlock(txs)
}
// printChain prints the full blockchain. It exits the process when no
// blockchain database exists yet.
func (cli *CLI) printChain() {
	if !dbExist() {
		fmt.Println("there is no blockchain....please call createblockchain command first")
		os.Exit(1)
	}
	blockchain := BlockChainObject()
	blockchain.PrintChain()
}
// IsValidArgs verifies that a subcommand was supplied on the command
// line; otherwise it prints the usage and exits.
func IsValidArgs() {
	if len(os.Args) >= 2 {
		return
	}
	PrintUsage()
	os.Exit(1)
}
// send initiates a transaction.
// NOTE(review): not yet implemented — the Run handler for "send" only
// prints the parsed arguments.
func send() {
}
// Run parses os.Args and dispatches to the matching subcommand handler.
func (cli *CLI) Run() {
	IsValidArgs()
	// Create the flag set for each subcommand.
	// add a block
	addBlockCmd := flag.NewFlagSet("addblock", flag.ExitOnError)
	// print the full blockchain
	printChainCmd := flag.NewFlagSet("printblockchain", flag.ExitOnError)
	// create the blockchain
	createBlockChainWithGenesisBlockCmd := flag.NewFlagSet("createblockchain", flag.ExitOnError)
	// send a transaction
	sendCmd := flag.NewFlagSet("send", flag.ExitOnError)
	// block-data argument
	flagAddBlockArg := addBlockCmd.String("data", "sent 100 btc to yhh", "添加区块数据")
	// miner-reward address used when creating the chain
	flagCreateBlockchain := createBlockChainWithGenesisBlockCmd.String("address", "yhh", "system reward")
	// transaction arguments
	flagSendFromArg := sendCmd.String("from", "", "the source address of transaction")
	flagSendToArg := sendCmd.String("to", "", "the destination address of transaction")
	flagSendAmountArg := sendCmd.String("amount", "", "the value address of transaction")
	// Dispatch on the subcommand name.
	switch os.Args[1] {
	case "send":
		if err := sendCmd.Parse(os.Args[2:]); nil != err {
			log.Panicf("parse sendCmd failed! %v\n", err)
		}
	case "addblock":
		if err := addBlockCmd.Parse(os.Args[2:]); nil != err {
			log.Panicf("parse addBlockCmd failed! %v\n", err)
		}
	case "printblockchain":
		if err := printChainCmd.Parse(os.Args[2:]); nil != err {
			log.Panicf("parse printChainCmd failed! %v\n", err)
		}
	case "createblockchain":
		if err := createBlockChainWithGenesisBlockCmd.Parse(os.Args[2:]); nil != err {
			log.Panicf("parse createBlockChainWithGenesisBlockCmd failed! %v\n", err)
		}
	default:
		// No subcommand given, or it is not in the list.
		PrintUsage()
		os.Exit(1)
	}
	// Handle addblock.
	if addBlockCmd.Parsed() {
		if *flagAddBlockArg == "" {
			PrintUsage()
			os.Exit(1)
		}
		cli.addBlock([]*Transaction{})
	}
	// Handle printblockchain.
	if printChainCmd.Parsed() {
		cli.printChain()
	}
	// Handle createblockchain.
	if createBlockChainWithGenesisBlockCmd.Parsed() {
		if *flagCreateBlockchain == "" {
			PrintUsage()
			os.Exit(1)
		}
		cli.createBlockchain(*flagCreateBlockchain)
	}
	//./bc.exe send -from '[\"*\"]' -to '[\"aaa\"]' -amount '[\"10\"]'
	// Handle send: validate arguments and echo the parsed values.
	if sendCmd.Parsed() {
		if *flagSendFromArg == "" {
			fmt.Println("the source address shall not be nil")
			PrintUsage()
			os.Exit(1)
		}
		if *flagSendToArg == "" {
			fmt.Println("the destination of transaction shall not be nil")
			PrintUsage()
			os.Exit(1)
		}
		if *flagSendAmountArg == "" {
			fmt.Println("the value shall not be nil")
			PrintUsage()
			os.Exit(1)
		}
		fmt.Printf("\tFROM:[%s]\n", JSONToSlice(*flagSendFromArg))
		fmt.Printf("\tTO:[%s]\n", JSONToSlice(*flagSendToArg))
		fmt.Printf("\tAMOUNT:[%s]\n", JSONToSlice(*flagSendAmountArg))
	}
}
|
package bills
// Bill represents an uploaded bill and the names under which its file is
// stored.
// Fixes: the file did not compile — `import {` used braces instead of
// parentheses (the empty import is removed entirely), the GeneratedFileName
// json tag had a space after the colon (ignored by encoding/json), and
// NewBill used call parentheses instead of a composite literal.
type Bill struct {
	UUID              string `json:"uuid"`
	OriginalFileName  string `json:"OriginalFileName"`
	GeneratedFileName string `json:"GeneratedFileName"`
}

// NewBill is responsible for creating an instance of the Bill type.
func NewBill(uuid string, originalfilename string, generatedfilename string) *Bill {
	return &Bill{UUID: uuid, OriginalFileName: originalfilename, GeneratedFileName: generatedfilename}
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
func main() {
scanner := bufio.NewScanner(os.Stdin)
var numeri []float64
fmt.Println("inserisci i numeri")
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
input := scanner.Text()
numconv, _ := strconv.ParseFloat(input, 4)
numeri = append(numeri, numconv)
}
fmt.Println(max(numeri))
fmt.Println(min(numeri))
fmt.Println(med(numeri))
}
// max returns the largest value in num, or 0 for an empty slice.
// Fix: the named return started at 0, which was wrong for inputs that are
// all negative; the scan now starts from the first element.
func max(num []float64) (max float64) {
	if len(num) == 0 {
		return 0
	}
	max = num[0]
	for _, v := range num[1:] {
		if v > max {
			max = v
		}
	}
	return max
}
// min returns the smallest value in num, or 0 for an empty slice.
// Fix: the scan started from the arbitrary sentinel 1e9, which was wrong
// for inputs whose values all exceed it; it now starts from the first
// element.
func min(num []float64) (min float64) {
	if len(num) == 0 {
		return 0
	}
	min = num[0]
	for _, v := range num[1:] {
		if v < min {
			min = v
		}
	}
	return min
}
// med returns the arithmetic mean of num, or 0 for an empty slice.
// Fixes: the loop stopped at len(num)-1 and so excluded the last element
// from the sum while still dividing by the full length; an empty input
// also produced NaN (0/0).
func med(num []float64) (med float64) {
	if len(num) == 0 {
		return 0
	}
	totale := 0.0
	for _, v := range num {
		totale += v
	}
	return totale / float64(len(num))
}
|
// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rust
import (
"strings"
"testing"
)
// Test that variants are being generated correctly, and that crate-types are correct.
func TestLibraryVariants(t *testing.T) {
	ctx := testRust(t, `
		rust_library_host {
			name: "libfoo",
			srcs: ["foo.rs"],
			crate_name: "foo",
		}`)

	// Test all variants are being built.
	libfooRlib := ctx.ModuleForTests("libfoo", "linux_glibc_x86_64_rlib").Output("libfoo.rlib")
	libfooDylib := ctx.ModuleForTests("libfoo", "linux_glibc_x86_64_dylib").Output("libfoo.dylib.so")
	libfooStatic := ctx.ModuleForTests("libfoo", "linux_glibc_x86_64_static").Output("libfoo.a")
	libfooShared := ctx.ModuleForTests("libfoo", "linux_glibc_x86_64_shared").Output("libfoo.so")

	rlibCrateType := "rlib"
	dylibCrateType := "dylib"
	sharedCrateType := "cdylib"
	staticCrateType := "static"

	// Test crate type for rlib is correct.
	// (Message fixed: it previously said "static variant" here.)
	if !strings.Contains(libfooRlib.Args["rustcFlags"], "crate-type="+rlibCrateType) {
		t.Errorf("missing crate-type for rlib variant, expecting %#v, rustcFlags: %#v", rlibCrateType, libfooRlib.Args["rustcFlags"])
	}

	// Test crate type for dylib is correct.
	// (Message fixed: it previously said "static variant" here.)
	if !strings.Contains(libfooDylib.Args["rustcFlags"], "crate-type="+dylibCrateType) {
		t.Errorf("missing crate-type for dylib variant, expecting %#v, rustcFlags: %#v", dylibCrateType, libfooDylib.Args["rustcFlags"])
	}

	// Test crate type for C static libraries is correct.
	if !strings.Contains(libfooStatic.Args["rustcFlags"], "crate-type="+staticCrateType) {
		t.Errorf("missing crate-type for static variant, expecting %#v, rustcFlags: %#v", staticCrateType, libfooStatic.Args["rustcFlags"])
	}

	// Test crate type for C shared libraries is correct.
	if !strings.Contains(libfooShared.Args["rustcFlags"], "crate-type="+sharedCrateType) {
		t.Errorf("missing crate-type for shared variant, expecting %#v, got rustcFlags: %#v", sharedCrateType, libfooShared.Args["rustcFlags"])
	}
}
// Test that dylibs are not statically linking the standard library.
func TestDylibPreferDynamic(t *testing.T) {
	ctx := testRust(t, `
		rust_library_host_dylib {
			name: "libfoo",
			srcs: ["foo.rs"],
			crate_name: "foo",
		}`)

	// The dylib variant must carry rustc's prefer-dynamic flag.
	module := ctx.ModuleForTests("libfoo", "linux_glibc_x86_64_dylib")
	flags := module.Output("libfoo.dylib.so").Args["rustcFlags"]
	if !strings.Contains(flags, "prefer-dynamic") {
		t.Errorf("missing prefer-dynamic flag for libfoo dylib, rustcFlags: %#v", flags)
	}
}
// TestValidateLibraryStem checks the error paths of library validation:
// a missing crate_name, an ill-formed crate_name, and name/stem values
// that do not follow the lib<crate_name> filename convention.
func TestValidateLibraryStem(t *testing.T) {
	// crate_name is mandatory for libraries.
	testRustError(t, "crate_name must be defined.", `
		rust_library_host {
			name: "libfoo",
			srcs: ["foo.rs"],
		}`)

	// crate_name may only contain alphanumerics and underscores.
	testRustError(t, "library crate_names must be alphanumeric with underscores allowed", `
		rust_library_host {
			name: "libfoo-bar",
			srcs: ["foo.rs"],
			crate_name: "foo-bar"
		}`)

	// Module name must start with lib<crate_name> when no stem is given.
	testRustError(t, "Invalid name or stem property; library filenames must start with lib<crate_name>", `
		rust_library_host {
			name: "foobar",
			srcs: ["foo.rs"],
			crate_name: "foo_bar"
		}`)

	// A stem of "libfoo" still does not match crate_name "foo_bar".
	testRustError(t, "Invalid name or stem property; library filenames must start with lib<crate_name>", `
		rust_library_host {
			name: "foobar",
			stem: "libfoo",
			srcs: ["foo.rs"],
			crate_name: "foo_bar"
		}`)

	// A stem equal to the crate_name (without the lib prefix) is rejected too.
	testRustError(t, "Invalid name or stem property; library filenames must start with lib<crate_name>", `
		rust_library_host {
			name: "foobar",
			stem: "foo_bar",
			srcs: ["foo.rs"],
			crate_name: "foo_bar"
		}`)
}
|
package tmpl1
import (
"fmt"
"testing"
)
// TestMySqrt runs the shared table-driven cases against both integer
// square-root implementations.
func TestMySqrt(t *testing.T) {
	testMySqrt(t, mySqrt, mySqrt1)
}
// testMySqrt checks every implementation in fs against a table of
// (input, floor-sqrt) pairs, including boundary values around perfect
// squares and the int32/int64 maxima.
func testMySqrt(t *testing.T, fs ...func(int) int) {
	tcs := []struct {
		input  int
		expect int
	}{
		{0, 0},
		{1, 1},
		{2, 1},
		{3, 1},
		{4, 2},
		{5, 2},
		{6, 2},
		{7, 2},
		{8, 2},
		{9, 3},
		{65535, 255},
		{65536, 256},
		{65537, 256},
		{1<<31 - 1, 46340},
		{1<<63 - 1, 3037000499},
	}
	// One subtest per (implementation, case) pair so failures pinpoint both.
	for fIdx, f := range fs {
		for i, tc := range tcs {
			t.Run(fmt.Sprintf("func #%d, task #%d", fIdx, i), func(t *testing.T) {
				if actual := f(tc.input); actual != tc.expect {
					t.Errorf("actual: %d, expect: %d", actual, tc.expect)
				}
			})
		}
	}
}
// BenchmarkMySqrt benchmarks the primary implementation.
func BenchmarkMySqrt(b *testing.B) {
	benchmarkMySqrt(b, mySqrt)
}
// BenchmarkMySqrt1 benchmarks the alternative implementation.
func BenchmarkMySqrt1(b *testing.B) {
	benchmarkMySqrt(b, mySqrt1)
}
// benchmarkMySqrt exercises f over two input ranges per iteration: the
// 1<<10 largest ints below the int64 maximum and the 1<<10 smallest
// positive ints, covering both extremes of the input domain.
func benchmarkMySqrt(b *testing.B, f func(int) int) {
	for i := 0; i < b.N; i++ {
		for n := 1<<63 - 1<<10; n < (1<<63 - 1); n++ {
			f(n)
		}
		for n := 1; n < 1<<10; n++ {
			f(n)
		}
	}
}
/*
goos: linux
goarch: amd64
pkg: adventure/BinarySearch/bs/tmpl1
BenchmarkMySqrt-8 5000 289058 ns/op
BenchmarkMySqrt1-8 3000 552816 ns/op
PASS
*/
|
package controller
import (
"github.com/allentom/youcomic-api/auth"
ApiError "github.com/allentom/youcomic-api/error"
"github.com/allentom/youcomic-api/model"
"github.com/allentom/youcomic-api/permission"
"github.com/allentom/youcomic-api/serializer"
"github.com/allentom/youcomic-api/services"
"github.com/allentom/youcomic-api/utils"
"github.com/allentom/youcomic-api/validate"
"github.com/gin-gonic/gin"
"net/http"
)
// RegisterUserResponseBody is the JSON payload bound from the register
// request.
// NOTE(review): despite the "ResponseBody" suffix this is used as a
// request body; renaming the exported type would break callers, so the
// name is kept as-is.
type RegisterUserResponseBody struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Email    string `json:"email"`
}

// register user handler
//
// path: /user/register
//
// method: post
//
// Flow: bind JSON -> validate (unique username, username/password
// length, email format) -> create the user via services.RegisterUser.
var RegisterUserHandler gin.HandlerFunc = func(context *gin.Context) {
	var err error
	requestBody := RegisterUserResponseBody{}
	err = context.ShouldBindJSON(&requestBody)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.JsonParseError, nil)
		return
	}
	// check validate
	// Each failing validator raises its own API error; we only stop here.
	if isValidate := validate.RunValidatorsAndRaiseApiError(context,
		&validate.UniqUserNameValidator{Value: requestBody.Username},
		&validate.StringLengthValidator{Value: requestBody.Username, FieldName: "username", LessThan: 16, GreaterThan: 4},
		&validate.StringLengthValidator{Value: requestBody.Password, FieldName: "password", LessThan: 16, GreaterThan: 4},
		&validate.EmailValidator{Value: requestBody.Email},
	); !isValidate {
		return
	}
	user := model.User{Username: requestBody.Username, Password: requestBody.Password, Email: requestBody.Email}
	err = services.RegisterUser(&user)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}
// LoginUserRequestBody is the JSON payload bound from the auth request.
type LoginUserRequestBody struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// UserAuthResponse is the JSON body returned on successful login: the
// user id plus a signed token.
type UserAuthResponse struct {
	Id   uint   `json:"id"`
	Sign string `json:"sign"`
}

// login user handler
//
// path: /user/auth
//
// method: post
//
// Flow: bind JSON -> validate field lengths -> services.UserLogin
// returns the user and a signed token, which are echoed to the client.
var LoginUserHandler gin.HandlerFunc = func(context *gin.Context) {
	var err error
	requestBody := LoginUserRequestBody{}
	err = context.ShouldBindJSON(&requestBody)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.JsonParseError, nil)
		return
	}
	//validate value
	if isValidate := validate.RunValidatorsAndRaiseApiError(context,
		&validate.StringLengthValidator{Value: requestBody.Username, FieldName: "username", LessThan: 16, GreaterThan: 4},
		&validate.StringLengthValidator{Value: requestBody.Password, FieldName: "password", LessThan: 16, GreaterThan: 4},
	); !isValidate {
		return
	}
	user, sign, err := services.UserLogin(requestBody.Username, requestBody.Password)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	context.JSON(http.StatusOK, UserAuthResponse{
		Id:   user.ID,
		Sign: sign,
	})
}
// get user handler
//
// path: /user/:id
//
// method: get
//
// Looks up a single user by the path id and serializes it with the
// base (non-privileged) user template.
var GetUserHandler gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.RequestPathError, nil)
		return
	}
	var user model.User
	err = services.GetModelById(&user, id)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	template := serializer.BaseUserTemplate{}
	err = template.Serializer(user, nil)
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	context.JSON(http.StatusOK, template)
}
// get user groups handler
//
// path: /user/:id/groups
//
// method: get
//
// Returns the user groups the user identified by the path id belongs to.
var GetUserUserGroupsHandler gin.HandlerFunc = func(context *gin.Context) {
	var err error
	id, err := GetLookUpId(context, "id")
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.RequestPathError, nil)
		return
	}
	queryBuilder := services.UserGroupQueryBuilder{}
	queryBuilder.SetUserGroupUser(id)
	count, usergroups, err := queryBuilder.ReadModels()
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	result := serializer.SerializeMultipleTemplate(usergroups, &serializer.BaseUserGroupTemplate{}, nil)
	responseBody := serializer.DefaultListContainer{}
	// NOTE(review): page/pageSize are hard-coded here while the query
	// itself is unpaginated — confirm whether real pagination was intended.
	responseBody.SerializeList(result, map[string]interface{}{
		"page":     1,
		"pageSize": 10,
		"count":    count,
		"url":      context.Request.URL,
	})
	context.JSON(http.StatusOK, responseBody)
}
// get user list handler
//
// path: /users
//
// method: get
//
// Requires authentication and the get-user-list permission. Applies
// pagination plus the query-string filters below, then returns the
// matching users serialized with the manager template.
var GetUserUserListHandler gin.HandlerFunc = func(context *gin.Context) {
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
		return
	}
	if hasPermission := permission.CheckPermissionAndServerError(context,
		&permission.StandardPermissionChecker{PermissionName: permission.GetUserListPermissionName, UserId: claims.UserId},
	); !hasPermission {
		return
	}
	userQueryBuilder := services.UserQueryBuilder{}
	//get page
	pagination := DefaultPagination{}
	pagination.Read(context)
	userQueryBuilder.SetPageFilter(pagination.Page, pagination.PageSize)
	//query filter
	filterMapping := []FilterMapping{
		{
			Lookup: "id",
			Method: "InId",
			Many:   true,
		},
		{
			Lookup: "name",
			Method: "SetUserNameFilter",
			Many:   true,
		},
		{
			Lookup: "nameSearch",
			Method: "SetNameSearchQueryFilter",
			Many:   true,
		},
		{
			Lookup: "nicknameSearch",
			Method: "SetNicknameSearchQueryFilter",
			Many:   true,
		},
		{
			Lookup: "usergroup",
			Method: "SetUserGroupQueryFilter",
			Many:   true,
		},
		{
			Lookup: "order",
			Method: "SetOrderFilter",
			Many:   true,
		},
	}
	for _, filter := range filterMapping {
		utils.FilterByParam(context, filter.Lookup, &userQueryBuilder, filter.Method, filter.Many)
	}
	count, users, err := userQueryBuilder.ReadModels()
	// Fix: this error was previously ignored, so a failed query fell
	// through and serialized a nil result as a success response.
	if err != nil {
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	result := serializer.SerializeMultipleTemplate(users, &serializer.ManagerUserTemplate{}, nil)
	responseBody := serializer.DefaultListContainer{}
	responseBody.SerializeList(result, map[string]interface{}{
		"page":     pagination.Page,
		"pageSize": pagination.PageSize,
		"count":    count,
		"url":      context.Request.URL,
	})
	context.JSON(http.StatusOK, responseBody)
}
// ChangeUserPasswordRequestBody is the JSON payload for a password change.
type ChangeUserPasswordRequestBody struct {
	OldPassword string `json:"oldPassword"`
	NewPassword string `json:"newPassword"`
}

// change password handler
//
// path: /user/password
//
// method: put
//
// Flow: authenticate -> bind JSON -> validate lengths -> delegate to
// services.ChangeUserPassword for the authenticated user.
var ChangeUserPasswordHandler gin.HandlerFunc = func(context *gin.Context) {
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
		return
	}
	requestBody := ChangeUserPasswordRequestBody{}
	err = context.ShouldBindJSON(&requestBody)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.JsonParseError, nil)
		return
	}
	isValidate := validate.RunValidatorsAndRaiseApiError(
		context,
		&validate.StringLengthValidator{Value: requestBody.OldPassword, GreaterThan: 4, LessThan: 256, FieldName: "oldPassword"},
		&validate.StringLengthValidator{Value: requestBody.NewPassword, GreaterThan: 4, LessThan: 256, FieldName: "newPassword"},
	)
	if !isValidate {
		return
	}
	err = services.ChangeUserPassword(claims.UserId, requestBody.OldPassword, requestBody.NewPassword)
	if err != nil {
		// A wrong old password is reported as an auth failure rather
		// than a generic server error.
		if err == services.UserPasswordInvalidate {
			ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
			return
		}
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}
// ChangeUserNicknameRequestBody is the JSON payload for a nickname change.
type ChangeUserNicknameRequestBody struct {
	Nickname string `json:"nickname"`
}

// change nickname handler
//
// path: /user/nickname
//
// method: put
//
// Flow: authenticate -> bind JSON -> validate length -> delegate to
// services.ChangeUserNickname for the authenticated user.
var ChangeUserNicknameHandler gin.HandlerFunc = func(context *gin.Context) {
	claims, err := auth.ParseAuthHeader(context)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
		return
	}
	requestBody := ChangeUserNicknameRequestBody{}
	err = context.ShouldBindJSON(&requestBody)
	if err != nil {
		ApiError.RaiseApiError(context, ApiError.JsonParseError, nil)
		return
	}
	isValidate := validate.RunValidatorsAndRaiseApiError(
		context,
		&validate.StringLengthValidator{Value: requestBody.Nickname, GreaterThan: 4, LessThan: 256, FieldName: "nickname"},
	)
	if !isValidate {
		return
	}
	err = services.ChangeUserNickname(claims.UserId, requestBody.Nickname)
	if err != nil {
		// An unknown user id means the token no longer maps to a user.
		if err == services.UserNotFoundError {
			ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
			return
		}
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}
// get account histories
//
// path: /account/histories
//
// method: get
//
// Lists the authenticated user's history records via the generic
// ListView helper (pagination + id/order filters).
var UserHistoryHandler gin.HandlerFunc = func(context *gin.Context) {
	queryBuilder := &services.HistoryQueryBuilder{}
	// The "claim" key is expected to be set by auth middleware; the
	// type assertion below panics if it is missing.
	userClaimsInterface, _ := context.Get("claim")
	userClaim := userClaimsInterface.(*auth.UserClaims)
	queryBuilder.SetUserIdFilter(userClaim.UserId)
	view := ListView{
		Context:      context,
		Pagination:   &DefaultPagination{},
		QueryBuilder: queryBuilder,
		FilterMapping: []FilterMapping{
			{
				Lookup: "id",
				Method: "InId",
				Many:   true,
			},
			{
				Lookup: "order",
				Method: "SetOrderFilter",
				Many:   false,
			},
		},
		GetContainer: func() serializer.ListContainerSerializer {
			return &serializer.DefaultListContainer{}
		},
		GetTemplate: func() serializer.TemplateSerializer {
			return &serializer.BaseHistoryTemplate{}
		},
	}
	view.Run()
}
// clear account histories
//
// path: /account/histories
//
// method: delete
//
// Permanently deletes all history records belonging to the
// authenticated user.
var DeleteUserHistoryHandler gin.HandlerFunc = func(context *gin.Context) {
	// The "claim" key is expected to be set by auth middleware; the
	// type assertion below panics if it is missing.
	userClaimsInterface, _ := context.Get("claim")
	userClaim := userClaimsInterface.(*auth.UserClaims)
	queryBuilder := services.HistoryQueryBuilder{}
	queryBuilder.SetUserIdFilter(userClaim.UserId)
	err := queryBuilder.DeleteModels(true)
	if err != nil {
		if err == services.UserNotFoundError {
			ApiError.RaiseApiError(context, ApiError.UserAuthFailError, nil)
			return
		}
		// Fix: errors other than UserNotFoundError were previously
		// swallowed and a success response was returned anyway.
		ApiError.RaiseApiError(context, err, nil)
		return
	}
	ServerSuccessResponse(context)
}
|
package main
import (
"log"
"runtime"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/glfw/v3.2/glfw"
"./basic"
"./ball"
"math/rand"
)
// Window dimensions in pixels.
const (
	width  = 500
	height = 500
)
// Drawable is anything that can render itself to the current GL context.
type Drawable interface {
	Draw()
}

// BallNum is the number of balls simulated and drawn each frame.
const BallNum = 1
// main sets up the window and GL context, creates the balls, and runs
// the simulate-then-draw loop until the window is closed.
func main() {
	// GLFW/GL calls must all happen on the same OS thread.
	runtime.LockOSThread()

	window := initGlfw()
	defer glfw.Terminate()
	initOpenGL()

	// Fixed seed keeps the initial ball positions reproducible.
	rand.Seed(123456)
	balls := make([]*ball.Ball, BallNum)
	for i := range balls {
		balls[i] = ball.NewBall(&basic.Point{X: -0.9 + float32(i)*0.2, Y: rand.Float32()})
	}

	for !window.ShouldClose() {
		for _, b := range balls {
			b.Update(0.01)
		}
		draw(balls, window)
	}
}
// draw clears the framebuffer, renders every ball, pumps window events,
// and presents the frame.
func draw(balls []*ball.Ball, window *glfw.Window) {
	gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
	for _, b := range balls {
		b.Draw()
	}
	glfw.PollEvents()
	window.SwapBuffers()
}
// initGlfw initializes GLFW and returns a non-resizable window with an
// OpenGL 4.1 core-profile context made current on the calling thread.
// Panics on any initialization failure.
func initGlfw() *glfw.Window {
	if err := glfw.Init(); err != nil {
		panic(err)
	}
	// Hints must be set before CreateWindow to take effect.
	glfw.WindowHint(glfw.Resizable, glfw.False)
	glfw.WindowHint(glfw.ContextVersionMajor, 4)
	glfw.WindowHint(glfw.ContextVersionMinor, 1)
	glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile)
	glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True)
	window, err := glfw.CreateWindow(width, height, "Title", nil, nil)
	if err != nil {
		panic(err)
	}
	window.MakeContextCurrent()
	return window
}
// initOpenGL loads the OpenGL function pointers and logs the driver's
// version string. Panics if GL initialization fails.
func initOpenGL() {
	if err := gl.Init(); err != nil {
		panic(err)
	}
	log.Println("OpenGL version", gl.GoStr(gl.GetString(gl.VERSION)))
}
|
// Copyright (c) 2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"errors"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"strconv"
"time"
rpc "github.com/conformal/btcrpcclient"
"github.com/conformal/btcutil"
)
// Actor describes an actor on the simulation network. Each actor runs
// independently without external input to decide its behavior.
type Actor struct {
	args       procArgs          // command-line configuration for the wallet process
	cmd        *exec.Cmd         // the running btcwallet process; nil until Start
	client     *rpc.Client       // RPC connection to the wallet; nil until Start connects
	addressNum int               // number of wallet addresses to generate on Start
	downstream chan btcutil.Address // addresses to send funds to; nil while spending is paused
	upstream   chan btcutil.Address // addresses handed out (e.g. to the cpu miner)
	stop       chan struct{}
	quit       chan struct{} // closed by Stop to end Start's select loop
}
// NewActor creates a new actor which runs its own wallet process connecting
// to the btcd chain server specified by chain, and listening for simulator
// websocket connections on the specified port.
func NewActor(chain *ChainServer, port uint16) (*Actor, error) {
	// Please don't run this as root.
	if port < 1024 {
		return nil, errors.New("invalid actor port")
	}

	// Each actor gets its own throwaway data directory; it is removed
	// again by Cleanup.
	dir, err := ioutil.TempDir("", "actor")
	if err != nil {
		return nil, err
	}

	a := Actor{
		args: procArgs{
			chainSvr:         *chain,
			dir:              dir,
			port:             strconv.FormatUint(uint64(port), 10),
			walletPassphrase: "walletpass",
		},
		addressNum: 1000,
		quit:       make(chan struct{}),
	}
	return &a, nil
}
// Start creates the command to execute a wallet process and starts the
// command in the background, attaching the command's stderr and stdout
// to the passed writers. Nil writers may be used to discard output.
//
// In addition to starting the wallet process, this runs goroutines to
// handle wallet notifications (TODO: actually do this) and requests
// the wallet process to create an initial encrypted wallet, so that it
// can actually send and receive BTC.
//
// If the RPC client connection cannot be established or wallet cannot
// be created, the wallet process is killed and the actor directory
// removed.
func (a *Actor) Start(stderr, stdout io.Writer, com Communication) error {
	// Overwriting the previously created command would be sad.
	if a.cmd != nil {
		return errors.New("actor command previously created")
	}

	balanceUpdate := make(chan btcutil.Amount, 1)
	connected := make(chan struct{})
	var firstConn bool
	const timeoutSecs int64 = 3600 * 24

	a.downstream = com.downstream
	a.upstream = com.upstream
	a.stop = com.stop

	// Create and start command in background.
	a.cmd = a.args.Cmd()
	// Fix: these two assignments were swapped, routing the process's
	// stdout to the caller's stderr writer and vice versa.
	a.cmd.Stdout = stdout
	a.cmd.Stderr = stderr
	if err := a.cmd.Start(); err != nil {
		if err := a.Cleanup(); err != nil {
			log.Printf("Cannot remove actor directory after "+
				"failed start of wallet process: %v", err)
		}
		return err
	}

	// Create and start RPC client.
	rpcConf := rpc.ConnConfig{
		Host:         "localhost:" + a.args.port,
		Endpoint:     "ws",
		User:         a.args.chainSvr.user,
		Pass:         a.args.chainSvr.pass,
		Certificates: a.args.chainSvr.cert,
	}

	ntfnHandlers := rpc.NotificationHandlers{
		// Signal only the first successful btcd connection.
		OnBtcdConnected: func(conn bool) {
			if conn && !firstConn {
				firstConn = true
				connected <- struct{}{}
			}
		},

		// Update on every round bitcoin value.
		OnAccountBalance: func(account string, balance btcutil.Amount, confirmed bool) {
			if balance%1 == 0 && len(balanceUpdate) == 0 {
				balanceUpdate <- balance
			} else if balance%1 == 0 {
				// Discard previous update
				<-balanceUpdate
				balanceUpdate <- balance
			}
		},
	}

	// The RPC client will not wait for the RPC server to start up, so
	// loop a few times and attempt additional connections, sleeping
	// after each failure.
	var client *rpc.Client
	var connErr error
	for i := 0; i < connRetry; i++ {
		if client, connErr = rpc.New(&rpcConf, &ntfnHandlers); connErr != nil {
			time.Sleep(time.Duration(i) * 50 * time.Millisecond)
			continue
		}
		a.client = client
		break
	}
	if a.client == nil {
		if err := a.cmd.Process.Kill(); err != nil {
			log.Printf("Cannot kill wallet process after failed "+
				"client connect: %v", err)
		}
		if err := a.Cleanup(); err != nil {
			log.Printf("Cannot remove actor directory after "+
				"failed client connect: %v", err)
		}
		return connErr
	}

	// Wait for btcd to connect
	<-connected

	// Create the wallet.
	if err := a.client.CreateEncryptedWallet(a.args.walletPassphrase); err != nil {
		if err := a.cmd.Process.Kill(); err != nil {
			log.Printf("Cannot kill wallet process after failed "+
				"wallet creation: %v", err)
		}
		if err := a.Cleanup(); err != nil {
			log.Printf("Cannot remove actor directory after "+
				"failed wallet creation: %v", err)
		}
		return err
	}

	// Create wallet addresses and unlock wallet.
	log.Printf("%s: Creating wallet addresses. This may take a while...", rpcConf.Host)
	addressSpace := make([]btcutil.Address, a.addressNum)
	for i := range addressSpace {
		addr, err := client.GetNewAddress()
		if err != nil {
			log.Printf("%s: Cannot create address #%d", rpcConf.Host, i+1)
			return err
		}
		addressSpace[i] = addr
	}

	if err := a.client.WalletPassphrase(a.args.walletPassphrase, timeoutSecs); err != nil {
		// NOTE(review): the unlock failure is logged but nil is
		// returned, so the actor reports success without ever entering
		// the send loop below — confirm whether this should return err.
		log.Printf("%s: Cannot unlock wallet: %v", rpcConf.Host, err)
		return nil
	}

	// TODO: Probably add OnRescanFinished notification and make it sync here

	// Send a random address upstream that will be used by the cpu miner.
	a.upstream <- addressSpace[rand.Int()%a.addressNum]

	// Receive from downstream (ie. start spending funds) only after coinbase matures
	a.downstream = nil
	var balance btcutil.Amount

out:
	for {
		select {
		case a.upstream <- addressSpace[rand.Int()%a.addressNum]:
		case addr := <-a.downstream:
			// TODO: Probably handle following error better
			if _, err := a.client.SendFromMinConf("", addr, 1, 0); err != nil {
				log.Printf("%s: Cannot proceed with latest transaction: %v", rpcConf.Host, err)
			}
		case balance = <-balanceUpdate:
			// Start sending funds
			if balance > 100 && a.downstream == nil {
				a.downstream = com.downstream
				// else block downstream (ie. stop spending funds)
			} else if balance == 0 && a.downstream != nil {
				a.downstream = nil
			}
		case <-a.quit:
			break out
		}
	}
	return nil
}
// Stop kills the Actor's wallet process and shuts down any goroutines running
// to manage the Actor's behavior.
func (a *Actor) Stop() (err error) {
	// Kill first so Wait below can reap the process; the kill error
	// (if any) is what gets returned to the caller.
	if killErr := a.cmd.Process.Kill(); killErr != nil {
		err = killErr
	}
	// Wait's error is deliberately ignored: a killed process always
	// reports a non-nil exit status that carries no extra information.
	a.cmd.Wait()
	// Closing quit unblocks the select loop in Start.
	close(a.quit)
	return
}
// Cleanup removes the directory an Actor's wallet process was previously using.
func (a *Actor) Cleanup() error {
	return os.RemoveAll(a.args.dir)
}
// procArgs holds everything needed to build one actor's btcwallet
// command line: the chain server to connect to, the data directory,
// the RPC listen port, and the wallet passphrase.
type procArgs struct {
	chainSvr         ChainServer
	dir              string
	port             string
	walletPassphrase string
}
// Cmd builds the btcwallet exec.Cmd from the stored arguments.
func (p *procArgs) Cmd() *exec.Cmd {
	return exec.Command("btcwallet", p.args()...)
}
// args assembles the btcwallet command-line flags for this actor.
func (p *procArgs) args() []string {
	flags := make([]string, 0, 8)
	flags = append(flags, "--simnet")
	flags = append(flags, "--datadir="+p.dir)
	flags = append(flags, "--username="+p.chainSvr.user)
	flags = append(flags, "--password="+p.chainSvr.pass)
	flags = append(flags, "--rpcconnect="+p.chainSvr.connect)
	flags = append(flags, "--rpclisten=:"+p.port)
	flags = append(flags, "--rpccert="+p.chainSvr.certPath)
	flags = append(flags, "--rpckey="+p.chainSvr.keyPath)
	return flags
}
|
package dushengchen
/**
Submission:
https://leetcode.com/submissions/detail/366509188/
*/
// multiply returns the product of two non-negative decimal integers
// given as strings, using schoolbook long multiplication so arbitrarily
// large operands are handled without overflow.
func multiply(num1 string, num2 string) string {
	d1 := []rune(num1)
	d2 := []rune(num2)
	// acc holds result digits little-endian as ASCII runes; untouched
	// cells stay rune(0) and are skipped when reading the result back.
	acc := make([]rune, len(num1)+len(num2))
	acc[0] = '0'
	for i := range d1 {
		p := int(d1[len(d1)-1-i] - '0')
		for j := range d2 {
			q := int(d2[len(d2)-1-j] - '0')
			carry := p * q
			// Add the partial product into acc, rippling the carry.
			for k := i + j; ; k++ {
				cur := int(acc[k] - '0')
				if cur < 0 || cur > 9 {
					// Cell was never written; treat as digit 0.
					cur = 0
				}
				carry += cur
				acc[k] = rune(carry%10) + '0'
				carry /= 10
				if carry == 0 {
					break
				}
			}
		}
	}
	// Read digits back most-significant first, skipping unwritten cells.
	var out []rune
	allZero := true
	for k := len(acc) - 1; k >= 0; k-- {
		if d := int(acc[k] - '0'); d >= 0 && d <= 9 {
			out = append(out, acc[k])
			if d != 0 {
				allZero = false
			}
		}
	}
	if allZero {
		return "0"
	}
	return string(out)
}
// runeToInt converts an ASCII digit rune to its integer value
// (negative or >9 results indicate c was not a digit).
func runeToInt(c rune) int {
	return int(c) - int('0')
}
// intToRune converts a single-digit integer (0-9) to its ASCII rune.
func intToRune(i int) rune {
	return '0' + rune(i)
}
|
package sfen
import (
"testing"
"bytes"
"fmt"
)
// TestParsePosition parses a full "position startpos moves ..." game
// record and checks that the resulting position re-serializes to the
// expected SFEN string.
func TestParsePosition(t *testing.T) {
	s, err := ParsePosition("position startpos moves 7g7f 3c3d 6g6f 8c8d 2h6h 8d8e 8h7g 7a6b 5i4h 5c5d 3i3h 5a4b 4h3i 1c1d 1g1f 3a3b 7i7h 6b5c 3i2h 6a5b 6i5h 4b3a 4g4f 2c2d 3g3f 3b2c 5h4g 4a3b 2i3g 2b4d 5g5f 3a2b 6f6e 4d7g+ 7h7g 4c4d 9g9f 9c9d 2g2f 7c7d B*6f 8b6b 3g4e 5c4b 6f4d 4b3c 4e3c+ 2a3c 4d6b+ 5b6b R*7a N*4e 7a4a+ B*7c 6e6d 7c6d 4a8a B*7c N*3g 4e3g+ 3h3g N*4e 4i3h 4e3g+ 3h3g 3c4e 6h6d 7c6d B*4d S*3c 4d6b+ 4e3g+ 2h3g R*4c G*2a 2b1c S*2b 3c2b N*4d P*4e 3g4h 4e4f 4g3g S*4g 4h5g 4g5f+ 5g5f G*5e 5f6g 4f4g+ 2a1a 5e4f 4d3b+ 2c3b S*1b 4g5g 6g7h 6d5e P*4d 4f3g L*1h N*6e 1f1e 6e7g+ 8i7g 5g6g 7h8h 6g7g 8h9h 7g8h 9h9g S*8f 8g8f G*8g")
	if err != nil {
		t.Fatalf("ParsePosition: %v", err)
	}
	var buf bytes.Buffer
	if err := s.PrintSFEN(&buf); err != nil {
		t.Fatalf("PrintSFEN: %v", err)
	}
	// SFEN of the final position after the recorded moves; the trailing
	// number is the next move number.
	expected := "l+R6G/3+B2ssS/3p1r2k/p1p1pPppp/1p2b3P/PPP3PP1/Kg4g2/1+p6L/L7L b GS3Nn2p 115"
	if str := buf.String(); str != expected {
		t.Logf("expected=%v", expected)
		t.Logf("actual =%v", buf.String())
		t.Errorf("PrintSFEN")
	}
}
// ExampleParsePosition demonstrates parsing a full game record and
// re-serializing it to SFEN.
// NOTE(review): errors are silently swallowed here; on failure the
// example prints nothing and the output comparison fails without any
// diagnostic — consider logging the error instead.
func ExampleParsePosition() {
	s, err := ParsePosition("position startpos moves 7g7f 3c3d 6g6f 8c8d 2h6h 8d8e 8h7g 7a6b 5i4h 5c5d 3i3h 5a4b 4h3i 1c1d 1g1f 3a3b 7i7h 6b5c 3i2h 6a5b 6i5h 4b3a 4g4f 2c2d 3g3f 3b2c 5h4g 4a3b 2i3g 2b4d 5g5f 3a2b 6f6e 4d7g+ 7h7g 4c4d 9g9f 9c9d 2g2f 7c7d B*6f 8b6b 3g4e 5c4b 6f4d 4b3c 4e3c+ 2a3c 4d6b+ 5b6b R*7a N*4e 7a4a+ B*7c 6e6d 7c6d 4a8a B*7c N*3g 4e3g+ 3h3g N*4e 4i3h 4e3g+ 3h3g 3c4e 6h6d 7c6d B*4d S*3c 4d6b+ 4e3g+ 2h3g R*4c G*2a 2b1c S*2b 3c2b N*4d P*4e 3g4h 4e4f 4g3g S*4g 4h5g 4g5f+ 5g5f G*5e 5f6g 4f4g+ 2a1a 5e4f 4d3b+ 2c3b S*1b 4g5g 6g7h 6d5e P*4d 4f3g L*1h N*6e 1f1e 6e7g+ 8i7g 5g6g 7h8h 6g7g 8h9h 7g8h 9h9g S*8f 8g8f G*8g")
	if err != nil {
		return
	}
	var buf bytes.Buffer
	if err := s.PrintSFEN(&buf); err != nil {
		return
	}
	// SFEN of the final position after the recorded moves.
	expected := "l+R6G/3+B2ssS/3p1r2k/p1p1pPppp/1p2b3P/PPP3PP1/Kg4g2/1+p6L/L7L b GS3Nn2p 115"
	str := buf.String()
	fmt.Println(str == expected)
	// Output: true
}
|
// Package external contains generic structures for installer
// configuration and management.
package external
// Name is the name for the External platform.
const Name string = "external"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.