text stringlengths 11 4.05M |
|---|
package mycirculardeque
import "container/list"
// MyCircularDeque is a bounded double-ended queue backed by a doubly
// linked list. Once the capacity chosen at construction time is reached,
// inserts fail (return false) instead of growing the deque.
type MyCircularDeque struct {
	l    *list.List // holds the elements; values are stored as int
	size int        // maximum number of elements the deque may hold
}

/** Initialize your data structure here. Set the size of the deque to be k. */
func Constructor(k int) MyCircularDeque {
	return MyCircularDeque{
		l:    list.New(),
		size: k,
	}
}

/** Adds an item at the front of Deque. Return true if the operation is successful. */
func (d *MyCircularDeque) InsertFront(value int) bool {
	if d.IsFull() {
		return false
	}
	d.l.PushFront(value)
	return true
}

/** Adds an item at the rear of Deque. Return true if the operation is successful. */
func (d *MyCircularDeque) InsertLast(value int) bool {
	if d.IsFull() {
		return false
	}
	d.l.PushBack(value)
	return true
}

/** Deletes an item from the front of Deque. Return true if the operation is successful. */
func (d *MyCircularDeque) DeleteFront() bool {
	if d.IsEmpty() {
		return false
	}
	d.l.Remove(d.l.Front())
	return true
}

/** Deletes an item from the rear of Deque. Return true if the operation is successful. */
func (d *MyCircularDeque) DeleteLast() bool {
	if d.IsEmpty() {
		return false
	}
	d.l.Remove(d.l.Back())
	return true
}

/** Get the front item from the deque. Returns -1 when the deque is empty. */
func (d *MyCircularDeque) GetFront() int {
	if d.IsEmpty() {
		return -1
	}
	return d.l.Front().Value.(int)
}

/** Get the last item from the deque. Returns -1 when the deque is empty. */
func (d *MyCircularDeque) GetRear() int {
	if d.IsEmpty() {
		return -1
	}
	return d.l.Back().Value.(int)
}

/** Checks whether the circular deque is empty or not. */
func (d *MyCircularDeque) IsEmpty() bool {
	// Return the condition directly instead of an if/else that returns
	// boolean literals.
	return d.l.Len() == 0
}

/** Checks whether the circular deque is full or not. */
func (d *MyCircularDeque) IsFull() bool {
	return d.l.Len() == d.size
}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSleep exercises timer.Sleep with a zero duration; it only verifies
// the call returns without panicking or blocking.
func TestSleep(t *testing.T) {
	api := &timer{}
	api.Sleep(0)
}

// TestTimerMiddleware verifies that the middleware stores a timer API
// in the gin context under the "timerAPI" key.
func TestTimerMiddleware(t *testing.T) {
	context := createFakeGinContext()
	timerMiddleware(context)
	_, ok := context.Get("timerAPI")
	assert.True(t, ok)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package utils
import (
"context"
// Used to embed api_wrapper.js in string variable `systemDataProviderJs`.
_ "embed"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
)
// systemDataProviderJs is a stringified JS file that exposes the SystemDataProvider mojo
// API.
//go:embed api_wrapper.js
var systemDataProviderJs string

// MojoAPI is a struct that encapsulates a SystemDataProvider mojo remote.
type MojoAPI struct {
	conn       *chrome.Conn     // DevTools connection the remote was created on
	mojoRemote *chrome.JSObject // JS-side handle to the SystemDataProvider remote
}
// SystemDataProviderMojoAPI returns a MojoAPI object that is connected to a SystemDataProvider
// mojo remote instance on success, or an error. The remote is created by
// evaluating the embedded api_wrapper.js in the given connection.
func SystemDataProviderMojoAPI(ctx context.Context, conn *chrome.Conn) (*MojoAPI, error) {
	var mojoRemote chrome.JSObject
	if err := conn.Call(ctx, &mojoRemote, systemDataProviderJs); err != nil {
		return nil, errors.Wrap(err, "failed to set up the SystemDataProvider mojo API")
	}
	return &MojoAPI{conn, &mojoRemote}, nil
}
// RunFetchSystemInfo invokes fetchSystemInfo on the injected
// SystemDataProvider mojo remote; the result is discarded.
func (m *MojoAPI) RunFetchSystemInfo(ctx context.Context) error {
	const fetchWrapper = "function() { return this.fetchSystemInfo() }"
	err := m.mojoRemote.Call(ctx, nil, fetchWrapper)
	if err == nil {
		return nil
	}
	return errors.Wrap(err, "failed to run fetchSystemInfo")
}
// Release frees the resources held by the internal MojoAPI components.
func (m *MojoAPI) Release(ctx context.Context) error {
	return m.mojoRemote.Release(ctx)
}
|
package main
import (
"github.com/ohmyray/my-blog/model"
"github.com/ohmyray/my-blog/route"
)
// main boots the blog: it initializes the database connection, then sets
// up the HTTP routes (presumably InitRouter also starts serving — confirm
// in the route package).
func main() {
	model.InitConnection()
	route.InitRouter()
}
|
package main
import (
"fmt"
"encoding/hex"
"encoding/base64"
)
// main decodes a hard-coded hex string and prints its base64 encoding
// (Cryptopals set 1, challenge 1 style conversion).
func main() {
	const hexString = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d"
	// The original discarded the decode error; report it instead of
	// silently encoding a partial/empty slice.
	raw, err := hex.DecodeString(hexString)
	if err != nil {
		fmt.Println("invalid hex input:", err)
		return
	}
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
|
package keyadmin
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
)
// ConfigType models the tool's JSON configuration file.
type ConfigType struct {
	DataUri       string   `json:"data_uri"`        // location of the key data store
	PublicKeyDir  string   `json:"public_key_dir"`  // directory holding public keys
	PrivateKeyDir string   `json:"private_key_dir"` // directory holding private keys
	Way           []string `json:"way"`             // NOTE(review): semantics not visible in this file — confirm against callers
	KeyLength     int      `json:"key_length"`      // key length (presumably bits — confirm)
}
// ParseJsonConfig loads the JSON config file at configfile into the
// receiver. An empty path is treated as "nothing to load" and returns nil;
// stat, read and unmarshal failures are returned as-is.
func (config *ConfigType) ParseJsonConfig(configfile string) (err error) {
	// Empty path: silently skip loading.
	if strings.EqualFold(configfile, "") {
		return
	}
	if _, err = os.Stat(configfile); err != nil {
		return
	}
	var filebyte []byte
	if filebyte, err = ioutil.ReadFile(configfile); err != nil {
		return
	}
	err = json.Unmarshal(filebyte, config)
	return
}
// UpdateConfig overlays non-zero fields of newconfig onto config:
// non-empty strings and non-zero ints replace the current values, while
// zero values in newconfig leave the existing field untouched.
func (config *ConfigType) UpdateConfig(newconfig *ConfigType) {
	// Using reflection here is not necessary, but it's a good exercise.
	// For more information on reflections in Go, read "The Laws of Reflection"
	// http://golang.org/doc/articles/laws_of_reflection.html
	newVal := reflect.ValueOf(newconfig).Elem()
	oldVal := reflect.ValueOf(config).Elem()
	// typeOfT := newVal.Type()
	for i := 0; i < newVal.NumField(); i++ {
		newField := newVal.Field(i)
		oldField := oldVal.Field(i)
		// log.Printf("%d: %s %s = %v\n", i,
		// typeOfT.Field(i).Name, newField.Type(), newField.Interface())
		// NOTE(review): slice fields (e.g. Way, Kind == reflect.Slice) match
		// none of these cases and are never copied — confirm that is intended.
		switch newField.Kind() {
		case reflect.Interface:
			if fmt.Sprintf("%v", newField.Interface()) != "" {
				oldField.Set(newField)
			}
		case reflect.String:
			s := newField.String()
			if s != "" {
				oldField.SetString(s)
			}
		case reflect.Int:
			i := newField.Int()
			if i != 0 {
				oldField.SetInt(i)
			}
		}
	}
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conn
import (
"net"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/uber/kraken/core"
"github.com/uber/kraken/gen/go/proto/p2p"
"github.com/uber/kraken/lib/torrent/storage"
"github.com/uber/kraken/utils/bitsetutil"
)
// TestHandshakerSetsConnFieldsProperly runs a complete handshake between two
// handshakers over a loopback TCP listener and checks that both the pending
// conn (server side) and the established conns expose the negotiated peer ID,
// digest, info hash, bitfield and namespace.
// NOTE(review): require.* inside goroutines calls t.FailNow off the test
// goroutine, which the testing package documents as unsupported — confirm
// this is acceptable here.
func TestHandshakerSetsConnFieldsProperly(t *testing.T) {
	require := require.New(t)
	l1, err := net.Listen("tcp", "localhost:0")
	require.NoError(err)
	defer l1.Close()
	config := ConfigFixture()
	namespace := core.TagFixture()
	h1 := HandshakerFixture(config)
	h2 := HandshakerFixture(config)
	info := storage.TorrentInfoFixture(4, 1)
	emptyRemoteBitfields := make(RemoteBitfields)
	remoteBitfields := RemoteBitfields{
		core.PeerIDFixture(): bitsetutil.FromBools(true, false),
		core.PeerIDFixture(): bitsetutil.FromBools(false, true),
	}
	var wg sync.WaitGroup
	start := time.Now()
	// Server side: accept the incoming handshake, verify the advertised
	// fields, then establish the full conn.
	wg.Add(1)
	go func() {
		defer wg.Done()
		nc, err := l1.Accept()
		require.NoError(err)
		pc, err := h1.Accept(nc)
		require.NoError(err)
		require.Equal(h2.peerID, pc.PeerID())
		require.Equal(info.Digest(), pc.Digest())
		require.Equal(info.InfoHash(), pc.InfoHash())
		require.Equal(info.Bitfield(), pc.Bitfield())
		require.Equal(namespace, pc.Namespace())
		c, err := h1.Establish(pc, info, remoteBitfields)
		require.NoError(err)
		require.Equal(h2.peerID, c.PeerID())
		require.Equal(info.InfoHash(), c.InfoHash())
		require.True(c.CreatedAt().After(start))
	}()
	// Client side: initiate the handshake and verify the resulting conn.
	wg.Add(1)
	go func() {
		defer wg.Done()
		r, err := h2.Initialize(h1.peerID, l1.Addr().String(), info, emptyRemoteBitfields, namespace)
		require.NoError(err)
		require.Equal(h1.peerID, r.Conn.PeerID())
		require.Equal(info.InfoHash(), r.Conn.InfoHash())
		require.True(r.Conn.CreatedAt().After(start))
		require.Equal(info.Bitfield(), r.Bitfield)
		require.Equal(remoteBitfields, r.RemoteBitfields)
	}()
	wg.Wait()
}
// TestHandshakerHandlesEmptyBitfield verifies that a handshake message
// with no bitfield (a zero-value p2p.Message) is rejected by Accept.
func TestHandshakerHandlesEmptyBitfield(t *testing.T) {
	require := require.New(t)
	l1, err := net.Listen("tcp", "localhost:0")
	require.NoError(err)
	defer l1.Close()
	config := ConfigFixture()
	h1 := HandshakerFixture(config)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		nc, err := l1.Accept()
		require.NoError(err)
		_, err = h1.Accept(nc)
		// The empty handshake must not be accepted.
		require.Error(err)
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Send a zero-value message, i.e. one carrying no bitfield.
		var msg p2p.Message
		nc, err := net.DialTimeout("tcp", l1.Addr().String(), config.HandshakeTimeout)
		require.NoError(err)
		err = sendMessage(nc, &msg)
		require.NoError(err)
	}()
	wg.Wait()
}
|
package eventchannel
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBuildEndpointSender verifies the sender POSTs the given payload to
// the endpoint and reports success for a 200 response.
func TestBuildEndpointSender(t *testing.T) {
	requestBody := make([]byte, 10)
	server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
		defer req.Body.Close()
		// Capture the request body so the test can assert on it below.
		requestBody, _ = io.ReadAll(req.Body)
		res.WriteHeader(200)
	}))
	defer server.Close()
	sender := BuildEndpointSender(server.Client(), server.URL, "module")
	err := sender([]byte("message"))
	assert.Equal(t, []byte("message"), requestBody)
	assert.NoError(t, err)
}

// TestBuildEndpointSender_Error verifies a non-2xx response from the
// endpoint surfaces as an error from the sender.
func TestBuildEndpointSender_Error(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
		res.WriteHeader(400)
	}))
	defer server.Close()
	sender := BuildEndpointSender(server.Client(), server.URL, "module")
	err := sender([]byte("message"))
	assert.Error(t, err)
}
|
package config
import (
"flag"
"fmt"
"math/rand"
"net"
"strings"
"time"
)
// AppConf holds the application's global configuration parameters.
var AppConf appConf

// init registers and parses command-line flags into AppConf, applies
// defaults (loopback IP, random free port), resolves the run mode and
// records name/version/startup time.
// NOTE(review): flag.Parse in init runs at package-init time, before other
// packages can register flags — confirm intended.
func init() {
	var mode string
	flag.BoolVar(&AppConf.Debug, "debug", false, "调试模式,默认:false")
	flag.StringVar(&AppConf.IP, "ip", "", "监听的IP地址,默认:127.0.0.1")
	flag.IntVar(&AppConf.Port, "port", 0, "服务端口,默认:随机")
	flag.StringVar(&mode, "mode", "app", "运行模式:server:API服务模式;web:Web模式;app:App模式(试验),默认:app")
	// flag.StringVar(&mode, "mode", "web", "运行模式:server:API服务模式;web:Web模式;默认:web")
	flag.StringVar(&AppConf.PrefixPath, "prefix", "", "Web模式下有效,WebUI的路径前缀,默认为空")
	flag.StringVar(&AppConf.Token, "token", "", "API授权令牌,为空时不校验,默认为空")
	flag.Parse()
	if AppConf.IP == "" {
		AppConf.IP = "127.0.0.1"
	}
	if AppConf.Port <= 0 {
		// No port supplied: pick a random free one.
		AppConf.Port = newPort(AppConf.IP)
	}
	switch strings.ToLower(mode) {
	case "server":
		AppConf.IsServerMode = true
	case "web":
		AppConf.IsWebMode = true
	case "app":
		AppConf.IsAppMode = true
	default:
		// Unrecognized mode falls back to web mode.
		AppConf.IsWebMode = true
	}
	if AppConf.IsServerMode || AppConf.IsAppMode {
		// The WebUI path prefix only applies to web mode.
		AppConf.PrefixPath = ""
	}
	AppConf.Name = "dproxy"
	AppConf.Version = "0.4.0"
	AppConf.Started = time.Now().Unix()
}
// appConf is the concrete type behind AppConf; fields are populated once
// in init from command-line flags.
type appConf struct {
	Name         string // application name ("dproxy")
	Version      string // application version
	Debug        bool   // debug mode
	Mode         string // NOTE(review): never assigned in this file; the Is*Mode booleans are used instead
	IP           string // listen address
	Port         int    // listen port
	PrefixPath   string // WebUI path prefix (web mode only)
	Token        string // API auth token; empty disables the check
	Started      int64  // unix timestamp of process start
	IsServerMode bool
	IsWebMode    bool
	IsAppMode    bool
}
// newPort returns an available TCP port on ip, falling back to 8080 after
// ten failed attempts.
//
// The previous implementation dialed candidate ports and only accepted one
// when the error text contained "refused", which loops forever when
// connections fail for other reasons (firewall, timeout), and it re-seeded
// a new rand.Source on every iteration. Listening on the candidate port is
// the reliable availability test.
func newPort(ip string) int {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := 0; i < 10; i++ {
		port := r.Intn(60000)
		if port <= 0 {
			continue
		}
		ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", ip, port))
		if err != nil {
			// Port unavailable (in use or not permitted); try another.
			continue
		}
		ln.Close()
		return port
	}
	return 8080
}
|
package main
import (
"fmt"
)
// Person holds basic identity attributes shared by all people.
// Fields renamed from snake_case to Go-conventional camelCase.
type Person struct {
	firstName   string
	lastName    string
	dateOfBirth string
	sex         string
	country     string
}

// Employee is a Person employed in a section for some number of years.
type Employee struct {
	Person
	section   string
	workYears int
}

// String renders the person's name and sex for display (fmt.Stringer).
func (p *Person) String() string {
	return fmt.Sprintf("Name : %s %s, M/F : %s", p.firstName, p.lastName, p.sex)
}

// CountryCode maps the person's country name to a short code; unknown
// countries yield the empty string.
// NOTE(review): "CH" is the ISO code for Switzerland (China is "CN") —
// confirm the intended mapping before changing output.
func (p *Person) CountryCode() string {
	switch p.country {
	case "JAPAN":
		return "JP"
	case "USA":
		return "US"
	case "CHINA":
		return "CH"
	default:
		return ""
	}
}

// Sections expands the employee's section abbreviation to its full name;
// unknown sections yield the empty string.
func (e *Employee) Sections() string {
	switch e.section {
	case "FSDD1":
		return "Financial System Development Division 1"
	case "FSDD2":
		return "Financial System Development Division 2"
	case "FSDD3":
		return "Financial System Development Division 3"
	default:
		return ""
	}
}

// Men is satisfied by values that report a country code and a section.
type Men interface {
	CountryCode() string
	Sections() string
}

func main() {
	narita := &Employee{Person{"Mamoru", "Narita", "20100909", "Male", "JAPAN"}, "FSDD1", 5}
	nagata := &Employee{Person{"Sho", "Nagata", "20100109", "Female", "USA"}, "FSDD2", 20}
	mizuno := &Employee{Person{"Sanae", "Mizuno", "20000209", "Female", "JAPAN"}, "FSDD1", 10}

	var i Men
	fmt.Printf("Narita data is : %s\n", narita)
	fmt.Printf("Narita Country is : %s\n", narita.CountryCode())
	fmt.Printf("Narita Section is : %s\n", narita.Sections())

	i = nagata
	fmt.Printf("Nagata data is : %s\n", i.CountryCode())
	fmt.Printf("Nagata Section is : %s\n", i.Sections())

	i = mizuno
	fmt.Printf("Mizuno data is : %s\n", i.CountryCode())
	fmt.Printf("Mizuno Section is : %s\n", i.Sections())
}
|
package gui
import (
"image/color"
"math"
"github.com/jameshiew/fractal-explorer/internal/draw"
"github.com/jameshiew/fractal-explorer/internal/mandelbrot"
)
// Fully saturated primary colors used as blend components below.
var (
	red   = color.RGBA64{R: 65535, A: 65535}
	green = color.RGBA64{G: 65535, A: 65535}
	blue  = color.RGBA64{B: 65535, A: 65535}
)
// darkBlend is quite dark: it blends green, blue and red colorizers whose
// mandelbrot renderers use increasing iteration caps (125/250/500).
func darkBlend(z complex128) color.Color {
	return draw.Blend(
		draw.NewColorizer(green, mandelbrot.NewImageBuilder().SetMaxIterations(125).Build())(z),
		draw.NewColorizer(blue, mandelbrot.NewImageBuilder().SetMaxIterations(250).Build())(z),
		draw.NewColorizer(red, mandelbrot.NewImageBuilder().SetMaxIterations(500).Build())(z),
	)
}

// otherBlend blends three green-tinted colorizers whose renderers differ
// in iteration cap and escape bound (Phi, e, Pi).
func otherBlend(z complex128) color.Color {
	return draw.Blend(
		draw.NewColorizer(green, mandelbrot.NewImageBuilder().SetMaxIterations(120).SetBound(math.Phi).Build())(z),
		draw.NewColorizer(color.RGBA64{
			R: 20000,
			G: 50000,
			B: 20000,
			A: 65535,
		}, mandelbrot.NewImageBuilder().SetMaxIterations(100).SetBound(math.E).Build())(z),
		draw.NewColorizer(color.RGBA64{
			R: 16000,
			G: 65335, // NOTE(review): 65335 looks like a typo for 65535 — confirm
			B: 16000,
			A: 65535,
		}, mandelbrot.NewImageBuilder().SetMaxIterations(75).SetBound(math.Pi).Build())(z),
	)
}
|
package util
import (
"fmt"
"github.com/muesli/cache2go"
"time"
)
// myStruct is a sample payload stored in the cache.
type myStruct struct {
	text     string
	moreData []byte
}

// CacheUtil demonstrates cache2go usage: it stores a value with a
// 5-second lifetime in the "myCache" table and immediately reads it back,
// printing the outcome either way.
func CacheUtil() {
	cache := cache2go.Cache("myCache")
	val := myStruct{"This is a test!", []byte{}}
	cache.Add("someKey", 5*time.Second, &val)
	res, err := cache.Value("someKey")
	if err == nil {
		fmt.Println("Found value in cache:", res.Data().(*myStruct).text)
	} else {
		fmt.Println("Error retrieving value from cache:", err)
	}
}
|
package requests
import (
"fmt"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
)
// LoadCustomData Load custom user data.
//
// Arbitrary JSON data can be stored for a User. This API call
// retrieves that data for a (optional) given scope.
// See {api:UsersController#set_custom_data Store Custom Data} for details and
// examples.
//
// On success, this endpoint returns an object containing the data that was requested.
//
// Responds with status code 400 if the namespace parameter, +ns+, is missing or invalid,
// or if the specified scope does not contain any data.
// https://canvas.instructure.com/doc/api/users.html
//
// Path Parameters:
// # Path.UserID (Required) ID
//
// Query Parameters:
// # Query.Ns (Required) The namespace from which to retrieve the data. This should be something other
// Canvas API apps aren't likely to use, such as a reverse DNS for your organization.
//
type LoadCustomData struct {
	// Path carries URL path parameters.
	Path struct {
		UserID string `json:"user_id" url:"user_id,omitempty"` // (Required)
	} `json:"path"`

	// Query carries URL query parameters.
	Query struct {
		Ns string `json:"ns" url:"ns,omitempty"` // (Required)
	} `json:"query"`
}

// GetMethod returns the HTTP method used by this endpoint.
func (t *LoadCustomData) GetMethod() string {
	return "GET"
}

// GetURLPath substitutes the user ID into the endpoint path template.
func (t *LoadCustomData) GetURLPath() string {
	path := "users/{user_id}/custom_data"
	path = strings.ReplaceAll(path, "{user_id}", fmt.Sprintf("%v", t.Path.UserID))
	return path
}

// GetQuery URL-encodes the query parameters.
func (t *LoadCustomData) GetQuery() (string, error) {
	v, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return v.Encode(), nil
}

// GetBody returns no form body; this endpoint sends none.
func (t *LoadCustomData) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns no JSON body; this endpoint sends none.
func (t *LoadCustomData) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the required path and query parameters, returning a
// single error listing every missing field, or nil when the request is valid.
func (t *LoadCustomData) HasErrors() error {
	errs := []string{}
	if t.Path.UserID == "" {
		errs = append(errs, "'Path.UserID' is required")
	}
	if t.Query.Ns == "" {
		errs = append(errs, "'Query.Ns' is required")
	}
	if len(errs) > 0 {
		// Pass the joined string as an argument, not as the format string:
		// a non-constant format is flagged by go vet and would misinterpret
		// any '%' in the message.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas instance. The response
// is discarded; only the request error, if any, is returned.
func (t *LoadCustomData) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	if err != nil {
		return err
	}
	return nil
}
|
package model
import (
"encoding/json"
"fmt"
"math"
"testing"
"github.com/kylelemons/godebug/pretty"
)
// TestNewDataPoint verifies the constructor stores timestamp and value
// unchanged.
func TestNewDataPoint(t *testing.T) {
	p := NewDataPoint(1000, 0.1)
	if p.Timestamp() != 1000 {
		t.Fatalf("\nExpected: %+v\nActual: %+v", 1000, p.Timestamp())
	}
	if p.Value() != 0.1 {
		t.Fatalf("\nExpected: %+v\nActual: %+v", 0.1, p.Value())
	}
}
// TestDataPointMarshalJSON checks the JSON wire format of a data point:
// a [value,timestamp] pair, with NaN serialized as null.
func TestDataPointMarshalJSON(t *testing.T) {
	tests := []struct {
		desc     string
		point    *DataPoint
		expected string
	}{
		{"not NaN", NewDataPoint(100, 10.5), "[10.5,100]"},
		{"NaN", NewDataPoint(100, math.NaN()), "[null,100]"},
	}
	for _, tc := range tests {
		// Previously the Marshal error was discarded, which would turn a
		// marshalling failure into a confusing diff failure below.
		j, err := json.Marshal(tc.point)
		if err != nil {
			t.Fatalf("%s: json.Marshal failed: %v", tc.desc, err)
		}
		got := fmt.Sprintf("%s", j)
		if diff := pretty.Compare(got, tc.expected); diff != "" {
			t.Fatalf("diff: (-actual +expected)\n%s", diff)
		}
	}
}
// TestDataPointsSort verifies Sort orders points by ascending timestamp.
func TestDataPointsSort(t *testing.T) {
	points := DataPoints{
		NewDataPoint(10000, 0.2),
		NewDataPoint(1000, 0.1),
		NewDataPoint(1120, 0.2),
		NewDataPoint(1060, 0.3),
		NewDataPoint(900, 0.2),
	}
	points.Sort()
	expected := DataPoints{
		NewDataPoint(900, 0.2),
		NewDataPoint(1000, 0.1),
		NewDataPoint(1060, 0.3),
		NewDataPoint(1120, 0.2),
		NewDataPoint(10000, 0.2),
	}
	if diff := pretty.Compare(points, expected); diff != "" {
		t.Fatalf("diff: (-actual +expected)\n%s", diff)
	}
}

// TestDataPointsDeduplicate verifies duplicate timestamps collapse to a
// single point — later values win, except that NaN never overwrites a
// real value.
func TestDataPointsDeduplicate(t *testing.T) {
	points := DataPoints{
		NewDataPoint(900, 0.5),
		NewDataPoint(900, 0.2),
		NewDataPoint(1000, 0.1),
		NewDataPoint(1060, 0.3),
		NewDataPoint(1060, math.NaN()),
		NewDataPoint(1120, 0.2),
		NewDataPoint(1120, 0.1),
	}
	points = points.Deduplicate()
	expected := DataPoints{
		NewDataPoint(900, 0.2),
		NewDataPoint(1000, 0.1),
		NewDataPoint(1060, 0.3), // Don't overwrite with NaN
		NewDataPoint(1120, 0.1),
	}
	if diff := pretty.Compare(points, expected); diff != "" {
		t.Fatalf("diff: (-actual +expected)\n%s", diff)
	}
}

// TestDataPointAlignTimestamp verifies timestamps are rounded down to
// multiples of the given interval (60 here); collisions are preserved.
func TestDataPointAlignTimestamp(t *testing.T) {
	points := DataPoints{
		NewDataPoint(10, 0.1),
		NewDataPoint(120, 0.2),
		NewDataPoint(220, 0.3),
		NewDataPoint(230, 0.4),
		NewDataPoint(335, 0.5),
	}
	got := points.AlignTimestamp(60)
	expected := DataPoints{
		NewDataPoint(0, 0.1),
		NewDataPoint(120, 0.2),
		NewDataPoint(180, 0.3),
		NewDataPoint(180, 0.4),
		NewDataPoint(300, 0.5),
	}
	if diff := pretty.Compare(got, expected); diff != "" {
		t.Fatalf("diff: (-actual +expected)\n%s", diff)
	}
}
|
package persistence
import (
"database/sql"
"fmt"
"fp-dynamic-elements-manager-controller/internal/logging/structs"
"github.com/jmoiron/sqlx"
"time"
)
const (
	// BatchTable is the database table element batches are persisted in.
	BatchTable = "element_batches"
)

// ElementBatchRepo provides persistence operations for element batches.
type ElementBatchRepo struct {
	db  *sqlx.DB
	log *structs.AppLogger
}

// NewElementBatchRepo builds a repository over the given DB handle and logger.
func NewElementBatchRepo(appDb *sqlx.DB, logger *structs.AppLogger) *ElementBatchRepo {
	return &ElementBatchRepo{db: appDb, log: logger}
}
// InsertBatchElement inserts a new batch row, stamping created_at and
// updated_at with the current time, inside its own transaction. Errors are
// logged and returned via the named results.
func (e *ElementBatchRepo) InsertBatchElement() (res sql.Result, err error) {
	now := time.Now()
	// Only the table name (a package constant) is interpolated; values are
	// bound as placeholders.
	smt := fmt.Sprintf("INSERT INTO %s (created_at, updated_at) VALUES (?,?)", BatchTable)
	tx, err := e.db.Begin()
	if err != nil {
		e.log.SystemLogger.Error(err, "Error starting transaction inserting batch element")
		return
	}
	res, err = tx.Exec(smt, now, now)
	if err != nil {
		e.log.SystemLogger.Error(err, "Error inserting batch element, rolling back")
		// NOTE(review): Rollback's own error is ignored — consider logging it.
		tx.Rollback()
		return
	}
	err = tx.Commit()
	if err != nil {
		e.log.SystemLogger.Error(err, "Insert batch element commit failed")
		return
	}
	return
}
// GetBatchIds returns the ids of all rows in the batch table.
func (e *ElementBatchRepo) GetBatchIds() (receiver []int64, err error) {
	err = e.db.Select(&receiver, fmt.Sprintf("SELECT id FROM %s", BatchTable))
	return
}
|
package storage
import (
"log"
"code.google.com/p/gcfg"
)
// blog holds the blog section of the configuration.
type blog struct {
	Title           string
	Subtitle        string
	Owner           string
	ArticlesPerPage int
	DataBase        string
}

// captcha holds the captcha service's public/private key pair.
type captcha struct {
	Public  string
	Private string
}

// comments controls reader comments (maximum length and on/off switch).
type comments struct {
	Maxlen  int
	Enabled bool
}

// smtp holds outgoing-mail settings.
type smtp struct {
	Server string
	Sender string
}

// google holds Google service identifiers.
type google struct {
	AnalyticsID string
	WebmasterID string
}

// Config aggregates every configuration section; gcfg maps file sections
// onto these embedded structs by name.
type Config struct {
	Blog     blog
	Captcha  captcha
	Comments comments
	Smtp     smtp
	Google   google
}

// ReadConf parses the configuration file fname into a Config via gcfg.
// The (possibly partially filled) Config is returned along with any error.
func ReadConf(fname string) (c Config, err error) {
	log.Println("Read", fname)
	return c, gcfg.ReadFileInto(&c, fname)
}
|
package innerSortImplTest
import (
"AlgorithmPractice/src/DataStructure/sort/innerSort/innerSortImpl"
"AlgorithmPractice/src/UnitTest/DataStructureTest/sortTest/innerSortTest"
"testing"
)
// TestBucketSort runs the shared sort test suite against the bucket sort
// implementation.
func TestBucketSort(t *testing.T) {
	innerSortTest.SortSQLTest(t, &innerSortImpl.BucketSort{})
	/* grammar note: checking slices for equality with reflect.DeepEqual
	a := []int{1, 2, 3, 4}
	b := []int{1, 3, 2, 4}
	c := []int{1, 2, 3, 4}
	fmt.Println(reflect.DeepEqual(a, b))
	fmt.Println(reflect.DeepEqual(a, c))*/
	/* grammar note: checking byte slices for equality with bytes.Equal
	a := []byte{0, 1, 3, 2}
	b := []byte{0, 1, 3, 2}
	c := []byte{1, 1, 3, 2}
	fmt.Println(bytes.Equal(a, b))
	fmt.Println(bytes.Equal(a, c))*/
}
|
package connector
import (
"daerclient"
"logger"
"majiangclient"
"pockerclient"
"rpc"
)
// EnterRoomREQ handles a client's request to enter a matched room, shared
// by all game types: it records the requested game/room type on the player
// and dispatches to the matching game client ("1" = Da'er, "2" = Mahjong,
// "3" = Poker). Unknown connections are silently ignored.
func (self *CNServer) EnterRoomREQ(conn rpc.RpcConn, msg rpc.EnterRoomREQ) error {
	logger.Info("client call EnterRoomREQ begin")
	p, exist := self.getPlayerByConnId(conn.GetId())
	if !exist {
		// No player bound to this connection; drop the request.
		return nil
	}
	// p.whichGame["daer"] = msg.GetRoomType()
	p.SetGameType(msg.GetGameType())
	p.SetRoomType(msg.GetRoomType())
	switch msg.GetGameType() {
	case "1":
		daerclient.EnterDaerRoom(p.PlayerBaseInfo, &msg)
	case "2":
		majiangclient.EnterMaJiangRoom(p.PlayerBaseInfo, &msg)
	case "3":
		pockerclient.EnterPockerRoom(p.PlayerBaseInfo, msg.GetGameType(), msg.GetRoomType())
	default:
		// Log text means "unknown game type"; kept as-is (runtime string).
		logger.Error("未知的游戏类型")
	}
	return nil
}
// GetOnlineInfo reports online player counts for the game parts listed in
// msg.PartIds (1 = Da'er, 2 = Mahjong, 3 = Texas Hold'em) back to the
// client. Unknown connections are silently ignored.
func (self *CNServer) GetOnlineInfo(conn rpc.RpcConn, msg rpc.OnlinePlayerReq) error {
	logger.Info("GetOnlineInfo has been called begain")
	_, exist := self.getPlayerByConnId(conn.GetId())
	if !exist {
		return nil
	}
	onlineInfo := &rpc.OnlinePlayerMsg{}
	for _, id := range msg.PartIds {
		if id == int32(1) { // Da'er
			daerInfo, err := daerclient.GetDaerServrOnlineNum()
			if daerInfo == nil {
				logger.Error("after login call daerclient.GetDaerServrOnlineNum() return nil, err:%s", err)
				return nil
			}
			onlineInfo.SetDaerInfo(daerInfo)
		} else if id == int32(2) { // Mahjong
			majiangInfo, err := majiangclient.GetMaJiangServrOnlineNum()
			if majiangInfo == nil {
				logger.Error("after login call majiangclient.GetMaJiangServrOnlineNum() return nil, err:%s", err)
				return nil
			}
			// NOTE(review): this stores the Mahjong count via SetDaerInfo,
			// overwriting the Da'er figure — confirm whether a Mahjong-specific
			// setter was intended.
			onlineInfo.SetDaerInfo(majiangInfo)
		} else if id == int32(3) { // Texas Hold'em: not implemented yet
		} else {
			logger.Error("GetOnlineInfo client param err:%d", id)
		}
	}
	WriteResult(conn, onlineInfo)
	return nil
}
|
/*
Copyright 2017 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
	"errors"
	"fmt"
	"io"
)
/* PG Error Severity Levels */
const (
	ErrorSeverityFatal   string = "FATAL"
	ErrorSeverityPanic   string = "PANIC"
	ErrorSeverityWarning string = "WARNING"
	ErrorSeverityNotice  string = "NOTICE"
	ErrorSeverityDebug   string = "DEBUG"
	ErrorSeverityInfo    string = "INFO"
	ErrorSeverityLog     string = "LOG"
)

/* PG Error Message Field Identifiers: single-byte keys prefixing each
field of an error packet */
const (
	ErrorFieldSeverity      byte = 'S'
	ErrorFieldCode          byte = 'C'
	ErrorFieldMessage       byte = 'M'
	ErrorFieldMessageDetail byte = 'D'
	ErrorFieldMessageHint   byte = 'H'
)

const (
	// ErrorCodeInternalError indicates an unspecified internal error.
	ErrorCodeInternalError = "XX000"
)

// unparseableErr is the message returned when an error packet cannot be decoded.
const unparseableErr = "unparseable error from postgres server"
// Error is a Postgresql processing error.
type Error struct {
	Severity string // e.g. FATAL, WARNING (see the ErrorSeverity* constants)
	Code     string // SQLSTATE code, e.g. "XX000"
	Message  string // primary human-readable message
	Detail   string // optional secondary detail
	Hint     string // optional suggestion for resolving the problem
}

// Error renders severity and message on one line, satisfying the error
// interface.
func (e *Error) Error() string {
	return fmt.Sprintf("pg: %s: %s", e.Severity, e.Message)
}
// NewError constructs a protocol error from an error packet. This is done,
// instead of passing the error through to the client as is, to give Secretless
// the ability to modify the error contents.
//
// NewError parses the error packet and populates the protocol error with the
// field types as detailed in the protocol documentation:
// https://www.postgresql.org/docs/9.1/protocol-error-fields.html. If parsing
// fails NewError returns an "unparseable error from postgres server" error.
//
// NOTE: We have made a conscious choice to propagate a subset of the error
// field types. We focus on those that are always present and, by our
// assessment, hold the most salient information.
func NewError(data []byte) error {
	constructedErr := &Error{}
	b := NewMessageBuffer(data)

	var (
		readErr       error
		fieldType     byte
		fieldContents string
	)
	for {
		fieldType, readErr = b.ReadByte()
		// io.EOF simply means every field has been consumed.
		if readErr == io.EOF {
			break
		}
		// Unexpected error. errors.New is used rather than fmt.Errorf with a
		// non-constant format string (flagged by go vet).
		if readErr != nil {
			return errors.New(unparseableErr)
		}

		fieldContents, readErr = b.ReadString()
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			return errors.New(unparseableErr)
		}

		// Dispatch on the named field identifiers instead of repeating the
		// byte literals, keeping this switch in sync with the constants above.
		switch fieldType {
		case ErrorFieldSeverity:
			constructedErr.Severity = fieldContents
		case ErrorFieldCode:
			constructedErr.Code = fieldContents
		case ErrorFieldMessage:
			constructedErr.Message = fieldContents
		case ErrorFieldMessageDetail:
			constructedErr.Detail = fieldContents
		case ErrorFieldMessageHint:
			constructedErr.Hint = fieldContents
		}
	}
	return constructedErr
}
// GetPacket formats an Error into a protocol message: the message-type
// byte, a length placeholder, the mandatory severity/code/message fields,
// the optional detail/hint fields, and a terminating null. The length is
// patched in at the end via ResetLength.
func (e *Error) GetPacket() []byte {
	msg := NewMessageBuffer([]byte{})

	msg.WriteByte(ErrorMessageType)
	msg.WriteInt32(0) // length placeholder, fixed up below

	msg.WriteByte(ErrorFieldSeverity)
	msg.WriteString(e.Severity)

	msg.WriteByte(ErrorFieldCode)
	msg.WriteString(e.Code)

	msg.WriteByte(ErrorFieldMessage)
	msg.WriteString(e.Message)

	// Optional fields are emitted only when present.
	if e.Detail != "" {
		msg.WriteByte(ErrorFieldMessageDetail)
		msg.WriteString(e.Detail)
	}

	if e.Hint != "" {
		msg.WriteByte(ErrorFieldMessageHint)
		msg.WriteString(e.Hint)
	}

	msg.WriteByte(0x00) // null terminate the message

	msg.ResetLength(PGMessageLengthOffset)

	return msg.Bytes()
}
|
package datagramsession
import (
"bytes"
"context"
"fmt"
"io"
"net"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/packet"
)
// TestSessionCtxDone makes sure a session will stop after context is done
func TestSessionCtxDone(t *testing.T) {
	testSessionReturns(t, closeByContext, time.Minute*2)
}

// TestCloseSession makes sure a session will stop after close method is called
func TestCloseSession(t *testing.T) {
	testSessionReturns(t, closeByCallingClose, time.Minute*2)
}

// TestCloseIdle makes sure a session will stop after there is no read/write for a period defined by closeAfterIdle
func TestCloseIdle(t *testing.T) {
	testSessionReturns(t, closeByTimeout, time.Millisecond*100)
}
// testSessionReturns starts a session, terminates it via the requested
// close method, and asserts Serve returns the (error, closedByRemote) pair
// appropriate for that termination path.
func testSessionReturns(t *testing.T, closeBy closeMethod, closeAfterIdle time.Duration) {
	var (
		localCloseReason = &errClosedSession{
			message:  "connection closed by origin",
			byRemote: false,
		}
	)
	sessionID := uuid.New()
	cfdConn, originConn := net.Pipe()
	payload := testPayload(sessionID)
	log := zerolog.Nop()
	mg := NewManager(&log, nil, nil)
	session := mg.newSession(sessionID, cfdConn)
	ctx, cancel := context.WithCancel(context.Background())
	sessionDone := make(chan struct{})
	go func() {
		closedByRemote, err := session.Serve(ctx, closeAfterIdle)
		// Each close path must surface its own distinct error.
		switch closeBy {
		case closeByContext:
			require.Equal(t, context.Canceled, err)
			require.False(t, closedByRemote)
		case closeByCallingClose:
			require.Equal(t, localCloseReason, err)
			require.Equal(t, localCloseReason.byRemote, closedByRemote)
		case closeByTimeout:
			require.Equal(t, SessionIdleErr(closeAfterIdle), err)
			require.False(t, closedByRemote)
		}
		close(sessionDone)
	}()
	go func() {
		// Push one payload through to prove the session moves data first.
		n, err := session.transportToDst(payload)
		require.NoError(t, err)
		require.Equal(t, len(payload), n)
	}()
	readBuffer := make([]byte, len(payload)+1)
	n, err := originConn.Read(readBuffer)
	require.NoError(t, err)
	require.Equal(t, len(payload), n)
	lastRead := time.Now()
	switch closeBy {
	case closeByContext:
		cancel()
	case closeByCallingClose:
		session.close(localCloseReason)
	}
	<-sessionDone
	if closeBy == closeByTimeout {
		// The idle close must not have fired earlier than closeAfterIdle.
		require.True(t, time.Now().After(lastRead.Add(closeAfterIdle)))
	}
	// call cancelled again otherwise the linter will warn about possible context leak
	cancel()
}
// closeMethod enumerates the ways a session is terminated in these tests.
type closeMethod int

const (
	closeByContext      closeMethod = iota // cancel the serving context
	closeByCallingClose                    // call session.close explicitly
	closeByTimeout                         // let the idle timeout fire
)
// TestWriteToDstSessionPreventClosed checks that writes toward the
// destination keep an otherwise idle session from closing.
func TestWriteToDstSessionPreventClosed(t *testing.T) {
	testActiveSessionNotClosed(t, false, true)
}

// TestReadFromDstSessionPreventClosed checks that reads from the
// destination keep an otherwise idle session from closing.
func TestReadFromDstSessionPreventClosed(t *testing.T) {
	testActiveSessionNotClosed(t, true, false)
}
// testActiveSessionNotClosed drives traffic in the requested direction(s)
// for activeTime and asserts the idle timeout (closeAfterIdle, much shorter
// than activeTime) does not close the session while traffic is flowing.
func testActiveSessionNotClosed(t *testing.T, readFromDst bool, writeToDst bool) {
	const closeAfterIdle = time.Millisecond * 100
	const activeTime = time.Millisecond * 500
	sessionID := uuid.New()
	cfdConn, originConn := net.Pipe()
	payload := testPayload(sessionID)
	respChan := make(chan *packet.Session)
	sender := newMockTransportSender(sessionID, payload)
	mg := NewManager(&nopLogger, sender.muxSession, respChan)
	session := mg.newSession(sessionID, cfdConn)
	startTime := time.Now()
	activeUntil := startTime.Add(activeTime)
	ctx, cancel := context.WithCancel(context.Background())
	errGroup, ctx := errgroup.WithContext(ctx)
	errGroup.Go(func() error {
		session.Serve(ctx, closeAfterIdle)
		// Serve returning before the active window elapses means the idle
		// timer closed a session that was still busy.
		if time.Now().Before(startTime.Add(activeTime)) {
			return fmt.Errorf("session closed while it's still active")
		}
		return nil
	})
	if readFromDst {
		// Keep the session busy: write from the origin side at half the
		// idle interval.
		errGroup.Go(func() error {
			for {
				if time.Now().After(activeUntil) {
					return nil
				}
				if _, err := originConn.Write(payload); err != nil {
					return err
				}
				time.Sleep(closeAfterIdle / 2)
			}
		})
	}
	if writeToDst {
		// Drain what the session writes toward the origin, checking payloads.
		errGroup.Go(func() error {
			readBuffer := make([]byte, len(payload))
			for {
				n, err := originConn.Read(readBuffer)
				if err != nil {
					if err == io.EOF || err == io.ErrClosedPipe {
						return nil
					}
					return err
				}
				if !bytes.Equal(payload, readBuffer[:n]) {
					return fmt.Errorf("payload %v is not equal to %v", readBuffer[:n], payload)
				}
			}
		})
		// Feed payloads into the session at half the idle interval.
		errGroup.Go(func() error {
			for {
				if time.Now().After(activeUntil) {
					return nil
				}
				if _, err := session.transportToDst(payload); err != nil {
					return err
				}
				time.Sleep(closeAfterIdle / 2)
			}
		})
	}
	require.NoError(t, errGroup.Wait())
	cancel()
}
// TestMarkActiveNotBlocking fires many concurrent markActive calls to
// verify the call never blocks (the test would hang on a regression).
func TestMarkActiveNotBlocking(t *testing.T) {
	const concurrentCalls = 50
	mg := NewManager(&nopLogger, nil, nil)
	session := mg.newSession(uuid.New(), nil)
	var wg sync.WaitGroup
	wg.Add(concurrentCalls)
	for i := 0; i < concurrentCalls; i++ {
		go func() {
			session.markActive()
			wg.Done()
		}()
	}
	wg.Wait()
}
// TestZeroBytePayload verifies that a 0-size payload (which some UDP
// applications legitimately send) is forwarded to the transport rather
// than dropped.
func TestZeroBytePayload(t *testing.T) {
	sessionID := uuid.New()
	cfdConn, originConn := net.Pipe()
	sender := sendOnceTransportSender{
		baseSender: newMockTransportSender(sessionID, make([]byte, 0)),
		sentChan:   make(chan struct{}),
	}
	mg := NewManager(&nopLogger, sender.muxSession, nil)
	session := mg.newSession(sessionID, cfdConn)
	ctx, cancel := context.WithCancel(context.Background())
	errGroup, ctx := errgroup.WithContext(ctx)
	errGroup.Go(func() error {
		// Read from underlying conn and send to transport
		closedByRemote, err := session.Serve(ctx, time.Minute*2)
		require.Equal(t, context.Canceled, err)
		require.False(t, closedByRemote)
		return nil
	})
	errGroup.Go(func() error {
		// Write to underlying connection
		n, err := originConn.Write([]byte{})
		require.NoError(t, err)
		require.Equal(t, 0, n)
		return nil
	})
	// Block until the zero-byte payload reaches the mock transport.
	<-sender.sentChan
	cancel()
	require.NoError(t, errGroup.Wait())
}
type mockTransportSender struct {
expectedSessionID uuid.UUID
expectedPayload []byte
}
func newMockTransportSender(expectedSessionID uuid.UUID, expectedPayload []byte) *mockTransportSender {
return &mockTransportSender{
expectedSessionID: expectedSessionID,
expectedPayload: expectedPayload,
}
}
// muxSession checks that the muxed session matches the expected session ID
// and payload, returning a descriptive error on any mismatch.
// Error strings are lowercased per Go convention (staticcheck ST1005).
func (mts *mockTransportSender) muxSession(session *packet.Session) error {
	if session.ID != mts.expectedSessionID {
		return fmt.Errorf("expect session %s, got %s", mts.expectedSessionID, session.ID)
	}
	if !bytes.Equal(session.Payload, mts.expectedPayload) {
		return fmt.Errorf("expect %v, read %v", mts.expectedPayload, session.Payload)
	}
	return nil
}
// sendOnceTransportSender wraps a mockTransportSender and signals, by
// closing sentChan, after its first mux call.
// NOTE(review): close panics if muxSession is called twice; this assumes a
// single mux call per test.
type sendOnceTransportSender struct {
	baseSender *mockTransportSender
	sentChan chan struct{}
}

// muxSession delegates validation to the base sender and closes sentChan so
// tests can wait for the payload to be sent.
func (sots *sendOnceTransportSender) muxSession(session *packet.Session) error {
	defer close(sots.sentChan)
	return sots.baseSender.muxSession(session)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package dns
import (
"context"
"io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"chromiumos/tast/common/crypto/certificate"
"chromiumos/tast/common/shillconst"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/apps"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/checked"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/input"
"chromiumos/tast/local/network/virtualnet"
"chromiumos/tast/local/network/virtualnet/certs"
"chromiumos/tast/local/network/virtualnet/dnsmasq"
"chromiumos/tast/local/network/virtualnet/env"
"chromiumos/tast/local/network/virtualnet/httpserver"
"chromiumos/tast/local/network/virtualnet/subnet"
"chromiumos/tast/local/shill"
"chromiumos/tast/local/vm"
"chromiumos/tast/testing"
)
// DoHMode defines possible type of DNS-over-HTTPS.
type DoHMode int

const (
	// DoHOff is a mode that resolves DNS through plaintext.
	DoHOff DoHMode = iota
	// DoHAutomatic is a mode that automatically chose between plaintext and secure DNS.
	DoHAutomatic
	// DoHAlwaysOn is a mode that resolves DNS through secure DNS.
	DoHAlwaysOn
)

// Client defines the client resolving DNS.
type Client int

const (
	// System is a DNS client type for systems.
	System Client = iota
	// User is a DNS client type for users (e.g. cups, tlsdate).
	User
	// Chrome is a DNS client type with user 'chronos'.
	Chrome
	// Crostini is a DNS client type for Crostini.
	Crostini
	// ARC is a DNS client type for ARC.
	ARC
)

// Env wraps the test environment created for DNS tests.
type Env struct {
	// Router acts as the default network for the device under test.
	Router *env.Env
	// server hosts the DNS (dnsmasq) and DoH (HTTPS) servers.
	server *env.Env
	// manager is the shill manager proxy used to tweak connection properties.
	manager *shill.Manager
	// Certs holds the test HTTPS certificates used by the DoH server.
	Certs *certs.Certs
	// cleanupCerts uninstalls the test certificates installed by NewEnv.
	cleanupCerts func(context.Context)
}

// GoogleDoHProvider is the Google DNS-over-HTTPS provider.
const GoogleDoHProvider = "https://dns.google/dns-query"

// ExampleDoHProvider is a fake DNS-over-HTTPS provider used for testing using virtualnet package.
// The URL must match the CA certificate used by virtualnet/certs/cert.go.
const ExampleDoHProvider = "https://www.example.com/dns-query"

// DigProxyIPRE is the regular expressions for DNS proxy IP inside dig output.
// NOTE(review): the dots in "100.115.92" are unescaped and so match any
// byte; harmless in practice for this pattern, but `\.` would be stricter.
var DigProxyIPRE = regexp.MustCompile(`SERVER: 100.115.92.\d+#53`)
// GetClientString returns the string representation of a DNS client.
// Unknown clients yield the empty string.
func GetClientString(c Client) string {
	names := map[Client]string{
		System: "system",
		User: "user",
		Chrome: "Chrome",
		Crostini: "Crostini",
		ARC: "ARC",
	}
	// A missing key returns the map's zero value (""), matching the old
	// switch's default branch.
	return names[c]
}
// SetDoHMode updates ChromeOS setting to change DNS-over-HTTPS mode.
// It drives the OS Settings privacy page UI and then polls shill until the
// requested mode is reflected in the DoH provider list. dohProvider is only
// used for DoHAlwaysOn, where it is typed into the custom provider field.
func SetDoHMode(ctx context.Context, cr *chrome.Chrome, tconn *chrome.TestConn, mode DoHMode, dohProvider string) error {
	conn, err := apps.LaunchOSSettings(ctx, cr, "chrome://os-settings/osPrivacy")
	if err != nil {
		return errors.Wrap(err, "failed to get connection to OS Settings")
	}
	defer conn.Close()
	ac := uiauto.New(tconn)
	// Toggle secure DNS, the UI might lag, keep trying until secure DNS is toggled to the expected state.
	leftClickAc := ac.WithInterval(2 * time.Second)
	var toggleSecureDNS = func(ctx context.Context, check checked.Checked) error {
		tb := nodewith.Role(role.ToggleButton).Name("Use secure DNS")
		var secureDNSChecked = func(ctx context.Context) error {
			tbInfo, err := ac.Info(ctx, tb)
			if err != nil {
				return errors.Wrap(err, "failed to find secure DNS toggle button")
			}
			if tbInfo.Checked != check {
				// Report both observed and wanted state; the old message only
				// echoed the wanted state, which made failures confusing.
				return errors.Errorf("secure DNS toggle button checked state is %s, want %s", tbInfo.Checked, check)
			}
			return nil
		}
		if err := leftClickAc.LeftClickUntil(tb, secureDNSChecked)(ctx); err != nil {
			return errors.Wrap(err, "failed to toggle secure DNS button")
		}
		return nil
	}
	switch mode {
	case DoHOff:
		if err := toggleSecureDNS(ctx, checked.False); err != nil {
			return err
		}
	case DoHAutomatic:
		if err := toggleSecureDNS(ctx, checked.True); err != nil {
			return err
		}
		rb := nodewith.Role(role.RadioButton).Name("With your current service provider")
		if err := ac.LeftClick(rb)(ctx); err != nil {
			return errors.Wrap(err, "failed to enable automatic mode")
		}
	case DoHAlwaysOn:
		if err := toggleSecureDNS(ctx, checked.True); err != nil {
			return err
		}
		// Get a handle to the input keyboard.
		kb, err := input.Keyboard(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to get keyboard")
		}
		defer kb.Close()
		m, err := input.Mouse(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to get mouse")
		}
		defer m.Close()
		// On some devices, the text field for the provider might be hidden by the bottom bar.
		// Scroll down then focus on the text field.
		if err := m.ScrollDown(); err != nil {
			return errors.Wrap(err, "failed to scroll down")
		}
		// Find secure DNS text field through its parent.
		gcs, err := ac.NodesInfo(ctx, nodewith.Role(role.GenericContainer))
		if err != nil {
			return errors.Wrap(err, "failed to get generic container nodes")
		}
		nth := -1
		for i, e := range gcs {
			if attr, ok := e.HTMLAttributes["id"]; ok && attr == "secureDnsInput" {
				nth = i
				break
			}
		}
		if nth < 0 {
			// err is nil here, so the previous errors.Wrap(err, ...) returned
			// nil and this failure silently looked like success.
			return errors.New("failed to find secure DNS text field")
		}
		tf := nodewith.Role(role.TextField).Ancestor(nodewith.Role(role.GenericContainer).Nth(nth))
		if err := ac.FocusAndWait(tf)(ctx); err != nil {
			return errors.Wrap(err, "failed to focus on the text field")
		}
		rg := nodewith.Role(role.RadioGroup)
		if err := ac.WaitForLocation(rg)(ctx); err != nil {
			return errors.Wrap(err, "failed to wait for radio group")
		}
		rbsInfo, err := ac.NodesInfo(ctx, nodewith.Role(role.RadioButton).Ancestor(rg))
		if err != nil {
			return errors.Wrap(err, "failed to get secure DNS radio buttons information")
		}
		var rbLocation coords.Rect
		var found = false
		for _, e := range rbsInfo {
			if e.Name != "With your current service provider" {
				rbLocation = e.Location
				found = true
				break
			}
		}
		if !found {
			// err is nil here as well; use a fresh error instead of wrapping nil.
			return errors.New("failed to find secure DNS radio button")
		}
		if err := uiauto.Combine("enable DoH always on with a custom provider",
			// Click the custom-provider radio button (the one that is not
			// "With your current service provider").
			ac.MouseClickAtLocation(0, rbLocation.CenterPoint()),
			// Input a custom DoH provider.
			ac.LeftClick(tf),
			kb.AccelAction("Ctrl+A"),
			kb.AccelAction("Backspace"),
			kb.TypeAction(dohProvider),
			kb.AccelAction("Enter"),
		)(ctx); err != nil {
			return errors.Wrap(err, "failed to enable DoH with a custom provider")
		}
	}
	// Wait until shill reports the mode we just configured through the UI.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if m, err := getDoHMode(ctx); err != nil {
			return err
		} else if m != mode {
			return errors.New("failed to get the correct DoH mode")
		}
		return nil
	}, &testing.PollOptions{Timeout: 3 * time.Second}); err != nil {
		return err
	}
	return nil
}
// RandDomain returns a random domain name that can be useful for avoiding caching while testing DNS queries.
// The label is the current time in nanoseconds rendered as hex, so
// successive calls produce distinct names.
func RandDomain() string {
	label := strconv.FormatInt(time.Now().UnixNano(), 16)
	return label + ".com"
}
// QueryOptions are provided to QueryDNS to configure the lookup query.
type QueryOptions struct {
	// Domain is the name to resolve.
	Domain string
	// Nameserver, if non-empty, is passed to dig as "@<nameserver>".
	Nameserver string
}
// NewQueryOptions returns a new options pre-populated with a random domain for testing.
func NewQueryOptions() *QueryOptions {
	opts := &QueryOptions{}
	opts.Domain = RandDomain()
	return opts
}
// digArgs builds the argument list for dig: the domain, optionally followed
// by "@<nameserver>".
func (o QueryOptions) digArgs() []string {
	if o.Nameserver == "" {
		return []string{o.Domain}
	}
	return []string{o.Domain, "@" + o.Nameserver}
}
// QueryDNS resolves a domain through DNS with a specific client.
// Depending on the client, the query runs directly on the host ("dig"), as
// another Unix user via sudo, inside the Crostini container, or through
// ARC's dumpsys DNS tool. Only the arguments relevant to the chosen client
// are used.
func QueryDNS(ctx context.Context, c Client, a *arc.ARC, cont *vm.Container, opts *QueryOptions) error {
	args := opts.digArgs()
	var u string
	switch c {
	case System:
		return testexec.CommandContext(ctx, "dig", args...).Run()
	case User:
		u = "cups"
	case Chrome:
		u = "chronos"
	case Crostini:
		return cont.Command(ctx, append([]string{"dig"}, args...)...).Run()
	case ARC:
		out, err := a.Command(ctx, "dumpsys", "wifi", "tools", "dns", opts.Domain).Output()
		if err != nil {
			return errors.Wrap(err, "failed to do ARC DNS query")
		}
		// At least one IP response must be observed.
		for _, l := range strings.Split(string(out), "\n") {
			if net.ParseIP(strings.TrimSpace(l)) != nil {
				return nil
			}
		}
		return errors.New("failed to resolve domain")
	default:
		return errors.New("unknown client")
	}
	// User and Chrome fall through here: run dig as the selected Unix user.
	return testexec.CommandContext(ctx, "sudo", append([]string{"-u", u, "dig"}, args...)...).Run()
}
// ProxyTestCase contains test case for DNS proxy tests.
type ProxyTestCase struct {
	// Client is the DNS client to issue the query as.
	Client Client
	// ExpectErr indicates the query is expected to fail.
	ExpectErr bool
	// AllowRetry permits retrying the query until the poll timeout.
	AllowRetry bool
}
// TestQueryDNSProxy runs a set of test cases for DNS proxy.
// Each case issues a DNS query as the case's client and compares the outcome
// against ExpectErr; cases with AllowRetry are retried for up to 15 seconds,
// others stop polling after the first attempt. All failures are collected
// and returned.
func TestQueryDNSProxy(ctx context.Context, tcs []ProxyTestCase, a *arc.ARC, cont *vm.Container, opts *QueryOptions) []error {
	var errs []error
	for _, tc := range tcs {
		if err := testing.Poll(ctx, func(ctx context.Context) error {
			var err error
			qErr := QueryDNS(ctx, tc.Client, a, cont, opts)
			if qErr != nil && !tc.ExpectErr {
				err = errors.Wrapf(qErr, "DNS query failed for %s", GetClientString(tc.Client))
			}
			if qErr == nil && tc.ExpectErr {
				err = errors.Errorf("successful DNS query for %s, but expected failure", GetClientString(tc.Client))
			}
			if !tc.AllowRetry {
				// NOTE(review): when the case passed, err is nil here; this
				// assumes testing.PollBreak(nil) is treated as success —
				// confirm against tast's Poll documentation.
				return testing.PollBreak(err)
			}
			return err
		}, &testing.PollOptions{Timeout: 15 * time.Second}); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}
// InstallDigInContainer installs dig in container.
// It is a no-op when dig is already available; otherwise dnsutils is
// installed via apt-get and the installation is verified.
func InstallDigInContainer(ctx context.Context, cont *vm.Container) error {
	// Check whether dig is preinstalled or not.
	if err := cont.Command(ctx, "dig", "-v").Run(); err == nil {
		return nil
	}
	// Run command sudo apt update in container. Ignore the error because this might fail for unrelated reasons.
	cont.Command(ctx, "sudo", "apt", "update").Run(testexec.DumpLogOnError)
	// Run command sudo apt install dnsutils in container.
	if err := cont.Command(ctx, "sudo", "DEBIAN_FRONTEND=noninteractive", "apt-get", "-y", "install", "dnsutils").Run(testexec.DumpLogOnError); err != nil {
		return errors.Wrap(err, "failed to run command sudo apt install dnsutils in container")
	}
	// Run command dig -v and check the output to make sure dig has been installed successfully.
	if err := cont.Command(ctx, "dig", "-v").Run(); err != nil {
		return errors.Wrap(err, "failed to install dig in container")
	}
	return nil
}
// getDoHProviders returns the current DNS-over-HTTPS providers, read from
// the shill manager's DOHProviders property.
func getDoHProviders(ctx context.Context) (map[string]interface{}, error) {
	mgr, err := shill.NewManager(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create shill manager object")
	}
	properties, err := mgr.GetProperties(ctx)
	if err != nil {
		return nil, err
	}
	raw, err := properties.Get(shillconst.ManagerPropertyDOHProviders)
	if err != nil {
		return nil, err
	}
	if providers, ok := raw.(map[string]interface{}); ok {
		return providers, nil
	}
	return nil, errors.Errorf("property %s is not a map of string to interface: %q", shillconst.ManagerPropertyDOHProviders, raw)
}
// getDoHMode returns the current DNS-over-HTTPS mode, derived from the
// shill DoH provider list: no providers means off; any provider mapped to a
// non-empty value means automatic; providers with only empty values mean
// always-on. (Presumably each provider maps to the plain-text nameservers
// it applies to — verify against shill's DOHProviders documentation.)
func getDoHMode(ctx context.Context) (DoHMode, error) {
	providers, err := getDoHProviders(ctx)
	// An empty provider list (or a lookup failure) means DoH is off.
	if err != nil || len(providers) == 0 {
		return DoHOff, err
	}
	for _, ns := range providers {
		if ns == "" {
			continue
		}
		return DoHAutomatic, nil
	}
	return DoHAlwaysOn, nil
}
// DigMatch runs dig to check name resolution works and verifies the expected server was used.
// match selects whether the regexp is required to match (true) or required
// not to match (false) the dig output.
func DigMatch(ctx context.Context, re *regexp.Regexp, match bool) error {
	out, err := testexec.CommandContext(ctx, "dig", "google.com").Output()
	if err != nil {
		return errors.Wrap(err, "dig failed")
	}
	if got := re.Match(out); got != match {
		return errors.New("dig used unexpected nameserver")
	}
	return nil
}
// queryDNS queries DNS to |addr| through UDP port 53 and returns the response.
// msg must already be a wire-format DNS message; the raw response bytes are
// returned.
// NOTE(review): the response buffer is 512 bytes (the classic un-extended
// UDP DNS limit), so larger answers would be truncated; and no read
// deadline is set (ctx only bounds the dial) — confirm callers bound the
// read some other way.
func queryDNS(ctx context.Context, msg []byte, addr string) ([]byte, error) {
	var d net.Dialer
	conn, err := d.DialContext(ctx, "udp", addr+":53")
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	if _, err := conn.Write(msg); err != nil {
		return nil, err
	}
	resp := make([]byte, 512)
	n, err := conn.Read(resp)
	if err != nil {
		return nil, err
	}
	return resp[:n], nil
}
// dohResponder returns a function that responds to HTTPS queries by proxying the queries to DNS server on |addr|.
// The handler reads the raw DNS message from the request body, forwards it
// over UDP, and writes the answer back with the DoH content type. Failures
// are logged and yield an empty response rather than an HTTP error status.
func dohResponder(ctx context.Context, addr string) func(http.ResponseWriter, *http.Request) {
	return func(rw http.ResponseWriter, req *http.Request) {
		msg, err := ioutil.ReadAll(req.Body)
		if err != nil {
			testing.ContextLog(ctx, "Failed to read HTTPS request: ", err)
			return
		}
		resp, err := queryDNS(ctx, msg, addr)
		if err != nil {
			testing.ContextLog(ctx, "Failed to query DNS: ", err)
			return
		}
		rw.Header().Set("content-type", "application/dns-message")
		if _, err := rw.Write(resp); err != nil {
			testing.ContextLog(ctx, "Failed to write HTTPS response: ", err)
		}
	}
}
// Cleanup cleans anything that is set up through NewEnv. This needs to be called whenever env is not needed anymore.
// Each teardown step is attempted independently; failures are logged but do
// not stop the remaining steps.
func (e *Env) Cleanup(ctx context.Context) {
	if e.server != nil {
		if err := e.server.Cleanup(ctx); err != nil {
			testing.ContextLog(ctx, "Failed to cleanup server env: ", err)
		}
	}
	if e.Router != nil {
		if err := e.Router.Cleanup(ctx); err != nil {
			testing.ContextLog(ctx, "Failed to cleanup router env: ", err)
		}
	}
	if e.cleanupCerts != nil {
		e.cleanupCerts(ctx)
		// DNS proxy runs in its own namespace, so it must be restarted to
		// stop seeing the (now removed) test certificates.
		if err := restartDNSProxy(ctx); err != nil {
			testing.ContextLog(ctx, "Failed to restart DNS proxy: ", err)
		}
	}
	if e.manager != nil {
		// Re-enable portal detection on ethernet, which NewEnv disabled.
		if err := e.manager.SetProperty(ctx, shillconst.ProfilePropertyCheckPortalList, "ethernet,wifi,cellular"); err != nil {
			testing.ContextLog(ctx, "Failed to revert check portal list property: ", err)
		}
	}
}
// NewEnv creates a DNS environment including router that acts as the default network and a server that responds to DNS and DoH queries.
// On success, the caller is responsible to cleanup the environment through the |Cleanup| function.
func NewEnv(ctx context.Context, pool *subnet.Pool) (env *Env, err error) {
	e := &Env{}
	// While this function is still setting up, e is non-nil and this defer
	// tears down anything partially created; on success e is set to nil so
	// the cleanup is skipped and ownership passes to the caller.
	defer func() {
		if e != nil {
			e.Cleanup(ctx)
		}
	}()
	// Shill-related setup.
	e.manager, err = shill.NewManager(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create manager proxy")
	}
	testing.ContextLog(ctx, "Disabling portal detection on ethernet")
	if err := e.manager.SetProperty(ctx, shillconst.ProfilePropertyCheckPortalList, "wifi,cellular"); err != nil {
		return nil, errors.Wrap(err, "failed to disable portal detection on ethernet")
	}
	// Install test certificates for HTTPS server. In doing so, virtualnet/certs will mount a test certificate directory.
	// Because DNS proxy lives in its own namespace, it needs to be restarted to be able to see the test certificates.
	httpsCerts := certs.New(certs.SSLCrtPath, certificate.TestCert3())
	e.cleanupCerts, err = httpsCerts.InstallTestCerts(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to setup certificates")
	}
	if err := restartDNSProxy(ctx); err != nil {
		return nil, errors.Wrap(err, "failed to restart DNS proxy")
	}
	// Allocate subnet for DNS server.
	serverIPv4Subnet, err := pool.AllocNextIPv4Subnet()
	if err != nil {
		return nil, errors.Wrap(err, "failed to allocate v4 subnet")
	}
	serverIPv6Subnet, err := pool.AllocNextIPv6Subnet()
	if err != nil {
		return nil, errors.Wrap(err, "failed to allocate v6 subnet")
	}
	serverSubnetAddr := serverIPv4Subnet.IP.To4()
	// This assumes that the server will use the IPv4 address xx.xx.xx.2 from env's ConnectToRouter internal implementation.
	serverAddr := net.IPv4(serverSubnetAddr[0], serverSubnetAddr[1], serverSubnetAddr[2], 2)
	var svc *shill.Service
	svc, e.Router, err = virtualnet.CreateRouterEnv(ctx, e.manager, pool, virtualnet.EnvOptions{
		Priority: 5,
		NameSuffix: "",
		IPv4DNSServers: []string{serverAddr.String()},
		EnableDHCP: true,
		RAServer: false,
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to set up router env")
	}
	if err := svc.WaitForProperty(ctx, shillconst.ServicePropertyState, shillconst.ServiceStateOnline, 10*time.Second); err != nil {
		return nil, errors.Wrap(err, "failed to wait for base service online")
	}
	e.server, err = NewServer(ctx, "server", serverIPv4Subnet, serverIPv6Subnet, e.Router, httpsCerts)
	if err != nil {
		return nil, errors.Wrap(err, "failed to set up server env")
	}
	// Success: hand the environment to the caller and disarm the deferred
	// cleanup by clearing e.
	env = e
	e = nil
	return env, nil
}
// NewServer creates a server that responds to DNS and DoH queries.
// The returned env is connected to routerEnv and runs dnsmasq (plain DNS)
// plus an HTTPS server (DoH) using httpsCerts. On error the partially
// created env is cleaned up before returning.
func NewServer(ctx context.Context, envName string, ipv4Subnet, ipv6Subnet *net.IPNet, routerEnv *env.Env, httpsCerts *certs.Certs) (*env.Env, error) {
	// success stays false until full setup completes; the defer below cleans
	// up the env on any early return.
	success := false
	server := env.New(envName)
	if err := server.SetUp(ctx); err != nil {
		return nil, errors.Wrap(err, "failed to set up server env")
	}
	defer func() {
		if success {
			return
		}
		if err := server.Cleanup(ctx); err != nil {
			testing.ContextLog(ctx, "Failed to cleanup server env: ", err)
		}
	}()
	if err := server.ConnectToRouter(ctx, routerEnv, ipv4Subnet, ipv6Subnet); err != nil {
		return nil, errors.Wrap(err, "failed to connect server to router")
	}
	// Get server IPv4 address.
	addr, err := server.GetVethInAddrs(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get env addresses")
	}
	// Start a DNS server.
	if err := server.StartServer(ctx, "dnsmasq", dnsmasq.New(dnsmasq.WithResolveHost("", addr.IPv4Addr))); err != nil {
		return nil, errors.Wrap(err, "failed to start dnsmasq")
	}
	// Start a DoH server.
	httpsserver := httpserver.New("443", dohResponder(ctx, addr.IPv4Addr.String()), httpsCerts)
	if err := server.StartServer(ctx, "httpsserver", httpsserver); err != nil {
		return nil, errors.Wrap(err, "failed to start DoH server")
	}
	success = true
	return server, nil
}
|
package balance
import (
"testing"
)
// TestHashing checks that the consistent-hash ring keeps returning the
// expected node for each key as nodes are added and removed.
func TestHashing(t *testing.T) {
	ring := New(10, nil)
	ring.Add("9", "2", "8")
	cases := map[string]string{
		"10": "10",
		"13": "13",
		"29": "29",
		"20": "20",
	}
	// verify checks every expected key->node mapping once.
	verify := func() {
		for key, want := range cases {
			if got := ring.Get(key); got != want {
				t.Errorf("Asking for %s, get %s", want, got)
			}
		}
	}
	verify()
	ring.Add("7")
	verify()
	ring.Remove("7")
	verify()
}
|
package graylogger
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"testing"
"bou.ke/monkey"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// testInit is the base logger configuration shared by all tests. The
// Graylog transport fields are left unset so nothing is sent over the
// network during tests; individual tests copy and extend it as needed.
var testInit = Init{
	//GraylogHost: "127.0.0.1",
	//GraylogPort: 12201,
	//GraylogProvider: "TestService",
	//GraylogProtocol: TransportUDP,
	LogEnv: "test",
	LogLevel: LevelDebug,
	LogColor: false,
}

// testOutputFileName is the scratch file used to capture logger output.
var testOutputFileName = "test.out"

// outputSuite groups the logger output tests for testify's suite runner.
type outputSuite struct {
	suite.Suite
}
// TestNew verifies that New copies every Init field into the logger's
// initData and that output is allowed by default.
func (s outputSuite) TestNew() {
	g := New(testInit)
	s.Equal(testInit.GraylogHost, g.initData.GraylogHost)
	s.Equal(testInit.GraylogPort, g.initData.GraylogPort)
	s.Equal(testInit.GraylogProtocol, g.initData.GraylogProtocol)
	s.Equal(testInit.GraylogProvider, g.initData.GraylogProvider)
	s.Equal(testInit.LogLevel, g.initData.LogLevel)
	s.Equal(testInit.LogEnv, g.initData.LogEnv)
	s.Equal(testInit.LogColor, g.initData.LogColor)
	s.Equal(true, g.IsAllowedOutput())
}
// TestNewValidateLogLevelFatal verifies that New exits fatally when given
// an invalid log level. os.Exit is monkey-patched to panic so the fatal
// exit can be asserted as a panic value instead of killing the test binary.
func (s outputSuite) TestNewValidateLogLevelFatal() {
	// Start from the shared config with a full Graylog setup, then break
	// only the log level.
	init := testInit
	init.GraylogHost = "127.0.0.1"
	init.GraylogPort = 12201
	init.GraylogProvider = "TestService"
	init.GraylogProtocol = TransportUDP
	init.LogLevel = "bad_log_level"
	ExpectedPanicText := "Fatal function called"
	panicFunc := func(int) {
		panic(ExpectedPanicText)
	}
	patch := monkey.Patch(os.Exit, panicFunc)
	defer patch.Unpatch()
	assert.PanicsWithValue(
		s.T(),
		ExpectedPanicText,
		func() {
			_ = New(init)
		},
		"Fatal function was not called")
}
// TestNewValidateTransportFatal verifies that New exits fatally when given
// an invalid Graylog protocol. os.Exit is monkey-patched to panic so the
// fatal exit can be asserted as a panic value.
func (s outputSuite) TestNewValidateTransportFatal() {
	init := testInit
	init.GraylogHost = "127.0.0.1"
	init.GraylogPort = 12201
	init.GraylogProvider = "TestService"
	// The previous code first assigned TransportUDP here and immediately
	// overwrote it with the bad value; the dead assignment is removed.
	init.GraylogProtocol = "bad_protocol"
	ExpectedPanicText := "Fatal function called"
	panicFunc := func(int) {
		panic(ExpectedPanicText)
	}
	patch := monkey.Patch(os.Exit, panicFunc)
	defer patch.Unpatch()
	assert.PanicsWithValue(
		s.T(),
		ExpectedPanicText,
		func() {
			_ = New(init)
		},
		"Fatal function was not called")
}
// TestTracking verifies that Tracking reports the caller's file, line and
// function name, honoring the requested stack depth.
func (s outputSuite) TestTracking() {
	t := Tracking(1)
	s.Equal("output_test.go", t.File)
	s.NotEqual(0, t.Line)
	s.Equal("graylogger.outputSuite.TestTracking", t.Function)
	// Depth 1 from inside a closure names the closure itself...
	func() {
		t := Tracking(1)
		s.Equal("graylogger.outputSuite.TestTracking.func1", t.Function)
	}()
	// ...while depth 2 skips the closure and names the enclosing method.
	func() {
		t := Tracking(2)
		s.Equal("graylogger.outputSuite.TestTracking", t.Function)
	}()
}
// TestDiscardOutput verifies that DiscardOutput suppresses every log level:
// after discarding, the captured output is empty.
func (s outputSuite) TestDiscardOutput() {
	g := New(testInit)
	g.DiscardOutput()
	g.CaptureOutput(testOutputFileName)
	g.Debug("test", LevelDebug)
	g.Info("test", LevelInfo)
	g.Warning("test", LevelWarning)
	g.Error("test", LevelError)
	g.SaveOutput()
	s.Equal("", g.GetOutput())
	resetTest(s)
}

// TestResetLogger verifies that ResetLogger re-enables output after a
// DiscardOutput: the same log calls that produced nothing before the reset
// all appear in the captured output afterwards.
func (s outputSuite) TestResetLogger() {
	g := New(testInit)
	g.DiscardOutput()
	g.CaptureOutput(testOutputFileName)
	g.Debug("test", LevelDebug)
	g.Info("test", LevelInfo)
	g.Warning("test", LevelWarning)
	g.Error("test", LevelError)
	g.SaveOutput()
	s.Equal("", g.GetOutput())
	g = g.ResetLogger()
	g.CaptureOutput(testOutputFileName)
	g.Debug("test", LevelDebug)
	g.Info("test", LevelInfo)
	g.Warning("test", LevelWarning)
	g.Error("test", LevelError)
	g.SaveOutput()
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: debug"))
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: info"))
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: warning"))
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: error"))
	resetTest(s)
}
// TestDebug verifies that Debug messages appear in the captured output.
func (s outputSuite) TestDebug() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	g.Debug("test", LevelDebug)
	g.SaveOutput()
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: debug"))
	resetTest(s)
}

// TestInfo verifies that Info messages appear in the captured output.
func (s outputSuite) TestInfo() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	g.Info("test", LevelInfo)
	g.SaveOutput()
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: info"))
	resetTest(s)
}

// TestWarning verifies that Warning messages appear in the captured output.
func (s outputSuite) TestWarning() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	g.Warning("test", LevelWarning)
	g.SaveOutput()
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: warning"))
	resetTest(s)
}

// TestLogWarningIfErr verifies that LogWarningIfErr logs a non-nil error
// prefixed with the calling function's name.
func (s outputSuite) TestLogWarningIfErr() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	err := fmt.Errorf("example error")
	g.LogWarningIfErr(err)
	g.SaveOutput()
	expected := "graylogger.outputSuite.TestLogWarningIfErr :: example error"
	s.Equal(true, strings.Contains(g.GetOutput(), expected))
	resetTest(s)
}

// TestError verifies that Error messages appear in the captured output.
func (s outputSuite) TestError() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	g.Error("test", LevelError)
	g.SaveOutput()
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: error"))
	resetTest(s)
}

// TestLogErrorIfErr verifies that LogErrorIfErr logs a non-nil error
// prefixed with the calling function's name.
func (s outputSuite) TestLogErrorIfErr() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	err := fmt.Errorf("example error")
	g.LogErrorIfErr(err)
	g.SaveOutput()
	expected := "graylogger.outputSuite.TestLogErrorIfErr :: example error"
	s.Equal(true, strings.Contains(g.GetOutput(), expected))
	resetTest(s)
}
// TestFatal verifies that Fatal triggers os.Exit (monkey-patched to panic
// so it can be observed as a panic value).
func (s outputSuite) TestFatal() {
	init := testInit
	ExpectedPanicText := "Fatal function called"
	panicFunc := func(int) {
		panic(ExpectedPanicText)
	}
	patch := monkey.Patch(os.Exit, panicFunc)
	defer patch.Unpatch()
	assert.PanicsWithValue(
		s.T(),
		ExpectedPanicText,
		func() {
			g := New(init)
			g.CaptureOutput(testOutputFileName)
			err := fmt.Errorf("example fatal error")
			g.Fatal(err)
			// NOTE(review): the statements below never execute — g.Fatal
			// panics via the patched os.Exit, so these assertions are dead.
			g.SaveOutput()
			s.Equal(true, strings.Contains(g.GetOutput(), "example fatal error"))
		},
		"Fatal function was not called")
	resetTest(s)
}

// TestReturnWithError verifies that ReturnWithError both logs the message
// and returns it as an error value.
func (s outputSuite) TestReturnWithError() {
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	err := g.ReturnWithError("example", "error")
	s.Equal("example :: error", fmt.Sprint(err))
	g.SaveOutput()
	expected := "example :: error"
	s.Equal(true, strings.Contains(g.GetOutput(), expected))
	resetTest(s)
}
// TestGetInit verifies that GetInit returns the configuration the logger
// was created with.
func (s outputSuite) TestGetInit() {
	g := New(testInit)
	i := g.GetInit()
	s.Equal(LevelDebug, i.LogLevel)
	s.Equal("test", i.LogEnv)
	s.Equal(false, i.LogColor)
}

// TestIsAllowedOutput verifies that output is allowed by default and
// disallowed after DiscardOutput.
func (s outputSuite) TestIsAllowedOutput() {
	g := New(testInit)
	s.Equal(true, g.IsAllowedOutput())
	g.DiscardOutput()
	s.Equal(false, g.IsAllowedOutput())
}

// TestGetLogLevel verifies the numeric and string forms of the configured
// log level (debug == 7).
func (s outputSuite) TestGetLogLevel() {
	g := New(testInit)
	LevelNum, levelString := g.GetLogLevel()
	s.Equal(7, LevelNum)
	s.Equal("debug", levelString)
	g.DiscardOutput()
	s.Equal(false, g.IsAllowedOutput())
}
// TestPrintOutput verifies that PrintOutput re-emits the saved output to
// stdout, by capturing stdout into a file and inspecting it.
func (s outputSuite) TestPrintOutput() {
	// 1. Save output
	g := New(testInit)
	g.CaptureOutput(testOutputFileName)
	g.Error("test", LevelError)
	g.SaveOutput()
	s.Equal(true, strings.Contains(g.GetOutput(), "test :: error"))
	// 2. Print saved output from file and capture printed text from stdOut
	testPrintOutputFileName := "capture_print_output.out"
	captureOutput(testPrintOutputFileName, func() {
		g.PrintOutput()
	})
	// 3. Captured output should match
	b, err := ioutil.ReadFile(testPrintOutputFileName)
	s.Equal(nil, err)
	s.Equal(true, strings.Contains(string(b), "test :: error"))
	err = os.Remove(testPrintOutputFileName)
	s.Equal(nil, err)
	resetTest(s)
}
// resetTest removes the captured-output scratch file created by a test and
// asserts the removal succeeded.
func resetTest(s outputSuite) {
	err := os.Remove(testOutputFileName)
	s.Equal(nil, err)
}
// captureOutput redirects stdout, stderr and the standard logger to a pipe
// while f runs, then writes everything that was printed to fileName.
// NOTE(review): the os.Pipe error is ignored; a pipe failure would surface
// later as writes to a nil *os.File.
func captureOutput(fileName string, f func()) {
	reader, writer, _ := os.Pipe()
	stdout := os.Stdout
	stderr := os.Stderr
	// Restore the real streams regardless of how f behaves.
	defer func() {
		os.Stdout = stdout
		os.Stderr = stderr
		log.SetOutput(os.Stderr)
	}()
	os.Stdout = writer
	os.Stderr = writer
	log.SetOutput(writer)
	out := make(chan string)
	wg := new(sync.WaitGroup)
	wg.Add(1)
	// Drain the pipe concurrently so writers never block on a full pipe
	// buffer; the WaitGroup only guarantees the goroutine has started
	// before f runs.
	go func() {
		var buf bytes.Buffer
		wg.Done()
		_, _ = io.Copy(&buf, reader)
		out <- buf.String()
	}()
	wg.Wait()
	f()
	// Closing the writer ends io.Copy, which then delivers the captured text.
	_ = writer.Close()
	_ = ioutil.WriteFile(fileName, []byte(<-out), os.ModePerm)
}
// TestOutputSuite runs the output test suite under the standard test runner.
func TestOutputSuite(t *testing.T) {
	suite.Run(t, new(outputSuite))
}
|
package services
import (
"net/http"
"github.com/rwbailey/microservices/mvc/domain"
"github.com/rwbailey/microservices/mvc/utils"
)
// itemsService provides item lookup operations; use it through the
// package-level ItemsService value.
type itemsService struct{}

var (
	// ItemsService is the shared itemsService instance.
	ItemsService itemsService
)

// GetItem returns the item with the given ID, or an ApplicationError.
// Currently unimplemented: it always returns an internal-server-error.
// (Parameter renamed itemId -> itemID per Go initialism convention.)
func (*itemsService) GetItem(itemID int64) (*domain.Item, *utils.ApplicationError) {
	return nil, &utils.ApplicationError{
		Message: "implement me",
		StatusCode: http.StatusInternalServerError,
		Code: "internal server error",
	}
}
|
package insertSort
// insertSort sorts arr in ascending order, in place, using insertion sort.
func insertSort(arr []int) {
	for i := 1; i < len(arr); i++ {
		key := arr[i]
		j := i
		// Shift every element greater than key one slot to the right, then
		// drop key into the gap.
		for j > 0 && arr[j-1] > key {
			arr[j] = arr[j-1]
			j--
		}
		arr[j] = key
	}
}
// +build !js
package math4g
import (
"math"
)
const (
	// uvnan is the bit pattern of a quiet float32 NaN (exponent all ones,
	// high mantissa bit set), matching the value used by gonum's math32
	// package. The previous value 0x7F800001 was a signaling NaN, which can
	// raise floating-point exceptions on some platforms.
	uvnan uint32 = 0x7FC00001
)

// NaN returns a quiet "not-a-number" Scala value.
func NaN() Scala {
	return Scala(math.Float32frombits(uvnan))
}

// IsNaN reports whether x is a "not-a-number" value.
// NaN is the only value that is not equal to itself.
func IsNaN(x Scala) bool {
	return x != x
}

// Cbrt returns the cube root of x, computed in float64 precision.
func Cbrt(x Scala) Scala {
	return Scala(math.Cbrt(float64(x)))
}
|
package codequalitybinding
import (
"alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
"alauda.io/diablo/src/backend/api"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apis/meta/v1"
)
// toDetails wraps a v1alpha1.CodeQualityBinding into the local
// CodeQualityBinding view type, attaching API object/type metadata.
func toDetails(codeQualityBinding *v1alpha1.CodeQualityBinding) *CodeQualityBinding {
	return &CodeQualityBinding{
		ObjectMeta: api.NewObjectMeta(codeQualityBinding.ObjectMeta),
		TypeMeta: api.NewTypeMeta(api.ResourceKindCodeQualityBinding),
		Spec: codeQualityBinding.Spec,
		Status: codeQualityBinding.Status,
	}
}
// GetCodeQualityBinding fetches the named CodeQualityBinding in the given
// namespace using the package's cached get options.
func GetCodeQualityBinding(client devopsclient.Interface, namespace, name string) (*v1alpha1.CodeQualityBinding, error) {
	binding, err := client.DevopsV1alpha1().CodeQualityBindings(namespace).Get(name, api.GetOptionsInCache)
	if err != nil {
		// Return an explicit nil object on failure.
		return nil, err
	}
	return binding, nil
}
// UpdateCodeQualityBinding applies the new binding's annotations and spec
// onto a deep copy of the old binding and submits the update, preserving
// the old object's remaining metadata.
func UpdateCodeQualityBinding(client devopsclient.Interface, oldCodeQualityBinding, newCodeQualityBinding *v1alpha1.CodeQualityBinding) (*v1alpha1.CodeQualityBinding, error) {
	glog.V(3).Infof("update the codequalitybinding %s", newCodeQualityBinding.GetName())
	// Mutate a copy so the caller's oldCodeQualityBinding stays untouched.
	binding := oldCodeQualityBinding.DeepCopy()
	binding.SetAnnotations(newCodeQualityBinding.GetAnnotations())
	binding.Spec = newCodeQualityBinding.Spec
	return client.DevopsV1alpha1().CodeQualityBindings(newCodeQualityBinding.Namespace).Update(binding)
}
// CreateCodeQualityBinding creates the given binding in namespace.
func CreateCodeQualityBinding(client devopsclient.Interface, codeQualityBinding *v1alpha1.CodeQualityBinding, namespace string) (*v1alpha1.CodeQualityBinding, error) {
	glog.V(3).Infof("create the codequalitybinding %s", codeQualityBinding.GetName())
	return client.DevopsV1alpha1().CodeQualityBindings(namespace).Create(codeQualityBinding)
}

// DeleteCodeQualityBinding deletes the named binding from namespace.
func DeleteCodeQualityBinding(client devopsclient.Interface, namespace, name string) error {
	glog.V(3).Infof("delete the codequalitybinding %s", name)
	return client.DevopsV1alpha1().CodeQualityBindings(namespace).Delete(name, &v1.DeleteOptions{})
}
|
package main
import (
"fmt"
"os"
"runtime"
"time"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/disk"
"github.com/shirou/gopsutil/host"
"github.com/shirou/gopsutil/net"
"github.com/shirou/gopsutil/v3/mem"
)
// LSysInfo is a snapshot of basic system information. Memory fields are in
// MB after GetSysInfo's conversion; the system uptime is decomposed into
// Days/Hours/Minutes/Seconds.
type LSysInfo struct {
	MemAll uint64
	MemFree uint64
	MemUsed uint64
	MemUsedPercent float64
	Days int64
	Hours int64
	Minutes int64
	Seconds int64
	CpuUsedPercent float64
	OS string
	Arch string
	// CpuCores is runtime.GOMAXPROCS(0), i.e. usable logical CPUs rather
	// than physical cores.
	CpuCores int
}
// main collects and prints system information, then exits.
func main() {
	GetSysInfo()
}
// GetSysInfo collects memory, CPU, OS and uptime information, prints the
// snapshot, runs infoTest, and then terminates the process.
// Memory figures are converted to MB.
func GetSysInfo() (info LSysInfo) {
	unit := uint64(1024 * 1024) // MB
	v, err := mem.VirtualMemory()
	// Guard against a failed read (nil v) and a zero total, which would
	// previously panic or divide by zero.
	if err == nil && v != nil && v.Total > 0 {
		info.MemAll = v.Total
		info.MemFree = v.Free
		info.MemUsed = info.MemAll - info.MemFree
		// Note: SwapMemory/VirtualMemory report usage differently across
		// platforms, so compute the percentage once from our own numbers.
		info.MemUsedPercent = float64(info.MemUsed) / float64(info.MemAll) * 100.0 // v.UsedPercent
		info.MemAll /= unit
		info.MemUsed /= unit
		info.MemFree /= unit
	}
	info.OS = runtime.GOOS
	info.Arch = runtime.GOARCH
	info.CpuCores = runtime.GOMAXPROCS(0)
	// Sample CPU usage over 200ms: shorter is inaccurate, longer delays us.
	cc, _ := cpu.Percent(time.Millisecond*200, false)
	// Previously cc[0] was read unconditionally and panicked when the
	// sampling failed and returned an empty slice.
	if len(cc) > 0 {
		info.CpuUsedPercent = cc[0]
	}
	// Derive the uptime from the boot timestamp.
	boottime, _ := host.BootTime()
	ntime := time.Now().Unix()
	btime := time.Unix(int64(boottime), 0).Unix()
	deltatime := ntime - btime
	info.Seconds = int64(deltatime)
	info.Minutes = info.Seconds / 60
	info.Seconds -= info.Minutes * 60
	info.Hours = info.Minutes / 60
	info.Minutes -= info.Hours * 60
	info.Days = info.Hours / 24
	info.Hours -= info.Days * 24
	fmt.Printf("info: %#v\n", info)
	infoTest()
	// NOTE(review): os.Exit terminates the process here, so the function
	// never actually returns; drop it if GetSysInfo should be reusable.
	os.Exit(0)
	return
}
// infoTest prints detailed CPU, disk, host and network information.
// gopsutil errors are deliberately ignored (best-effort output), but all
// slice and pointer results are now guarded before use to avoid panics.
func infoTest() {
	c, _ := cpu.Info()
	cc, _ := cpu.Percent(time.Second, false) // sample over 1 second
	d, _ := disk.Usage("/")
	n, _ := host.Info()
	nv, _ := net.IOCounters(true)
	physicalCnt, _ := cpu.Counts(false)
	logicalCnt, _ := cpu.Counts(true)
	if len(c) > 1 {
		for _, sub_cpu := range c {
			modelname := sub_cpu.ModelName
			cores := sub_cpu.Cores
			fmt.Printf("CPUs: %v %v cores \n", modelname, cores)
		}
	} else if len(c) == 1 {
		// Previously c[0] was read unconditionally in the else branch,
		// panicking when cpu.Info failed and returned an empty slice.
		sub_cpu := c[0]
		modelname := sub_cpu.ModelName
		cores := sub_cpu.Cores
		fmt.Printf("CPU: %v %v cores \n", modelname, cores)
	}
	fmt.Printf("physical count:%d logical count:%d\n", physicalCnt, logicalCnt)
	if len(cc) > 0 {
		fmt.Printf("CPU Used: used %f%%\n", cc[0])
	}
	if d != nil {
		fmt.Printf("HD: %v GB Free: %v GB Usage:%f%%\n", d.Total/1024/1024/1024, d.Free/1024/1024/1024, d.UsedPercent)
	}
	if n != nil {
		fmt.Printf("OS: %v(%v) %v\n", n.Platform, n.PlatformFamily, n.PlatformVersion)
		fmt.Printf("Hostname: %v\n", n.Hostname)
	}
	if len(nv) > 0 {
		fmt.Printf("Network: %v bytes / %v bytes\n", nv[0].BytesRecv, nv[0].BytesSent)
	}
}
|
package tests
import (
M "github.com/ionous/sashimi/compiler/model"
. "github.com/ionous/sashimi/script"
"github.com/ionous/sashimi/util/ident"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
)
// TestRelation verifies that two non-conflicting specifications of the same
// gremlins-pets / rocks relation compile into a single one-to-many relation
// rather than a duplicate. (The previous header comment was a stale
// copy-paste about subclasses.)
func TestRelation(t *testing.T) {
	s := Script{}
	s.The("kinds",
		Called("gremlins"),
		HaveMany("pets", "rocks").
			Implying("rocks", HaveOne("o beneficent one", "gremlin")),
		// alternate, non-conflicting specification of the same relation
		HaveMany("pets", "rocks").
			// FIX? if the names don't match, this creates two views of the same relation.
			// validate the hierarchy to verify no duplicate property usage?
			Implying("rocks", HaveOne("o beneficent one", "gremlin")),
	)
	s.The("kinds", Called("rocks"), Exist())
	res, err := s.Compile(Log(t))
	if assert.NoError(t, err) {
		model := res.Model
		model.PrintModel(t.Log)
		// Both specifications must collapse into one relation.
		assert.Equal(t, 1, len(model.Relations))
		for _, v := range model.Relations {
			assert.EqualValues(t, "GremlinsPets", v.Source.String())
			assert.EqualValues(t, "RocksOBeneficentOne", v.Target.String())
			assert.EqualValues(t, M.OneToMany, v.Style)
		}
	}
}
// TestRelates compiles a script in which a concrete gremlin instance
// ("Claire") owns a rock ("Loofah") through the pets relation, then checks
// that the reverse pointer ("o beneficent one") on the rock resolves back to
// Claire's id.
func TestRelates(t *testing.T) {
	script := Script{}
	script.The("kinds",
		Called("gremlins"),
		HaveMany("pets", "rocks").
			Implying("rocks", HaveOne("o beneficent one", "gremlin")),
	)
	script.The("kinds", Called("rocks"), Exist())
	// FIX: for now the property names must match,
	// i'd prefer the signular: Has("pet", "Loofah")
	script.The("gremlin", Called("Claire"), Has("pets", "Loofah"))
	script.The("rock", Called("Loofah"), Exists())

	compiled, err := script.Compile(Log(t))
	if assert.NoError(t, err, "compile") {
		model := compiled.Model
		assert.Equal(t, 2, len(model.Instances), "two instances")

		// Resolve Claire and her class, then the "pets" relation property.
		claire, ok := model.Instances[ident.MakeId("claire")]
		require.True(t, ok, "found claire")
		gremlins, ok := model.Classes[claire.Class]
		require.True(t, ok, "found gremlins")
		petsProp, ok := gremlins.FindProperty("pets")
		assert.True(t, ok)
		assert.True(t, !petsProp.Relation.Empty())

		// Resolve Loofah and confirm its back-reference points at Claire.
		loofah, ok := model.Instances[ident.MakeId("Loofah")]
		assert.True(t, ok, "found loofah")
		rocks, ok := model.Classes[loofah.Class]
		require.True(t, ok, "found rocks")
		ownerProp, ok := rocks.FindProperty("o beneficent one")
		require.True(t, ok, "found benes")
		owner, ok := loofah.Values[ownerProp.Id]
		require.True(t, ok, "found grem")
		assert.EqualValues(t, claire.Id, owner)
	}
}
|
package release
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/blang/semver/v4"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
_ "k8s.io/klog/v2" // integration tests set glog flags.
)
// TestGetUpdates exercises GetUpdates against a mocked Cincinnati graph:
// direct single-hop updates, a full multi-hop path within one channel,
// the no-updates case, and errors for unknown current/requested versions.
// Each case also verifies the exact query string sent upstream.
func TestGetUpdates(t *testing.T) {
	arch := "test-arch"
	channelName := "stable-4.0"
	tests := []struct {
		name          string
		version       string // current cluster version fed to GetUpdates
		reqVer        string // requested target version
		expectedQuery string // query the mock upstream must receive
		current       Update
		requested     Update
		available     []Update
		err           string // expected error text; empty means success
	}{{
		name:          "Valid/DirectUpdate",
		version:       "4.0.0-4",
		reqVer:        "4.0.0-5",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab&version=4.0.0-4",
		current:       Update{Version: semver.MustParse("4.0.0-4"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-4"},
		requested:     Update{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
		available: []Update{
			{Version: semver.MustParse("4.0.0-4"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-4"},
			{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
		},
	}, {
		name:          "Valid/FullChannel",
		version:       "4.0.0-4",
		reqVer:        "4.0.0-8",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab&version=4.0.0-4",
		current:       Update{Version: semver.MustParse("4.0.0-4"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-4"},
		requested:     Update{Version: semver.MustParse("4.0.0-8"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-8"},
		available: []Update{
			{Version: semver.MustParse("4.0.0-4"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-4"},
			{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
			{Version: semver.MustParse("4.0.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-6"},
			{Version: semver.MustParse("4.0.0-8"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-8"},
		},
	}, {
		name:          "Valid/NoUpdates",
		version:       "4.0.0-4",
		reqVer:        "4.0.0-0.okd-0",
		current:       Update{Version: semver.MustParse("4.0.0-4"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-4"},
		requested:     Update{Version: semver.MustParse("4.0.0-0.okd-0"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-0.okd-0"},
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab&version=4.0.0-4",
		available:     nil,
	}, {
		name:          "Invalid/UnknownCurrentVersion",
		version:       "4.0.0-3",
		reqVer:        "0.0.0",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab&version=4.0.0-3",
		err:           "VersionNotFound: current version 4.0.0-3 not found in the \"stable-4.0\" channel",
	}, {
		name:          "Invalid/UnknownRequestedVersion",
		version:       "4.0.0-5",
		reqVer:        "4.0.0-9",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab&version=4.0.0-5",
		err:           "VersionNotFound: requested version 4.0.0-9 not found in the \"stable-4.0\" channel",
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Buffered channel records the one query the handler receives.
			// NOTE(review): this deferred close runs before t.Cleanup(ts.Close),
			// so a straggling request could send on a closed channel — the
			// handler's non-blocking send does not protect against that.
			requestQuery := make(chan string, 1)
			defer close(requestQuery)
			handler := getHandlerMulti(t, requestQuery)

			ts := httptest.NewServer(http.HandlerFunc(handler))
			t.Cleanup(ts.Close)

			endpoint, err := url.Parse(ts.URL)
			require.NoError(t, err)
			c := &mockClient{url: endpoint}

			current, requested, updates, err := GetUpdates(context.Background(), c, arch, channelName, semver.MustParse(test.version), semver.MustParse(test.reqVer))
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.current, current)
				require.Equal(t, test.requested, requested)
				require.Equal(t, test.available, updates)
			} else {
				require.EqualError(t, err, test.err)
			}

			// Exactly one upstream request must have been made; compare the
			// parsed query values so parameter order does not matter.
			actualQuery := ""
			select {
			case actualQuery = <-requestQuery:
			default:
				t.Fatal("no request received at upstream URL")
			}
			expectedQueryValues, err := url.ParseQuery(test.expectedQuery)
			require.NoError(t, err)
			actualQueryValues, err := url.ParseQuery(actualQuery)
			require.NoError(t, err)
			require.Equal(t, expectedQueryValues, actualQueryValues)
		})
	}
}
// TestGetMinorMax checks GetChannelMinOrMax against the mocked "stable-4.0"
// graph: with min=false the newest version is expected, with min=true the
// oldest. The upstream query string is verified as well.
func TestGetMinorMax(t *testing.T) {
	arch := "test-arch"
	channelName := "stable-4.0"
	tests := []struct {
		name          string
		expectedQuery string
		version       semver.Version // expected min or max of the channel
		min           bool           // true: ask for the channel minimum
		err           string
	}{{
		name:          "Valid/MaxChannel",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab",
		version:       semver.MustParse("4.0.0-8"),
	}, {
		name:          "Valid/MinChannel",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab",
		version:       semver.MustParse("4.0.0-0.okd-0"),
		min:           true,
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requestQuery := make(chan string, 1)
			defer close(requestQuery)
			handler := getHandlerMulti(t, requestQuery)

			ts := httptest.NewServer(http.HandlerFunc(handler))
			t.Cleanup(ts.Close)

			endpoint, err := url.Parse(ts.URL)
			require.NoError(t, err)
			c := &mockClient{url: endpoint}

			version, err := GetChannelMinOrMax(context.Background(), c, arch, channelName, test.min)
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.version, version)
			} else {
				require.EqualError(t, err, test.err)
			}

			// Verify the single upstream request carried the expected query.
			actualQuery := ""
			select {
			case actualQuery = <-requestQuery:
			default:
				t.Fatal("no request received at upstream URL")
			}
			expectedQueryValues, err := url.ParseQuery(test.expectedQuery)
			require.NoError(t, err)
			actualQueryValues, err := url.ParseQuery(actualQuery)
			require.NoError(t, err)
			require.Equal(t, expectedQueryValues, actualQueryValues)
		})
	}
}
// TestGetVersions checks GetVersions: a populated channel returns its full
// sorted version list, while an empty channel yields a NoVersionsFound error.
func TestGetVersions(t *testing.T) {
	tests := []struct {
		name          string
		channel       string
		arch          string
		expectedQuery string
		versions      []semver.Version
		err           string
	}{
		{
			name:          "Valid/OneChannel",
			channel:       "stable-4.0",
			arch:          "test-arch",
			expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab",
			versions:      getSemVers([]string{"4.0.0-0.okd-0", "4.0.0-4", "4.0.0-5", "4.0.0-6", "4.0.0-7", "4.0.0-8"}),
		},
		{
			name:          "Invalid/EmptyChannel",
			channel:       "empty-4.0",
			arch:          "test-arch",
			expectedQuery: "arch=test-arch&channel=empty-4.0&id=01234567-0123-0123-0123-0123456789ab",
			err:           "NoVersionsFound: no cluster versions found in the \"empty-4.0\" channel",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requestQuery := make(chan string, 1)
			defer close(requestQuery)
			handler := getHandlerMulti(t, requestQuery)

			ts := httptest.NewServer(http.HandlerFunc(handler))
			t.Cleanup(ts.Close)

			endpoint, err := url.Parse(ts.URL)
			require.NoError(t, err)
			c := &mockClient{url: endpoint}

			versions, err := GetVersions(context.Background(), c, test.arch, test.channel)
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.versions, versions)
			} else {
				require.EqualError(t, err, test.err)
			}

			// Verify the single upstream request carried the expected query.
			actualQuery := ""
			select {
			case actualQuery = <-requestQuery:
			default:
				t.Fatal("no request received at upstream URL")
			}
			expectedQueryValues, err := url.ParseQuery(test.expectedQuery)
			require.NoError(t, err)
			actualQueryValues, err := url.ParseQuery(actualQuery)
			require.NoError(t, err)
			require.Equal(t, expectedQueryValues, actualQueryValues)
		})
	}
}
// TestGetUpdatesInRange checks that GetUpdatesInRange returns only the channel
// versions satisfying the given semver range (here ">=4.0.0-5").
func TestGetUpdatesInRange(t *testing.T) {
	arch := "test-arch"
	channelName := "stable-4.0"
	tests := []struct {
		name          string
		expectedQuery string
		versions      []Update
		releaseRange  semver.Range
		err           string
	}{{
		name:          "Valid/OneChannel",
		expectedQuery: "arch=test-arch&channel=stable-4.0&id=01234567-0123-0123-0123-0123456789ab",
		versions: []Update{
			{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
			{Version: semver.MustParse("4.0.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-6"},
			{Version: semver.MustParse("4.0.0-7"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-7"},
			{Version: semver.MustParse("4.0.0-8"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-8"},
		},
		releaseRange: semver.MustParseRange(">=4.0.0-5"),
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requestQuery := make(chan string, 1)
			defer close(requestQuery)
			handler := getHandlerMulti(t, requestQuery)

			ts := httptest.NewServer(http.HandlerFunc(handler))
			t.Cleanup(ts.Close)

			endpoint, err := url.Parse(ts.URL)
			require.NoError(t, err)
			c := &mockClient{url: endpoint}

			versions, err := GetUpdatesInRange(context.TODO(), c, channelName, arch, test.releaseRange)
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.versions, versions)
			} else {
				require.EqualError(t, err, test.err)
			}

			// Verify the single upstream request carried the expected query.
			actualQuery := ""
			select {
			case actualQuery = <-requestQuery:
			default:
				t.Fatal("no request received at upstream URL")
			}
			expectedQueryValues, err := url.ParseQuery(test.expectedQuery)
			require.NoError(t, err)
			actualQueryValues, err := url.ParseQuery(actualQuery)
			require.NoError(t, err)
			require.Equal(t, expectedQueryValues, actualQueryValues)
		})
	}
}
// TestCalculateUpgrades checks CalculateUpgrades across channel combinations:
// single-channel paths, two-channel hops, same-version channels with different
// prefixes, a missing upgrade path (warning only), a blocked edge, and error
// cases for versions absent from the source or target channel.
func TestCalculateUpgrades(t *testing.T) {
	arch := "test-arch"
	tests := []struct {
		name            string
		sourceChannel   string
		targetChannel   string
		curr            semver.Version
		req             semver.Version
		currentUpdate   Update
		requestedUpdate Update
		neededUpdates   []Update
		err             string
	}{{
		name:            "Success/OneChannel",
		sourceChannel:   "stable-4.0",
		targetChannel:   "stable-4.1",
		curr:            semver.MustParse("4.0.0-5"),
		req:             semver.MustParse("4.1.0-6"),
		currentUpdate:   Update{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
		requestedUpdate: Update{Version: semver.MustParse("4.1.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.1.0-6"},
		neededUpdates: []Update{
			{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
			{Version: semver.MustParse("4.0.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-6"},
			{Version: semver.MustParse("4.0.0-8"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-8"},
			{Version: semver.MustParse("4.1.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.1.0-6"},
		},
	}, {
		name:            "Success/TwoChannels",
		sourceChannel:   "stable-4.0",
		targetChannel:   "stable-4.2",
		curr:            semver.MustParse("4.0.0-5"),
		req:             semver.MustParse("4.2.0-3"),
		currentUpdate:   Update{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
		requestedUpdate: Update{Version: semver.MustParse("4.2.0-3"), Image: "quay.io/openshift-release-dev/ocp-release:4.2.0-3"},
		neededUpdates: []Update{
			{Version: semver.MustParse("4.0.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-5"},
			{Version: semver.MustParse("4.0.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-6"},
			{Version: semver.MustParse("4.0.0-8"), Image: "quay.io/openshift-release-dev/ocp-release:4.0.0-8"},
			{Version: semver.MustParse("4.1.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.1.0-6"},
			{Version: semver.MustParse("4.2.0-3"), Image: "quay.io/openshift-release-dev/ocp-release:4.2.0-3"},
		},
	}, {
		name:            "Success/TwoChannelsDifferentPrefix",
		sourceChannel:   "stable-4.3",
		targetChannel:   "fast-4.3",
		curr:            semver.MustParse("4.3.0"),
		req:             semver.MustParse("4.3.1"),
		currentUpdate:   Update{Version: semver.MustParse("4.3.0"), Image: "quay.io/openshift-release-dev/ocp-release:4.3.0"},
		requestedUpdate: Update{Version: semver.MustParse("4.3.1"), Image: "quay.io/openshift-release-dev/ocp-release:4.3.1"},
		neededUpdates: []Update{
			{Version: semver.MustParse("4.3.0"), Image: "quay.io/openshift-release-dev/ocp-release:4.3.0"},
			{Version: semver.MustParse("4.3.1"), Image: "quay.io/openshift-release-dev/ocp-release:4.3.1"},
		},
	}, {
		name:            "SuccessWithWarning/NoUpgradePath",
		sourceChannel:   "stable-4.1",
		targetChannel:   "stable-4.2",
		curr:            semver.MustParse("4.1.0-6"),
		req:             semver.MustParse("4.2.0-2"),
		currentUpdate:   Update{Version: semver.MustParse("4.1.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.1.0-6"},
		requestedUpdate: Update{Version: semver.MustParse("4.2.0-2"), Image: "quay.io/openshift-release-dev/ocp-release:4.2.0-2"},
		neededUpdates: []Update{
			{Version: semver.MustParse("4.1.0-6"), Image: "quay.io/openshift-release-dev/ocp-release:4.1.0-6"},
		},
	}, {
		name:            "SuccessWithWarning/BlockedEdge",
		sourceChannel:   "stable-4.2",
		targetChannel:   "stable-4.3",
		curr:            semver.MustParse("4.2.0-3"),
		req:             semver.MustParse("4.3.0"),
		currentUpdate:   Update{Version: semver.MustParse("4.2.0-3"), Image: "quay.io/openshift-release-dev/ocp-release:4.2.0-3"},
		requestedUpdate: Update{Version: semver.MustParse("4.3.0"), Image: "quay.io/openshift-release-dev/ocp-release:4.3.0"},
		neededUpdates: []Update{
			{Version: semver.MustParse("4.2.0-3"), Image: "quay.io/openshift-release-dev/ocp-release:4.2.0-3"},
			{Version: semver.MustParse("4.2.0-5"), Image: "quay.io/openshift-release-dev/ocp-release:4.2.0-5"},
		},
	}, {
		name:          "Failure/InvalidLastVersion",
		sourceChannel: "stable-4.2",
		targetChannel: "stable-4.3",
		curr:          semver.MustParse("4.2.0-9"),
		req:           semver.MustParse("4.3.4"),
		err:           "channel \"stable-4.2\": VersionNotFound: current version 4.2.0-9 not found in the \"stable-4.2\" channel",
	}, {
		name:          "Failure/InvalidRequestedVersion",
		sourceChannel: "stable-4.2",
		targetChannel: "stable-4.3",
		curr:          semver.MustParse("4.2.0-3"),
		req:           semver.MustParse("4.3.5"),
		err:           "VersionNotFound: current version 4.3.5 not found in the \"stable-4.3\" channel",
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Larger buffer: CalculateUpgrades may hit the upstream several
			// times (one request per channel traversed).
			requestQuery := make(chan string, 10)
			defer close(requestQuery)
			handler := getHandlerMulti(t, requestQuery)

			ts := httptest.NewServer(http.HandlerFunc(handler))
			t.Cleanup(ts.Close)

			endpoint, err := url.Parse(ts.URL)
			require.NoError(t, err)

			cur, req, updates, err := CalculateUpgrades(context.Background(), &mockClient{url: endpoint}, arch, test.sourceChannel, test.targetChannel, test.curr, test.req)
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.currentUpdate, cur)
				require.Equal(t, test.requestedUpdate, req)
				require.Equal(t, test.neededUpdates, updates)
			} else {
				require.EqualError(t, err, test.err)
			}
		})
	}
}
// TestHandleBlockedEdges checks handleBlockedEdges: it should report blocked
// (true) only when the target channel has no edge from the last version
// (the "stable-4.2" -> "stable-4.3" case in the mock graph).
func TestHandleBlockedEdges(t *testing.T) {
	arch := "test-arch"
	tests := []struct {
		name          string
		sourceChannel string
		targetChannel string
		last          semver.Version // last version reached in the source channel
		req           semver.Version
		exp           bool // expected "is blocked" result
		err           string
	}{{
		name:          "Success/OneChannel",
		sourceChannel: "stable-4.0",
		targetChannel: "stable-4.1",
		last:          semver.MustParse("4.0.0-5"),
		req:           semver.MustParse("4.1.0-6"),
		exp:           false,
	}, {
		name:          "Success/TwoChannelsDifferentPrefix",
		sourceChannel: "stable-4.3",
		targetChannel: "fast-4.3",
		last:          semver.MustParse("4.3.0"),
		req:           semver.MustParse("4.3.1"),
		exp:           false,
	}, {
		name:          "SuccessWithWarning/NoUpgradePath",
		sourceChannel: "stable-4.1",
		targetChannel: "stable-4.2",
		last:          semver.MustParse("4.1.0-6"),
		req:           semver.MustParse("4.2.0-2"),
		exp:           false,
	}, {
		name:          "SuccessWithWarning/BlockedEdge",
		sourceChannel: "stable-4.2",
		targetChannel: "stable-4.3",
		last:          semver.MustParse("4.2.0-3"),
		req:           semver.MustParse("4.3.0"),
		exp:           true,
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			requestQuery := make(chan string, 10)
			defer close(requestQuery)
			handler := getHandlerMulti(t, requestQuery)

			ts := httptest.NewServer(http.HandlerFunc(handler))
			t.Cleanup(ts.Close)

			endpoint, err := url.Parse(ts.URL)
			require.NoError(t, err)

			// Note: only targetChannel and last are passed; sourceChannel/req
			// document the scenario but are not inputs to handleBlockedEdges.
			isBlocked, err := handleBlockedEdges(context.Background(), &mockClient{url: endpoint}, arch, test.targetChannel, test.last)
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.exp, isBlocked)
			} else {
				require.EqualError(t, err, test.err)
			}
		})
	}
}
// mockClient is a minimal Client implementation pointing the Cincinnati code
// at a local httptest server.
// NOTE(review): Fail is never read in the tests visible here — confirm it is
// used elsewhere before removing.
type mockClient struct {
	url *url.URL
	Fail bool
}

// Compile-time check that mockClient satisfies the Client interface.
var _ Client = &mockClient{}
// GetID returns the fixed client UUID used by all test fixtures, so the
// expected query strings can hard-code it.
func (c mockClient) GetID() uuid.UUID {
	const fixedID = "01234567-0123-0123-0123-0123456789ab"
	return uuid.MustParse(fixedID)
}
// SetQueryParams encodes the client id plus any non-empty arch/channel/version
// values into the client URL's query string. Empty values are omitted.
// (url.Values.Encode sorts keys, so map iteration order is irrelevant.)
func (c mockClient) SetQueryParams(arch, channel, version string) {
	q := c.url.Query()
	q.Add("id", c.GetID().String())
	for key, value := range map[string]string{
		"arch":    arch,
		"channel": channel,
		"version": version,
	} {
		if value == "" {
			continue
		}
		q.Add(key, value)
	}
	c.url.RawQuery = q.Encode()
}
// GetURL returns the endpoint URL this mock client targets.
func (c mockClient) GetURL() *url.URL {
	return c.url
}
// GetTransport returns a default, unconfigured HTTP transport.
func (c mockClient) GetTransport() *http.Transport {
	return &http.Transport{}
}
// TestNodeUnmarshalJSON checks node's custom JSON unmarshalling: valid nodes
// with empty and populated metadata, plus two malformed version strings whose
// semver parse errors must surface verbatim.
func TestNodeUnmarshalJSON(t *testing.T) {
	tests := []struct {
		raw []byte
		exp node
		err string
	}{{
		raw: []byte(`{
		"version": "4.0.0-5",
		"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-5",
		"metadata": {}
	  }`),
		exp: node{
			Version:  semver.MustParse("4.0.0-5"),
			Image:    "quay.io/openshift-release-dev/ocp-release:4.0.0-5",
			Metadata: map[string]string{},
		},
	}, {
		raw: []byte(`{
		"version": "4.0.0-0.1",
		"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-0.1",
		"metadata": {
		  "description": "This is the beta1 image based on the 4.0.0-0.nightly-2019-01-15-010905 build"
		}
	  }`),
		exp: node{
			Version: semver.MustParse("4.0.0-0.1"),
			Image:   "quay.io/openshift-release-dev/ocp-release:4.0.0-0.1",
			Metadata: map[string]string{
				"description": "This is the beta1 image based on the 4.0.0-0.nightly-2019-01-15-010905 build",
			},
		},
	}, {
		// "v" prefix is not valid semver.
		raw: []byte(`{
		"version": "v4.0.0-0.1",
		"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-0.1",
		"metadata": {
		  "description": "This is the beta1 image based on the 4.0.0-0.nightly-2019-01-15-010905 build"
		}
	  }`),
		err: `Invalid character(s) found in major number "v4"`,
	}, {
		// Dashes instead of dots leave no Major.Minor.Patch structure.
		raw: []byte(`{
		"version": "4-0-0+0.1",
		"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-0.1",
		"metadata": {
		  "description": "This is the beta1 image based on the 4.0.0-0.nightly-2019-01-15-010905 build"
		}
	  }
	  `),
		err: "No Major.Minor.Patch elements found",
	}}
	for idx, test := range tests {
		t.Run(fmt.Sprintf("#%d", idx), func(t *testing.T) {
			var n node
			err := json.Unmarshal(test.raw, &n)
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.exp, n)
			} else {
				require.EqualError(t, err, test.err)
			}
		})
	}
}
// TestGetSemVerFromChannel checks getSemverFromChannels: a valid pair of
// "<prefix>-<major>.<minor>" channel names yields both parsed versions plus
// the source channel's prefix; a malformed target name is rejected.
func TestGetSemVerFromChannel(t *testing.T) {
	tests := []struct {
		name          string
		sourceChannel string
		targetChannel string
		err           string
		expSource     semver.Version
		expTarget     semver.Version
		expPrefix     string
	}{
		{
			name:          "Valid/StableChannels",
			sourceChannel: "stable-4.1",
			targetChannel: "fast-4.2",
			expSource:     semver.MustParse("4.1.0"),
			expTarget:     semver.MustParse("4.2.0"),
			expPrefix:     "stable",
		},
		{
			name:          "Invalid/InvalidChannelPrefix",
			sourceChannel: "stable-4.1",
			targetChannel: "fast:4.2",
			err:           "invalid channel name fast:4.2",
		},
	}
	for _, test := range tests {
		// Note: the call under test runs outside t.Run; only the assertions
		// execute inside the subtest.
		source, target, prefix, err := getSemverFromChannels(test.sourceChannel, test.targetChannel)
		t.Run(test.name, func(t *testing.T) {
			if test.err == "" {
				require.NoError(t, err)
				require.Equal(t, test.expPrefix, prefix)
				require.Equal(t, test.expSource, source)
				require.Equal(t, test.expTarget, target)
			} else {
				require.EqualError(t, err, test.err)
			}
		})
	}
}
// getSemVers parses each version string into a semver.Version. It is a test
// helper for trusted literal inputs: MustParse panics on malformed versions.
// The slice is pre-sized to avoid repeated growth reallocations.
func getSemVers(stringVers []string) []semver.Version {
	vers := make([]semver.Version, 0, len(stringVers))
	for _, stringVer := range stringVers {
		vers = append(vers, semver.MustParse(stringVer))
	}
	return vers
}
// getHandlerMulti mocks a multi channel Cincinnati API. Each request's raw
// query is recorded (non-blocking) into requestQuery, and a canned update
// graph is served based on the requested "channel" query parameter.
//
// NOTE(review): in several cases below t.Fatal(err) precedes w.WriteHeader —
// Fatal stops this goroutine, so the WriteHeader call is dead code; calling
// Fatal from a non-test goroutine is also discouraged (prefer t.Error +
// return).
func getHandlerMulti(t *testing.T, requestQuery chan<- string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Record the query without blocking; extra requests are dropped.
		select {
		case requestQuery <- r.URL.RawQuery:
		default:
			//t.Fatalf("received multiple requests at upstream URL")
		}

		if r.Method != http.MethodGet && r.Method != http.MethodHead {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}

		mtype := r.Header.Get("Accept")
		if mtype != GraphMediaType {
			w.WriteHeader(http.StatusUnsupportedMediaType)
			return
		}

		keys, ok := r.URL.Query()["channel"]
		if !ok {
			t.Fail()
		}
		// Use the last "channel" value if several were supplied.
		ch := keys[len(keys)-1]

		switch {
		case ch == "empty-4.0":
			_, err := w.Write([]byte(`{
				"nodes": [],
				"edges": []
			}`))
			if err != nil {
				t.Fatal(err)
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
		case ch == "stable-4.0":
			_, err := w.Write([]byte(`{
				"nodes": [
				  {
					"version": "4.0.0-4",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-4"
				  },
				  {
					"version": "4.0.0-5",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-5"
				  },
				  {
					"version": "4.0.0-6",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-6"
				  },
				  {
					"version": "4.0.0-0.okd-0",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-0.okd-0"
				  },
				  {
					"version": "4.0.0-7",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-7"
				  },
				  {
					"version": "4.0.0-8",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-8"
				  }
				],
				"edges": [[0,1],[1,2],[2,4],[2,5],[4,5]]
			}`))
			if err != nil {
				t.Fatal(err)
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
		case ch == "stable-4.1":
			_, err := w.Write([]byte(`{
				"nodes": [
				  {
					"version": "4.0.0-4",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-4"
				  },
				  {
					"version": "4.0.0-5",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-5"
				  },
				  {
					"version": "4.0.0-6",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-6"
				  },
				  {
					"version": "4.0.0-0.okd-0",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-0.okd-0"
				  },
				  {
					"version": "4.0.0-7",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-7"
				  },
				  {
					"version": "4.0.0-8",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.0.0-8"
				  },
				  {
					"version": "4.1.0-6",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.1.0-6"
				  }
				],
				"edges": [[0,1],[1,2],[2,4],[2,5],[4,5],[5,6]]
			}`))
			if err != nil {
				t.Fatal(err)
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
		case ch == "stable-4.2":
			_, err := w.Write([]byte(`{
				"nodes": [
				  {
					"version": "4.1.0-6",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.1.0-6"
				  },
				  {
					"version": "4.2.0-2",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.2.0-2"
				  },
				  {
					"version": "4.2.0-3",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.2.0-3"
				  },
				  {
					"version": "4.2.0-5",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.2.0-5"
				  }
				],
				"edges": [[0,2],[1,2],[2,3]]
			}`))
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				t.Fatal(err)
				return
			}
		case ch == "stable-4.3":
			_, err := w.Write([]byte(`{
				"nodes": [
				  {
					"version": "4.3.0",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.3.0"
				  },
				  {
					"version": "4.3.1",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.3.1"
				  }
				],
				"edges": [[0,1]]
			}`))
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				t.Fatal(err)
				return
			}
		case ch == "fast-4.3":
			_, err := w.Write([]byte(`{
				"nodes": [
				  {
					"version": "4.2.0-5",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.2.0-5"
				  },
				  {
					"version": "4.3.0",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.3.0"
				  },
				  {
					"version": "4.3.1",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.3.1"
				  },
				  {
					"version": "4.3.2",
					"payload": "quay.io/openshift-release-dev/ocp-release:4.3.2"
				  }
				],
				"edges": [[0,1],[1,2],[2,3]]
			}`))
			if err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				t.Fatal(err)
				return
			}
		default:
			// Unknown channel: the test fixture is incomplete.
			t.Fail()
		}
	}
}
|
package main
import (
"fmt"
)
// main sums a fixed sample of float32 values and prints the total.
func main() {
	samples := []float32{1.2, 3.2, 5.4}
	total := Sum(samples)
	fmt.Println(total)
}
// Sum returns the arithmetic total of the elements of arrF.
// A nil or empty slice sums to 0. Elements are accumulated left to right,
// matching the original's floating-point rounding behavior.
func Sum(arrF []float32) (s float32) {
	for i := 0; i < len(arrF); i++ {
		s += arrF[i]
	}
	return s
}
|
package dao
import (
"ego-user-service/utils/uuid"
"errors"
"fmt"
"github.com/go-log/log"
userInfoProto "github.com/qianxunke/ego-shopping/ego-common-protos/go_out/user/user_info"
"github.com/qianxunke/ego-shopping/ego-plugins/db"
"net/http"
)
// UserIsExit looks up a user row by user_name and returns it. It returns an
// error when the query fails, or when no matching row was found (signalled by
// an empty UserId on the scanned struct).
//
// NOTE(review): the name reads like "user is exit" — "UserExists" was probably
// intended, but renaming would break callers, so it is only flagged here.
func UserIsExit(userName string) (u *userInfoProto.UserInf, err error) {
	DB := db.MasterEngine()
	u = &userInfoProto.UserInf{}
	// NOTE(review): u is already a pointer, so Scan(&u) passes **UserInf.
	// Presumably the ORM tolerates the double indirection — confirm, or pass u.
	err = DB.Table("user_infs").Where("user_name = ?", userName).Scan(&u).Error
	if err != nil {
		return nil, err
	}
	if len(u.UserId) <= 0 {
		return nil, errors.New("user no exit !")
	}
	return u, err
}
func UpdateUserInfo(userId string, userMap map[string]interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if err == nil {
err = errors.New(fmt.Sprintf("%v", r))
}
}
}()
DB := db.MasterEngine()
err = DB.Model(&userInfoProto.UserInf{}).Where("user_id =?", userId).Updates(userMap, true).Error
return err
}
// GetUserInfoList returns a paginated list of users, optionally filtered by a
// fuzzy search key (matched against nike_name/user_name/mobile_phone) and/or
// a registration-time window [startTime, endTime].
//
// pages is 1-based and limit is the page size. On success rsp.Total holds the
// total match count and rsp.UserInfList the requested page; on failure
// rsp.Error carries an HTTP-500 code and the driver message.
func GetUserInfoList(searchKey string, startTime string, endTime string, pages int64, limit int64) (rsp *userInfoProto.OutGetUserInfoList) {
	DB := db.MasterEngine()
	rsp = &userInfoProto.OutGetUserInfoList{}
	var err error
	if len(searchKey) == 0 {
		if len(startTime) > 0 && len(endTime) == 0 {
			// fix: the count query filtered on endTime (empty in this branch);
			// it must match the data query below, which filters on startTime.
			err = DB.Model(&userInfoProto.UserInf{}).Where("register_time > ?", startTime).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				// fix: the offset was (-1)*limit regardless of the requested
				// page; use the same (pages-1)*limit as every other branch.
				err = DB.Where("register_time > ? ", startTime).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		} else if len(startTime) == 0 && len(endTime) > 0 {
			err = DB.Model(&userInfoProto.UserInf{}).Where("register_time < ? ", endTime).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Where("register_time < ? ", endTime).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		} else if len(startTime) > 0 && len(endTime) > 0 {
			err = DB.Model(&userInfoProto.UserInf{}).Where("register_time between ? and ?", startTime, endTime).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Where("register_time between ? and ?", startTime, endTime).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		} else {
			// No filters at all: count first, then fetch the requested page.
			err = DB.Model(&userInfoProto.UserInf{}).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		}
	} else {
		key := "%" + searchKey + "%"
		if len(startTime) > 0 && len(endTime) == 0 {
			err = DB.Model(&userInfoProto.UserInf{}).Where("(nike_name like ? or user_name like ? or mobile_phone like ?) and register_time > ? ", key, key, key, startTime).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Model(&userInfoProto.UserInf{}).Where("(nike_name like ? or user_name like ? or mobile_phone like ?) and register_time > ? ", key, key, key, startTime).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		} else if len(startTime) == 0 && len(endTime) > 0 {
			err = DB.Model(&userInfoProto.UserInf{}).Where("(nike_name like ? or user_name like ? or mobile_phone like ?) and register_time < ? ", key, key, key, endTime).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Where("(nike_name like ? or user_name like ? or mobile_phone like ?) and register_time < ? ", key, key, key, endTime).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		} else if len(startTime) > 0 && len(endTime) > 0 {
			err = DB.Model(&userInfoProto.UserInf{}).Where("(nike_name like ? or user_name like ? or mobile_phone like ?) and register_time between ? and ?", key, key, key, startTime, endTime).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Where("(nike_name like ? or user_name like ? or mobile_phone like ?) and register_time between ? and ?", key, key, key, startTime, endTime).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		} else {
			// fix: both queries passed a trailing startTime argument with no
			// matching placeholder; the redundant "1=1 and" prefix is dropped.
			err = DB.Model(&userInfoProto.UserInf{}).Where("(nike_name like ? or user_name like ? or mobile_phone like ?)", key, key, key).Order("user_id desc").Count(&rsp.Total).Error
			if err == nil && rsp.Total > 0 {
				err = DB.Where("(nike_name like ? or user_name like ? or mobile_phone like ?)", key, key, key).Order("user_id desc").Offset((pages - 1) * limit).Limit(limit).Find(&rsp.UserInfList).Error
			}
		}
	}
	if err != nil {
		log.Logf("ERROR: %v", err)
		rsp.Error = &userInfoProto.Error{
			Code:    http.StatusInternalServerError,
			Message: err.Error(),
		}
		return
	}
	rsp.Limit = limit
	rsp.Pages = pages
	return
}
// GetUserInfoById fetches a single user row by user_id.
// NOTE(review): user is already a pointer, so First(&user) passes **UserInf —
// presumably tolerated by the ORM, but verify (First(user) is conventional).
func GetUserInfoById(userId string) (user *userInfoProto.UserInf, err error) {
	DB := db.MasterEngine()
	user = &userInfoProto.UserInf{}
	err = DB.Where("user_id = ?", userId).First(&user).Error
	return
}
// GetUserInfoByPhoneOrUserName fetches the first user whose mobile_phone or
// user_name matches; used to test whether a user already exists.
// NOTE(review): user is already a pointer, so First(&user) passes **UserInf —
// presumably tolerated by the ORM, but verify (First(user) is conventional).
func GetUserInfoByPhoneOrUserName(mobilePhone string, userName string) (user *userInfoProto.UserInf, err error) {
	DB := db.MasterEngine()
	user = &userInfoProto.UserInf{}
	err = DB.Where(" mobile_phone = ? or user_name= ?", mobilePhone, userName).First(&user).Error
	return
}
// Insert assigns a freshly generated UUID to user.UserId and persists the
// record. The caller's struct is mutated with the new id.
func Insert(user *userInfoProto.UserInf) (err error) {
	user.UserId = uuid.GetUuid()
	DB := db.MasterEngine()
	err = DB.Create(user).Error
	return
}
|
/*
Copyright 2021-2023 ICS-FORTH.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"strings"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
// +kubebuilder:webhook:path=/mutate-frisbee-dev-v1alpha1-scenario,mutating=true,failurePolicy=fail,sideEffects=None,groups=frisbee.dev,resources=scenarios,verbs=create;update,versions=v1alpha1,name=mscenario.kb.io,admissionReviewVersions={v1,v1alpha1}

// Compile-time assertion that Scenario implements webhook.Defaulter.
var _ webhook.Defaulter = &Scenario{}

// +kubebuilder:webhook:path=/validate-frisbee-dev-v1alpha1-scenario,mutating=false,failurePolicy=fail,sideEffects=None,groups=frisbee.dev,resources=scenarios,verbs=create,versions=v1alpha1,name=vscenario.kb.io,admissionReviewVersions={v1,v1alpha1}

// Compile-time assertion that Scenario implements webhook.Validator.
var _ webhook.Validator = &Scenario{}

// log is for logging in this package.
var scenariolog = logf.Log.WithName("scenario-hook")
// SetupWebhookWithManager registers the Scenario defaulting and validating
// webhooks with the controller manager.
func (in *Scenario) SetupWebhookWithManager(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(in).
		Complete()
}
// Default implements webhook.Defaulter so a webhook will be registered for the type.
// It walks every action in the scenario and runs the type-specific Prepare()
// normalization. Preparation failures are only logged here; rejection happens
// later in ValidateCreate.
func (in *Scenario) Default() {
	scenariolog.Info("default", "name", in.Name)

	// Align Inputs with MaxInstances
	for i := 0; i < len(in.Spec.Actions); i++ {
		action := &in.Spec.Actions[i]

		switch action.ActionType {
		case ActionService:
			if err := action.Service.Prepare(false); err != nil {
				scenariolog.Error(err, "definition error", "action", action.Name)
			}
		case ActionCluster:
			// Clusters and cascades prepare their object template (hence true).
			if err := action.Cluster.GenerateObjectFromTemplate.Prepare(true); err != nil {
				scenariolog.Error(err, "definition error", "action", action.Name)
			}
		case ActionChaos:
			if err := action.Chaos.Prepare(false); err != nil {
				scenariolog.Error(err, "definition error", "action", action.Name)
			}
		case ActionCascade:
			if err := action.Cascade.GenerateObjectFromTemplate.Prepare(true); err != nil {
				scenariolog.Error(err, "definition error", "action", action.Name)
			}
		case ActionCall, ActionDelete:
			// calls and deletes do not involve templates.
			continue
		}
	}
}
// ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
// It rejects scenarios with an invalid dependency graph, malformed assertion
// expressions, invalid per-action specs, or unbounded (never-terminating)
// execution.
func (in *Scenario) ValidateCreate() (admission.Warnings, error) {
	legitReferences, err := BuildDependencyGraph(in)
	if err != nil {
		return nil, errors.Wrapf(err, "invalid scenario [%s]", in.GetName())
	}

	for i, action := range in.Spec.Actions {
		// Check that expressions used in the assertions are ok
		if !action.Assert.IsZero() {
			if err := ValidateExpr(action.Assert); err != nil {
				return nil, errors.Wrapf(err, "Invalid expr in assertion")
			}
		}

		// Ensure that the type of action is supported and is correctly set
		if err := CheckAction(&in.Spec.Actions[i], legitReferences); err != nil {
			return nil, errors.Wrapf(err, "incorrect spec for type [%s] of action [%s]", action.ActionType, action.Name)
		}
	}

	if err := CheckForBoundedExecution(legitReferences); err != nil {
		return nil, errors.Wrapf(err, "infinity error")
	}

	return nil, nil
}
// BuildDependencyGraph validates the execution workflow.
// 1. Ensures that action names are qualified (since they are used as generators to jobs)
// 2. Ensures that there are no two actions with the same name.
// 3. Ensure that dependencies point to a valid action.
// 4. Ensure that macros point to a valid action.
//
// Because callIndex is populated while iterating, a dependency may only
// reference an action declared earlier in the list.
func BuildDependencyGraph(scenario *Scenario) (map[string]*Action, error) {
	// callIndex maintains a map of all the action in the scenario
	callIndex := make(map[string]*Action, len(scenario.Spec.Actions))

	// prepare a dependency graph
	for i, action := range scenario.Spec.Actions {
		// Because the action name will be the "matrix" for generating addressable jobs,
		// it must adhere to certain properties.
		if errs := validation.IsDNS1123Subdomain(action.Name); errs != nil {
			err := errors.New(strings.Join(errs, "; "))

			return nil, errors.Wrapf(err, "invalid action name %s", action.Name)
		}

		// validate references dependencies
		if deps := action.DependsOn; deps != nil {
			for _, dep := range deps.Running {
				if _, exists := callIndex[dep]; !exists {
					return nil, errors.Errorf("invalid running dependency: [%s]<-[%s]", action.Name, dep)
				}
			}

			for _, dep := range deps.Success {
				if _, exists := callIndex[dep]; !exists {
					return nil, errors.Errorf("invalid success dependency: [%s]<-[%s]", action.Name, dep)
				}
			}
		}

		// update calling map
		if _, exists := callIndex[action.Name]; !exists {
			callIndex[action.Name] = &scenario.Spec.Actions[i]
		} else {
			return nil, errors.Errorf("Duplicate action '%s'", action.Name)
		}
	}

	return callIndex, nil
}
// CheckForBoundedExecution verifies that every action in the scenario
// eventually terminates within the scenario's lifespan.
//
// It performs a mock "run": an action counts as completed when some other
// action lists it as a Success dependency, or when a Delete action removes
// it. Actions left uncompleted at the end are rejected as suspects for
// running (and wasting resources) forever.
//
// NOTE(review): both passes below range over the callIndex map, whose
// iteration order is nondeterministic in Go. The "deletes an already
// completed job" error depends on whether the success-dependency marking
// is visited before the delete — confirm whether this check is intended
// to be order-sensitive.
func CheckForBoundedExecution(callIndex map[string]*Action) error {
	// Use transactions as a means to detect looping containers that never terminate within
	// the lifespan of the scenario. If so, the experiment never ends and waste resources.
	// The idea is find which Actions (Services, Clusters, ...) are not referenced by a
	// terminal dependency condition (e.g, success), and mark as suspects for looping.
	jobCompletionIndex := make(map[string]bool, len(callIndex))

	// Mark every action as uncompleted.
	for _, action := range callIndex {
		jobCompletionIndex[action.Name] = false
	}

	// Do a mockup "run" and mark completed jobs
	for _, action := range callIndex {
		// Successful actions are regarded as completed.
		if deps := action.DependsOn; deps != nil {
			for _, dep := range deps.Success {
				if _, exists := callIndex[dep]; !exists {
					return errors.Errorf("invalid success dependency [%s]<-[%s]", action.Name, dep)
				}

				jobCompletionIndex[dep] = true
			}
		}

		// Deleted actions are regarded as completed.
		if action.ActionType == ActionDelete {
			for _, job := range action.Delete.Jobs {
				completed, exists := jobCompletionIndex[job]
				if !exists {
					return errors.Errorf("internal error. job '%s' does not exist. This should be captured by reference graph", job)
				}

				if completed {
					return errors.Errorf("action.[%s].Delete[%s] deletes an already completed job", action.Name, job)
				}

				// mark the job as completed
				jobCompletionIndex[job] = true
			}

			// If it's a Teardown action, mark it as completed.
			// (The ActionDelete re-check is redundant inside this branch.)
			if action.ActionType == ActionDelete && action.Name == "teardown" {
				jobCompletionIndex[action.Name] = true
			}
		}
	}

	// Find jobs are that not completed
	var nonCompleted []string

	for actionName, completed := range jobCompletionIndex {
		if !completed {
			nonCompleted = append(nonCompleted, actionName)
		}
	}

	if len(nonCompleted) > 0 {
		return errors.Errorf("actions '%s' are neither completed nor waited at the end of the scenario", nonCompleted)
	}

	return nil
}
// CheckAction ensures that the action's type is supported, that its embedded
// spec for that type is present and valid, and that any cross-action
// references (delete targets, cluster placement) resolve against the given
// reference map.
func CheckAction(action *Action, references map[string]*Action) error {
	if action == nil || action.EmbedActions == nil {
		return errors.Errorf("empty definition")
	}

	switch action.ActionType {
	case ActionService:
		if action.EmbedActions.Service == nil {
			return errors.Errorf("empty service definition")
		}

		return nil
	case ActionCluster:
		if action.EmbedActions.Cluster == nil {
			return errors.Errorf("empty cluster definition")
		}

		var cluster Cluster

		cluster.Spec = *action.EmbedActions.Cluster

		_, err := cluster.ValidateCreate()
		if err != nil {
			return errors.Wrapf(err, "cluster error")
		}

		// validated here because it involves references to other actions.
		if placement := cluster.Spec.Placement; placement != nil {
			if err := ValidatePlacement(placement, references); err != nil {
				return errors.Wrapf(err, "placement error")
			}
		}

		return nil
	case ActionChaos:
		if action.EmbedActions.Chaos == nil {
			return errors.Errorf("empty chaos definition")
		}

		/*
			if spec.Type == v1alpha1.FaultKill {
				if action.DependsOn.Success != nil {
					return nil, errors.Errorf("kill is a inject-only chaos. it does not have success. only running")
				}
			}
		*/
		return nil
	case ActionCascade:
		if action.EmbedActions.Cascade == nil {
			return errors.Errorf("empty cascade definition")
		}

		var cascade Cascade

		cascade.Spec = *action.EmbedActions.Cascade

		_, err := cascade.ValidateCreate()

		return err
	case ActionDelete:
		if action.EmbedActions.Delete == nil {
			return errors.Errorf("empty delete definition")
		}

		// Check that references jobs exist and there are no cycle deletions
		for _, job := range action.EmbedActions.Delete.Jobs {
			target, exists := references[job]
			if !exists {
				return errors.Errorf("referenced job '%s' does not exist", job)
			}

			if target.ActionType == ActionDelete {
				return errors.Errorf("cycle deletion. referenced job '%s' should not be a deletion job", job)
			}
		}

		return nil
	case ActionCall:
		if action.EmbedActions.Call == nil {
			return errors.Errorf("empty call definition")
		}

		var call Call

		call.Spec = *action.EmbedActions.Call

		_, err := call.ValidateCreate()

		return err
	default:
		return errors.Errorf("Unknown action")
	}
}
// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
// Updates are currently accepted unconditionally (no warnings, no error);
// note that the webhook marker above only registers the "create" verb anyway.
func (in *Scenario) ValidateUpdate(runtime.Object) (admission.Warnings, error) {
	return nil, nil
}
// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
// It only logs the deletion request; no validation is performed.
func (in *Scenario) ValidateDelete() (admission.Warnings, error) {
	scenariolog.Info("validate delete", "name", in.Name)

	// TODO(user): fill in your validation logic upon object deletion.
	return nil, nil
}
|
// import (
// "fmt"
// "kto/transaction"
// "kto/types"
// "kto/until"
// )
// func main() {
// l := len("Kto72tzGAwYH7dHGbEH4yiz5gxWSqq9fRDSxXwsJPX98y25")
// fmt.Println(l)
// from_byte := []byte("Kto9sWkzypDGvxfgcXu5eXJrRzwtX9rG1ftPwQ2NMw3TraX")
// to_byte := []byte("Kto72tzGAwYH7dHGbEH4yiz5gxWSqq9fRDSxXwsJPX98y25")
// from_pri := until.Decode("2TtUBFZNELwKB4oeo4heRs3mZezaU84ujaHdVgCrki1jHhqn3yQYTaP92BrgrxeadEsStVE2f4aPL9ETsCeRGZjq")
// from := types.BytesToAddress(from_byte)
// to := types.BytesToAddress(to_byte)
// tx := transaction.New()
// tx = tx.NewTransaction(uint64(2), 600000, from, to, "")
// tx = tx.SignTx(from_pri)
// // tx = tx.NewOrder([]byte("123456"), []byte("qwertyuioiop"), from_byte, uint64(100))
// // tx = tx.SignOrder(from_pri)
// hash, err := tx.SendTransaction()
// if err != nil {
// fmt.Println(err)
// }
// fmt.Println(hash)
// // slice = append(slice, hash)
// }
// addr= 72tzGAwYH7dHGbEH4yiz5gxWSqq9fRDSxXwsJPX98y25
// private= VuNP9drXQAWKSSNZnXAcCwz9adZsDvWA6oXUfvzfJvWtZ1Uen5mSiHoYUoxBQZAybiPeWi264VcMpCAaukTxeS1
// addr= 9sWkzypDGvxfgcXu5eXJrRzwtX9rG1ftPwQ2NMw3TraX
// private= 2TtUBFZNELwKB4oeo4heRs3mZezaU84ujaHdVgCrki1jHhqn3yQYTaP92BrgrxeadEsStVE2f4aPL9ETsCeRGZjq
// addr= 9agPnEwDkAnoPMt2TenpwnBUc6djyykwepBKxUwZiHQE
// private= 1yjeMrBKG3phTqnZh5dnmBxxds4UBC7jBEM8DyTvpmmTk6iQ56yXzotdXTzD1nhBHjhjLu6WSzkYd5B3XaJurb4
package main
import (
"context"
"fmt"
"kto/rpcclient"
"kto/rpcclient/message"
"google.golang.org/grpc"
)
// manin builds a hard-coded token-creation request and submits it through
// the RPC client, printing the response.
//
// NOTE(review): the name "manin" looks like a typo for "main"; it is kept
// as-is to preserve the existing interface — rename in a separate change.
func manin() {
	client, err := newClient()
	if err != nil {
		rpcclient.Error("fail to new client:", err)
		return
	}

	ctx := context.Background()
	reqData := &message.ReqTokenCreate{
		From:   "Kto9sFhbjDdjEHvcdH6n9dtQws1m4ptsAWAy7DhqGdrUFai",
		To:     "Kto3tPtVvkoBAgxxAgBbd6HpR1jgcfNWsz7554S4ud1PkBD",
		Amount: 500001,
		Nonce:  1,
		Priv:   "5BWVgtMPPUPFuCHssYhXxFx2xVfQRTkB1EjKHKu1B1KxdVxD5cswDEdqiko3PjUPFPGfePoKxdfzHvv4YXRCYNp2",
		Symbol: "BTC",
		Total:  "21000000",
	}

	// The RPC error was previously discarded with a blank identifier, which
	// would silently print a nil response on failure. Report it instead.
	resp, err := client.CreateToken(ctx, reqData)
	if err != nil {
		rpcclient.Error("fail to create token:", err)
		return
	}

	fmt.Println(resp)
}
// newClient dials the hard-coded RPC endpoint (blocking, without TLS) and
// returns a greeter client bound to that connection.
func newClient() (message.GreeterClient, error) {
	conn, err := grpc.Dial("106.12.88.252:8546", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		rpcclient.Error("fail to dial:", err)
		return nil, err
	}

	return message.NewGreeterClient(conn), nil
}
// func main() {
// loadCfg()
// client, err := newClient()
// if err != nil {
// rpcclient.Error("fail to new client:", err)
// return
// }
// ctx := context.Background()
// reqData := &message.ReqTransaction{
// From: config.AddrMap["0"].Addr,
// Priv: config.AddrMap["0"].Private,
// Amount: uint64(600000),
// }
// for i := 0; i < 1; i++ {
// var toAddr string
// if i/2 == 0 {
// toAddr = config.AddrMap["1"].Addr
// } else {
// toAddr = config.AddrMap["2"].Addr
// }
// rpcclient.Debugf("from:%s,to%s", config.AddrMap["0"].Addr, toAddr)
// nonce, err := client.GetAddressNonceAt(ctx, &message.ReqNonce{Address: config.AddrMap["0"].Addr})
// if err != nil {
// rpcclient.Error("fail to get nonce:", err)
// return
// }
// fmt.Println("nonce =======", nonce)
// rpcclient.Debug("nonce:", nonce.Nonce)
// reqData.To = toAddr
// for j := 0; j < 1; j++ {
// reqData.Nonce = nonce.Nonce + uint64(j)
// respData, err := client.SendTransaction(ctx, reqData)
// if err != nil {
// rpcclient.Errorf("fasttoother:%d,send tx error:%v", reqData.Nonce, err)
// return
// }
// rpcclient.Debug(respData.Hash)
// }
// // time.Sleep(8 * time.Second)
// }
// return
// }
// var config struct {
// AddrMap map[string]struct {
// Addr string
// Private string
// }
// }
// func loadCfg() {
// _, err := toml.DecodeFile("./output/rpctest.toml", &config)
// if err != nil {
// rpcclient.Error(err)
// os.Exit(-1)
// }
// rpcclient.Debug(config)
// rpcclient.Debug(config.AddrMap["0"].Addr)
// }
// func fastToOther(ctx context.Context, client message.GreeterClient) error {
// reqData := &message.ReqTransaction{
// From: config.AddrMap["0"].Addr,
// Priv: config.AddrMap["0"].Private,
// Amount: uint64(500000),
// }
// for i := 0; i < 10; i++ {
// var toAddr string
// if i/2 == 0 {
// toAddr = config.AddrMap["1"].Addr
// } else {
// toAddr = config.AddrMap["2"].Addr
// }
// rpcclient.Debugf("from:%s,to%s", config.AddrMap["0"].Addr, toAddr)
// nonce, err := client.GetAddressNonceAt(ctx, &message.ReqNonce{Address: config.AddrMap["0"].Addr})
// if err != nil {
// rpcclient.Error("fail to get nonce:", err)
// return err
// }
// rpcclient.Debug("nonce:", nonce.Nonce)
// reqData.To = toAddr
// reqData.Nonce = nonce.Nonce
// respData, err := client.SendTransaction(ctx, reqData)
// if err != nil {
// rpcclient.Errorf("fasttoother:%d,send tx error:%v", reqData.Nonce, err)
// return err
// }
// rpcclient.Debug(respData.Hash)
// time.Sleep(8 * time.Second)
// }
// return nil
// }
// func sendEachOther(ctx context.Context, client message.GreeterClient) {
// var i int
// var fromAddr, toAddr string
// for {
// fromkey := strconv.Itoa(i % 3)
// tokey := strconv.Itoa((i + 1) % 3)
// rpcclient.Debugf("fromkey:%s,tokey:%s", fromkey, tokey)
// fromAddr = config.AddrMap[fromkey].Addr
// toAddr = config.AddrMap[tokey].Addr
// priv := config.AddrMap[fromkey].Private
// nonce, err := client.GetAddressNonceAt(ctx, &message.ReqNonce{Address: fromAddr})
// if err != nil {
// rpcclient.Error("fail to get nonce:", err)
// return
// }
// rpcclient.Debugf("fromkey:%s,nonce:%d\n", fromkey, nonce.Nonce)
// reqdata := &message.ReqTransaction{
// From: fromAddr,
// To: toAddr,
// Amount: uint64(500000),
// Nonce: nonce.Nonce,
// Priv: priv,
// }
// respdata, err := client.SendTransaction(ctx, reqdata)
// if err != nil {
// rpcclient.Errorf("snedEachother:%d,err:%v\n", reqdata.Nonce, err)
// return
// }
// rpcclient.Debug(respdata)
// i++
// time.Sleep(8 * time.Second)
// }
// }
// // func getBlockByNum(ctx context.Context, client message.GreeterClient) {
// // for {
// // time.Sleep(20 * time.Second)
// // respMaxNum, err := client.GetMaxBlockNumber(ctx, &message.ReqMaxBlockNumber{})
// // if err != nil {
// // rpcclient.Error("fail to get max num:", err)
// // continue
// // }
// // reqBlockByNum := &message.ReqBlock{Height: respMaxNum.MaxNumber}
// // respBlock, err := client.GetBlockbyNum(ctx, reqBlockByNum)
// // if err != nil {
// // rpcclient.Error("fail to get blcok by num:", err)
// // continue
// // }
// // rpcclient.Debug("number:", string(respBlock.Block))
// // }
// // }
// // func getBlockByHash(ctx context.Context, hash string, client message.GreeterClient) {
// // respBlock, err := client.GetBlockbyHash(ctx, &message.ReqBlock{Hash: hash})
// // if err != nil {
// // rpcclient.Error("fail to get block by hash:", err)
// // return
// // }
// // rpcclient.Debug("hash:", string(respBlock.Block))
// // }
// func keyPairTest() {
// pubbytes, privbytes, err := until.Generprivkey()
// if err != nil {
// rpcclient.Error(err)
// return
// }
// privkey := until.Encode(privbytes)
// pubkey := until.PubtoAddr(pubbytes)
// privBytes2 := until.Decode(privkey)
// // addr := "Kto" + until.Encode(privBytes[32:])
// addr2 := until.PubtoAddr(privBytes2[32:])
// if addr2 == pubkey {
// fmt.Println("success")
// }
// }
// func getBalance(ctx context.Context, client message.GreeterClient) {
// respBalance, err := client.GetBalance(ctx, &message.ReqBalance{
// Address: config.AddrMap["0"].Addr,
// })
// if err != nil {
// rpcclient.Error(err)
// return
// }
// respFrozenAssets, err := client.GetFrozenAssets(ctx, &message.ReqFrozenAssets{
// Addr: config.AddrMap["0"].Addr,
// })
// if err != nil {
// rpcclient.Error("fail to get frozen assets:", err)
// return
// }
// rpcclient.Debugf("before,balance:%d,forzenasssets:%d\n", respBalance.Balnce, respFrozenAssets.FrozenAssets)
// respLockBalance, err := client.SetLockBalance(ctx, &message.ReqLockBalance{
// Address: config.AddrMap["0"].Addr,
// Amount: uint64(1000000),
// })
// rpcclient.Debug("lock:", respLockBalance.Status)
// respBalance, err = client.GetBalance(ctx, &message.ReqBalance{
// Address: config.AddrMap["0"].Addr,
// })
// if err != nil {
// rpcclient.Error(err)
// return
// }
// respFrozenAssets, err = client.GetFrozenAssets(ctx, &message.ReqFrozenAssets{
// Addr: config.AddrMap["0"].Addr,
// })
// if err != nil {
// rpcclient.Error("fail to get frozen assets:", err)
// return
// }
// rpcclient.Debugf("after,balance:%d,forzenasssets:%d\n", respBalance.Balnce, respFrozenAssets.FrozenAssets)
// }
|
package core_test
import (
"github.com/d11wtq/bijou/core"
"github.com/d11wtq/bijou/runtime"
"github.com/d11wtq/bijou/test"
"testing"
)
// example is a GoFunc fixture used by the tests below; it simply echoes
// its argument list back as the return value.
func example(env runtime.Env, args runtime.Sequence) (runtime.Value, error) {
	return args, nil
}
// TestGoFunc verifies that a core.GoFunc reports FuncType, satisfies
// runtime.Callable, and echoes its arguments when called.
func TestGoFunc(t *testing.T) {
	fn := core.GoFunc(example)

	if fn.Type() != runtime.FuncType {
		t.Fatalf(`expected fn.Type() == FuncType, got %s`, fn.Type())
	}
	// Idiomatic negation instead of `ok == false`.
	if _, ok := fn.(runtime.Callable); !ok {
		t.Fatalf(`expected fn.(Callable), got false`)
	}

	args := (&runtime.List{}).Append(runtime.Int(42))
	v2, err2 := fn.Call(test.FakeEnv(), args)
	if err2 != nil {
		t.Fatalf(`expected err2 == nil, got %s`, err2)
	}
	if v2 != args {
		t.Fatalf(`expected v2 == args, got %s`, v2)
	}
}
|
package main
import (
"fmt"
"math"
)
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// findMin returns the minimum element of a rotated sorted array using a
// divide-and-conquer search. An empty slice yields math.MaxInt32 as a
// sentinel (the identity element of min for this problem).
func findMin(nums []int) int {
	switch len(nums) {
	case 0:
		return math.MaxInt32
	case 1:
		return nums[0]
	case 2:
		return min(nums[0], nums[1])
	}

	// A span that starts below its last element is sorted, so it begins
	// with its minimum — no need to recurse.
	if nums[0] < nums[len(nums)-1] {
		return nums[0]
	}

	half := len(nums) / 2
	return min(findMin(nums[:half]), findMin(nums[half:]))
}
// main demonstrates findMin on a sample rotated sorted array.
func main() {
	sample := []int{3, 4, 5, 1, 2}
	fmt.Println(findMin(sample))
}
|
package main
import (
"fmt"
"math"
"strconv"
)
// check tests whether n can be written as 1 + b + b^2 + ... + b^p for some
// base b (i.e. p+1 ones in base b). It returns that base, or -1 when no such
// base exists for this p. p == 1 always succeeds with base n-1.
func check(n int64, p int) int64 {
	if p == 1 {
		return n - 1
	}

	// Candidate base: floor of the p-th root of n (float expression kept
	// intact to preserve rounding behavior).
	b := int64(math.Pow(float64(n), 1./float64(p)))
	if b == 1 {
		return -1
	}

	// Evaluate the geometric series 1 + b + ... + b^p term by term.
	sum, pow := int64(1), int64(1)
	for i := 0; i < p; i++ {
		pow *= b
		sum += pow
	}

	if sum == n {
		return b
	}
	return -1
}
// smallestGoodBase returns, as a decimal string, the smallest base k >= 2 in
// which n's representation consists entirely of 1s. Trying the largest digit
// count first yields the smallest base; p == 1 ("11" in base n-1) always
// succeeds, so the loop is guaranteed to return for valid n >= 3.
func smallestGoodBase(n string) string {
	nn, err := strconv.ParseInt(n, 10, 64)
	if err != nil {
		// Previously the parse error was discarded (leaving nn == 0, which
		// matched no base). Make the invalid-input path explicit.
		return "-1"
	}

	// int64 has at most 63 bits, so even base 2 cannot yield more than ~62
	// ones; starting higher than 62 is pointless. The lower bound is 1, not
	// 0: p == 0 would make check compute 1./float64(0), a division hazard.
	for p := 62; p >= 1; p-- {
		if b := check(nn, p); b != -1 {
			return strconv.FormatInt(b, 10)
		}
	}

	return "-1"
}
// main exercises smallestGoodBase on a few sample inputs.
func main() {
	for _, n := range []string{"13", "4681", "1000000000000000000"} {
		fmt.Println(smallestGoodBase(n))
	}
}
|
package main
import (
"fmt"
"os/exec"
"time"
)
// main starts sshd once, then loops forever: every 24h it re-runs puppet via
// puppetsync, and every 30min it emits a heartbeat message.
func main() {
	ticker := time.Tick(time.Minute * 30)
	ticker2 := time.Tick(time.Hour * 24)

	out, err := exec.Command("/etc/init.d/ssh", "start").Output()
	// Print the command output as text; fmt.Print on a raw []byte dumps
	// decimal byte values. Only print the error when there is one.
	fmt.Print(string(out))
	if err != nil {
		fmt.Println(err)
	}

	for {
		select {
		case <-ticker2:
			puppetsync()
		case <-ticker:
			// http api check account setting
			fmt.Println("it's ok")
		}
	}
}
// puppetsync runs "sudo puppet -t" and prints its output; errors are
// reported but otherwise ignored (best-effort sync).
func puppetsync() {
	out, err := exec.Command("/usr/bin/sudo",
		"/usr/bin/puppet", "-t").Output()
	// Print the output as text; fmt.Print on a raw []byte dumps decimal
	// byte values instead of the command's actual output.
	fmt.Print(string(out))
	if err != nil {
		fmt.Print(err)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"net/http/httputil"
"time"
)
// Time is the JSON payload served by the /time endpoint; it carries a
// single pre-formatted timestamp string.
type Time struct {
	Time string // wall-clock time, formatted with the layout given to timeHandler
}
func logRequest(r *http.Request) string {
requestDump, err := httputil.DumpRequest(r, true)
if err != nil {
fmt.Println(err)
}
return string(requestDump)
}
// timeHandler returns an HTTP handler that writes the current time,
// formatted with the given layout, as a JSON object {"Time": "..."}.
// Each handled request is also dumped to the log.
func timeHandler(format string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Use a distinct local name: the original `time := ...` shadowed
		// the "time" package for the rest of the closure body.
		payload := Time{time.Now().Format(format)}

		js, err := json.Marshal(payload)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
		log.Println(logRequest(r))
	}
}
// main wires the /time endpoint and serves on :3000.
func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/time", timeHandler(time.RFC1123))

	log.Println("Listening on :3000 ...")
	// Surface the listen/serve error instead of silently dropping it.
	log.Fatal(http.ListenAndServe(":3000", mux))
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package boshdirector
import "encoding/json"
// BoshTask represents a single task reported by the BOSH director.
type BoshTask struct {
	ID          int    // numeric task id assigned by the director
	State       string // lifecycle state string; see the Task* constants below
	Description string
	Result      string
	ContextID   string `json:"context_id,omitempty"`
}
// TaskStateType classifies raw director task states into coarse buckets
// (complete / incomplete / failed / unknown); see StateType.
type TaskStateType int

const (
	// Raw task state strings as reported by the director.
	TaskQueued     = "queued"
	TaskProcessing = "processing"
	TaskDone       = "done"
	TaskError      = "error"
	TaskCancelled  = "cancelled"
	TaskCancelling = "cancelling"
	TaskTimeout    = "timeout"

	// Coarse buckets returned by StateType. Note that iota continues
	// counting the preceding specs in this const block, so TaskComplete
	// is 7; only the distinctness of these values matters.
	TaskComplete TaskStateType = iota
	TaskIncomplete
	TaskFailed
	TaskUnknown
)
// ToLog renders the task as a JSON string for log output. The marshal
// error is deliberately ignored; an empty string is acceptable in logs.
func (t BoshTask) ToLog() string {
	b, _ := json.Marshal(t)
	return string(b)
}
// StateType maps the director's raw task state string onto one of the
// coarse TaskStateType buckets; unrecognized states map to TaskUnknown.
func (t BoshTask) StateType() TaskStateType {
	done := t.State == TaskDone
	inFlight := t.State == TaskProcessing || t.State == TaskQueued || t.State == TaskCancelling
	failed := t.State == TaskCancelled || t.State == TaskError || t.State == TaskTimeout

	switch {
	case done:
		return TaskComplete
	case inFlight:
		return TaskIncomplete
	case failed:
		return TaskFailed
	default:
		return TaskUnknown
	}
}
|
package odoo
import (
"fmt"
)
// AccountAnalyticTag represents account.analytic.tag model.
//
// NOTE(review): every field tag spells the option "omptempty"; if the
// xmlrpc marshaller expects the conventional "omitempty", these options
// are silently ignored on all fields — confirm against the encoder before
// relying on empty-field suppression. Not changed here because struct
// tags are runtime behavior.
type AccountAnalyticTag struct {
	LastUpdate  *Time     `xmlrpc:"__last_update,omptempty"`
	Active      *Bool     `xmlrpc:"active,omptempty"`
	Color       *Int      `xmlrpc:"color,omptempty"`
	CreateDate  *Time     `xmlrpc:"create_date,omptempty"`
	CreateUid   *Many2One `xmlrpc:"create_uid,omptempty"`
	DisplayName *String   `xmlrpc:"display_name,omptempty"`
	Id          *Int      `xmlrpc:"id,omptempty"`
	Name        *String   `xmlrpc:"name,omptempty"`
	WriteDate   *Time     `xmlrpc:"write_date,omptempty"`
	WriteUid    *Many2One `xmlrpc:"write_uid,omptempty"`
}
// AccountAnalyticTags represents an array of account.analytic.tag models.
type AccountAnalyticTags []AccountAnalyticTag

// AccountAnalyticTagModel is the odoo model name used by all the RPC
// helpers below.
const AccountAnalyticTagModel = "account.analytic.tag"
// Many2One converts an AccountAnalyticTag to a *Many2One that references
// the record by its id; the display-name part is left empty.
func (aat *AccountAnalyticTag) Many2One() *Many2One {
	return NewMany2One(aat.Id.Get(), "")
}
// CreateAccountAnalyticTag creates a new account.analytic.tag model and returns its id.
func (c *Client) CreateAccountAnalyticTag(aat *AccountAnalyticTag) (int64, error) {
	ids, err := c.CreateAccountAnalyticTags([]*AccountAnalyticTag{aat})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		// The backend reported no id; surface the -1 sentinel without an error.
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateAccountAnalyticTags creates new account.analytic.tag models and
// returns their ids. (The comment previously repeated the singular name.)
func (c *Client) CreateAccountAnalyticTags(aats []*AccountAnalyticTag) ([]int64, error) {
	// Repack into []interface{}, the shape the generic Create call expects.
	var vv []interface{}
	for _, v := range aats {
		vv = append(vv, v)
	}
	return c.Create(AccountAnalyticTagModel, vv)
}
// UpdateAccountAnalyticTag updates an existing account.analytic.tag record.
// It delegates to the batch variant with a single-element id slice.
func (c *Client) UpdateAccountAnalyticTag(aat *AccountAnalyticTag) error {
	return c.UpdateAccountAnalyticTags([]int64{aat.Id.Get()}, aat)
}
// UpdateAccountAnalyticTags updates existing account.analytic.tag records.
// All records (represented by ids) will be updated by aat values.
func (c *Client) UpdateAccountAnalyticTags(ids []int64, aat *AccountAnalyticTag) error {
	return c.Update(AccountAnalyticTagModel, ids, aat)
}
// DeleteAccountAnalyticTag deletes an existing account.analytic.tag record
// by id, delegating to the batch variant.
func (c *Client) DeleteAccountAnalyticTag(id int64) error {
	return c.DeleteAccountAnalyticTags([]int64{id})
}
// DeleteAccountAnalyticTags deletes existing account.analytic.tag records
// identified by their ids.
func (c *Client) DeleteAccountAnalyticTags(ids []int64) error {
	return c.Delete(AccountAnalyticTagModel, ids)
}
// GetAccountAnalyticTag gets an existing account.analytic.tag record by id.
// A missing record is reported as an error.
func (c *Client) GetAccountAnalyticTag(id int64) (*AccountAnalyticTag, error) {
	aats, err := c.GetAccountAnalyticTags([]int64{id})
	if err != nil {
		return nil, err
	}
	if aats == nil || len(*aats) == 0 {
		return nil, fmt.Errorf("id %v of account.analytic.tag not found", id)
	}
	return &((*aats)[0]), nil
}
// GetAccountAnalyticTags gets existing account.analytic.tag records by ids.
// An empty result is returned as an empty (non-nil) slice.
func (c *Client) GetAccountAnalyticTags(ids []int64) (*AccountAnalyticTags, error) {
	aats := &AccountAnalyticTags{}
	// nil here presumably selects the default field set — TODO confirm
	// against the Read implementation.
	if err := c.Read(AccountAnalyticTagModel, ids, nil, aats); err != nil {
		return nil, err
	}
	return aats, nil
}
// FindAccountAnalyticTag finds the first account.analytic.tag record
// matching the criteria; no match is reported as an error.
func (c *Client) FindAccountAnalyticTag(criteria *Criteria) (*AccountAnalyticTag, error) {
	aats := &AccountAnalyticTags{}
	opts := NewOptions().Limit(1)
	if err := c.SearchRead(AccountAnalyticTagModel, criteria, opts, aats); err != nil {
		return nil, err
	}
	if aats == nil || len(*aats) == 0 {
		return nil, fmt.Errorf("account.analytic.tag was not found with criteria %v", criteria)
	}
	return &((*aats)[0]), nil
}
// FindAccountAnalyticTags finds account.analytic.tag records by querying it
// and filtering it with criteria and options.
// An empty result is returned as an empty (non-nil) slice.
func (c *Client) FindAccountAnalyticTags(criteria *Criteria, options *Options) (*AccountAnalyticTags, error) {
	aats := &AccountAnalyticTags{}
	if err := c.SearchRead(AccountAnalyticTagModel, criteria, options, aats); err != nil {
		return nil, err
	}
	return aats, nil
}
// FindAccountAnalyticTagIds finds record ids matching the criteria,
// filtered with options.
func (c *Client) FindAccountAnalyticTagIds(criteria *Criteria, options *Options) ([]int64, error) {
	found, err := c.Search(AccountAnalyticTagModel, criteria, options)
	if err != nil {
		// Return an empty (non-nil) slice alongside the error, matching the
		// established convention of this file.
		return []int64{}, err
	}
	return found, nil
}
// FindAccountAnalyticTagId finds the first record id matching the criteria;
// no match is reported as an error with -1.
func (c *Client) FindAccountAnalyticTagId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountAnalyticTagModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, fmt.Errorf("account.analytic.tag was not found with criteria %v and options %v", criteria, options)
	}
	return ids[0], nil
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package opttester
import (
"bytes"
"context"
gosql "database/sql"
"fmt"
"sort"
"strconv"
"strings"
"text/tabwriter"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/errors"
)
// statsTester is used for testing the quality of our estimated statistics
// by comparing the estimated stats for a given expression to the actual stats
// for that expression. See the comments above testStats for more details.
//
// The struct is stateless; it exists only to group the stats-testing
// helper methods.
type statsTester struct {
}
// testStats compares actual and estimated stats for a relational expression,
// and returns formatted tabular output of the stats themselves as well as
// the estimation error.
//
// The output looks like this:
//
// column_names row_count distinct_count null_count
// ... ... ... ...
// ~~~~
// column_names row_count_est row_count_err distinct_count_est distinct_count_err null_count_est null_count_err
// ... ... ... ... ... ... ...
//
// The data above the "~~~~" are the actual stats for the expression, and the
// data below the "~~~~" are the estimated stats as well as the estimation
// error in comparison to the actual stats.
//
// testStats expects that a table with the given tableName representing the
// relational expression has already been created (e.g., by a previous
// invocation of stats-quality in the test file). The table should exist in the
// test catalog with estimated stats already injected.
//
// If rewriteActualStats=true, the table should also exist in the savetables
// database in the running CockroachDB cluster and contain the output of the
// relational expression. If rewriteActualStats=true, testStats will use this
// table to recalculate the actual statistics. Otherwise, it will reuse the
// actual stats in the test output (calculated previously) to compare against
// the estimated stats.
//
func (st statsTester) testStats(
	catalog *testcat.Catalog, prevOutputs []string, tableName, headingSep string,
) (_ string, err error) {
	// Convert panics (e.g. from unexpected catalog state) into returned errors.
	defer func() {
		if r := recover(); r != nil {
			err = errors.AssertionFailedf("%v", r)
		}
	}()

	// Attempt to find a previous stats output corresponding to this table name.
	var prevOutput string
	for i := range prevOutputs {
		split := strings.Split(prevOutputs[i], headingSep)
		if len(split) == 2 && split[0] == tableName {
			prevOutput = split[1]
			break
		}
	}

	const warning = "WARNING: No previous statistics output was found. " +
		"To collect actual statistics,\nrun with the rewrite-actual-stats flag set to true."
	if !*rewriteActualStats &&
		(prevOutput == "" || strings.TrimSuffix(prevOutput, "\n") == warning) {
		// No previous stats output was found.
		return warning + "\n", nil
	}

	// Get the actual stats.
	const sep = "~~~~"
	actualStats, actualStatsMap, err := st.getActualStats(prevOutput, tableName, sep)
	if err != nil {
		return "", err
	}

	// Get the estimated stats, which have been stored in the test catalog.
	// Sort them by column names for consistent test output.
	tab := catalog.Table(tree.NewUnqualifiedTableName(tree.Name(tableName)))
	estimatedStats := tab.Stats
	sort.Slice(estimatedStats, func(i, j int) bool {
		// TODO(rytaft): update this function when we support multi-column stats.
		coli := tab.Column(estimatedStats[i].ColumnOrdinal(0)).ColName()
		colj := tab.Column(estimatedStats[j].ColumnOrdinal(0)).ColName()
		return coli < colj
	})

	// Set up the test result columns and add column names to the output.
	columns := []string{
		"column_names",
		"row_count_est",
		"row_count_err",
		"distinct_count_est",
		"distinct_count_err",
		"null_count_est",
		"null_count_err",
	}
	res := make([]string, len(columns), len(columns)*(len(estimatedStats)+1))
	copy(res, columns)

	format := func(val float64) string {
		return strconv.FormatFloat(val, 'f', 2, 64)
	}
	formatQErr := func(val float64) string {
		// It was shown in "Preventing Bad Plans by Bounding the Impact of
		// Cardinality Estimation Errors" by Moerkotte et al. that q-error less
		// than or equal to 1.9 does not affect plan quality. Mark errors that
		// are above this threshold for easier identification.
		const maxQ = 1.9
		var marker string
		if val > maxQ {
			marker = " <=="
		}
		return format(val) + marker
	}

	// Emit one result row (7 cells) per single-column estimated stat,
	// pairing it with the matching actual stat by column-name key.
	for i := 0; i < len(estimatedStats); i++ {
		stat := estimatedStats[i]
		if stat.ColumnCount() != 1 {
			// We don't collect multi-column stats yet, so we can't compare this to
			// anything.
			continue
		}
		col := stat.ColumnOrdinal(0)
		colNames := fmt.Sprintf("{%s}", tab.Column(col).ColName())
		actualStat, ok := actualStatsMap[colNames]
		if !ok {
			return "", fmt.Errorf("could not find actual stat for columns %s", colNames)
		}
		res = append(res,
			colNames,
			format(float64(stat.RowCount())),
			formatQErr(st.qErr(float64(stat.RowCount()), actualStat.rowCount)),
			format(float64(stat.DistinctCount())),
			formatQErr(st.qErr(float64(stat.DistinctCount()), actualStat.distinctCount)),
			format(float64(stat.NullCount())),
			formatQErr(st.qErr(float64(stat.NullCount()), actualStat.nullCount)),
		)
	}

	// Final output: actual stats, the "~~~~" separator, then the formatted
	// estimated stats with their q-errors.
	formattedResults := st.formatValues(res, len(columns))
	return strings.Join(append(append(actualStats, sep), formattedResults...), "\n"), nil
}
// getActualStats gets the actual statistics from the test output or
// recalculates them if rewriteActualStats is true.
// Returns:
//  1. The actual statistics as a slice of strings (one for each row)
//  2. A map from column names to statistic for comparison with the estimated
//     stats.
func (st statsTester) getActualStats(
	prevOutput string, tableName string, sep string,
) ([]string, map[string]statistic, error) {
	expected := strings.Split(prevOutput, sep)
	if len(expected) < 2 && !*rewriteActualStats {
		// Without a previous "actual" section there is nothing to compare
		// against unless the caller asked us to recompute it.
		return nil, nil, fmt.Errorf(
			"must run with -%s=true to calculate actual stats first", rewriteActualFlag,
		)
	}

	var actualStats []string
	if *rewriteActualStats {
		var err error
		if actualStats, err = st.calculateActualStats(tableName); err != nil {
			return nil, nil, err
		}
	} else {
		actualStats = strings.Split(expected[0], "\n")
	}

	// Remove the last line, which is empty.
	actualStats = actualStats[:len(actualStats)-1]

	actualStatsMap, err := st.getActualStatsMap(actualStats)
	if err != nil {
		return nil, nil, err
	}

	return actualStats, actualStatsMap, nil
}
// calculateActualStats calculates actual statistics for the given table
// and returns them as a list of formatted rows.
//
// It works by connecting to a running database, calling CREATE STATISTICS on
// the given table, and finally using SHOW STATISTICS to get the result rows.
func (st statsTester) calculateActualStats(tableName string) ([]string, error) {
	db, err := gosql.Open("postgres", *pgurl)
	if err != nil {
		return nil, errors.Wrap(err,
			"can only recompute actual stats when pointed at a running Cockroach cluster",
		)
	}
	// Close the pool and connection so repeated rewrites don't leak them
	// (the original never closed either).
	defer db.Close()
	ctx := context.Background()
	c, err := db.Conn(ctx)
	if err != nil {
		return nil, err
	}
	defer c.Close()
	if _, err := c.ExecContext(ctx,
		`SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false`,
	); err != nil {
		return nil, err
	}
	if _, err := c.ExecContext(ctx, fmt.Sprintf("USE %s", opt.SaveTablesDatabase)); err != nil {
		return nil, err
	}
	const statName = "s"
	if _, err := c.ExecContext(ctx,
		fmt.Sprintf("CREATE STATISTICS %s FROM %s", statName, tableName),
	); err != nil {
		return nil, err
	}
	// Exclude stats for rowid since that column was added when the table was
	// created by the saveTableNode. It would not have been part of the original
	// relational expression represented by the table.
	rows, err := c.QueryContext(ctx,
		fmt.Sprintf("SELECT column_names, row_count, distinct_count, null_count FROM "+
			"[SHOW STATISTICS FOR TABLE %s] WHERE statistics_name = '%s' "+
			"AND column_names != '{rowid}'::string[] ORDER BY column_names::string", tableName, statName,
		),
	)
	if err != nil {
		return nil, err
	}
	// Rows.Close is idempotent, so this is safe even though RowsToStrMatrix
	// may also close the rows.
	defer rows.Close()
	cols, err := rows.Columns()
	if err != nil {
		return nil, err
	}
	matrix, err := sqlutils.RowsToStrMatrix(rows)
	if err != nil {
		return nil, err
	}
	// First row is the header, followed by one row per statistic.
	res := make([]string, 0, len(cols)*(len(matrix)+1))
	res = append(res, cols...)
	for i := range matrix {
		res = append(res, matrix[i]...)
	}
	return st.formatValues(res, len(cols)), nil
}
// statistic holds the actual values of one collected statistic, used for
// comparison against the optimizer's estimates.
type statistic struct {
	rowCount      float64 // total number of rows
	distinctCount float64 // number of distinct values in the column(s)
	nullCount     float64 // number of NULL values in the column(s)
}
// getActualStatsMap gets a map of statistics keyed by column names.
// It is used to quickly find the actual stats for comparison with estimated
// statistics on a given set of columns.
func (st statsTester) getActualStatsMap(actualStats []string) (map[string]statistic, error) {
	statsMap := make(map[string]statistic, len(actualStats)-1)
	// Ordinals of the whitespace-separated fields on each line. The original
	// reused the names rowCount/distinctCount/nullCount for both the constants
	// and the parsed locals, which only worked because of := scoping; distinct
	// names avoid the shadowing.
	const (
		colNamesIdx = iota
		rowCountIdx
		distinctCountIdx
		nullCountIdx
		numColumns
	)
	// Skip the first line, which contains the column names.
	for i := 1; i < len(actualStats); i++ {
		tokens := strings.Fields(actualStats[i])
		if len(tokens) != numColumns {
			return nil, fmt.Errorf("expected %d values per line but found %d", numColumns, len(tokens))
		}
		// strings.Fields never returns tokens with surrounding whitespace, so
		// no additional trimming is needed before parsing.
		rowCount, err := strconv.ParseFloat(tokens[rowCountIdx], 64)
		if err != nil {
			return nil, err
		}
		distinctCount, err := strconv.ParseFloat(tokens[distinctCountIdx], 64)
		if err != nil {
			return nil, err
		}
		nullCount, err := strconv.ParseFloat(tokens[nullCountIdx], 64)
		if err != nil {
			return nil, err
		}
		statsMap[tokens[colNamesIdx]] = statistic{
			rowCount:      rowCount,
			distinctCount: distinctCount,
			nullCount:     nullCount,
		}
	}
	return statsMap, nil
}
// qErr calculates the q-error for the given estimated and actual values.
// q-error is symmetric and multiplicative, and satisfies the formula:
//
//	(1/q) * actual <= estimated <= q * actual
//
// A q-error of 1 is a perfect estimate, and a q-error <= 1.9 is considered
// acceptable.
//
// We use q-error because it is a better predictor of plan quality than
// other error metrics. See "Preventing Bad Plans by Bounding the Impact of
// Cardinality Estimation Errors" by Moerkotte et al. for details.
func (st statsTester) qErr(estimated, actual float64) float64 {
	lo, hi := estimated, actual
	if lo > hi {
		lo, hi = hi, lo
	}
	// Both zero is a perfect estimate by convention.
	if hi == 0 && lo == 0 {
		return 1
	}
	return hi / lo
}
// formatValues formats data in a tabular output format, valsPerLine cells
// per row, using a tabwriter for alignment. It is copied from
// sql/logictest/logic.go.
func (st statsTester) formatValues(vals []string, valsPerLine int) []string {
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 2, 1, 2, ' ', 0)
	rowCount := len(vals) / valsPerLine
	for row := 0; row < rowCount; row++ {
		cells := vals[row*valsPerLine : (row+1)*valsPerLine]
		for _, cell := range cells {
			fmt.Fprintf(tw, "%s\t", cell)
		}
		fmt.Fprint(tw, "\n")
	}
	_ = tw.Flush()
	// Split into lines and trim any trailing whitespace.
	// Note that the last line will be empty (which is what we want).
	lines := strings.Split(buf.String(), "\n")
	results := make([]string, 0, len(lines))
	for _, line := range lines {
		results = append(results, strings.TrimRight(line, " "))
	}
	return results
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package checksum provides the implementation of checksum calculation used
// by network protocol headers.
package checksum
import (
"bytes"
"fmt"
"math"
"math/bits"
"math/rand"
"testing"
"unsafe"
)
// TestChecksumer verifies that Checksumer produces the expected checksum for
// data added in multiple chunks, and that the result matches a single-shot
// Checksum over the flattened data.
func TestChecksumer(t *testing.T) {
	testCases := []struct {
		name string
		data [][]byte // chunks fed to Checksumer.Add one at a time
		want uint16   // expected checksum value
	}{
		{
			name: "empty",
			want: 0,
		},
		{
			name: "OneOddView",
			data: [][]byte{
				{1, 9, 0, 5, 4},
			},
			want: 1294,
		},
		{
			name: "TwoOddViews",
			data: [][]byte{
				{1, 9, 0, 5, 4},
				{4, 3, 7, 1, 2, 123},
			},
			want: 33819,
		},
		{
			name: "OneEvenView",
			data: [][]byte{
				{1, 9, 0, 5},
			},
			want: 270,
		},
		{
			name: "TwoEvenViews",
			data: [][]byte{
				[]byte{98, 1, 9, 0},
				[]byte{9, 0, 5, 4},
			},
			want: 30981,
		},
		{
			name: "ThreeViews",
			data: [][]byte{
				{77, 11, 33, 0, 55, 44},
				{98, 1, 9, 0, 5, 4},
				{4, 3, 7, 1, 2, 123, 99},
			},
			want: 34236,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			var all bytes.Buffer
			var c Checksumer
			for _, b := range tc.data {
				c.Add(b)
				// Append to the buffer. We will check the checksum as a whole later.
				if _, err := all.Write(b); err != nil {
					t.Fatalf("all.Write(b) = _, %s; want _, nil", err)
				}
			}
			if got, want := c.Checksum(), tc.want; got != want {
				t.Errorf("c.Checksum() = %d, want %d", got, want)
			}
			// The incremental result must agree with the one-shot function.
			if got, want := Checksum(all.Bytes(), 0 /* initial */), tc.want; got != want {
				t.Errorf("Checksum(flatten tc.data) = %d, want %d", got, want)
			}
		})
	}
}
// TestChecksum cross-checks the optimized Checksum implementation against the
// reference implementation (old) on many random buffers of assorted sizes,
// including sizes around power-of-two boundaries.
func TestChecksum(t *testing.T) {
	var bufSizes = []int{
		0,
		1,
		2,
		3,
		4,
		7,
		8,
		15,
		16,
		31,
		32,
		63,
		64,
		127,
		128,
		255,
		256,
		257,
		1023,
		1024,
	}
	type testCase struct {
		buf      []byte
		initial  uint16
		csumOrig uint16 // result from the reference implementation
		csumNew  uint16 // result from the optimized implementation
	}
	testCases := make([]testCase, 100000)
	// Ensure same buffer generation for test consistency.
	rnd := rand.New(rand.NewSource(42))
	for i := range testCases {
		testCases[i].buf = make([]byte, bufSizes[i%len(bufSizes)])
		testCases[i].initial = uint16(rnd.Intn(65536))
		rnd.Read(testCases[i].buf)
	}
	for i := range testCases {
		testCases[i].csumOrig = old(testCases[i].buf, testCases[i].initial)
		testCases[i].csumNew = Checksum(testCases[i].buf, testCases[i].initial)
		if got, want := testCases[i].csumNew, testCases[i].csumOrig; got != want {
			t.Fatalf("new checksum for (buf = %x, initial = %d) does not match old got: %d, want: %d", testCases[i].buf, testCases[i].initial, got, want)
		}
	}
}
// TestIncrementalChecksum tests for breakages of Checksummer as described in
// b/289284842: a checksum computed over one chunk must equal the checksum
// computed incrementally over the same bytes split at any point.
func TestIncrementalChecksum(t *testing.T) {
	buf := []byte{
		0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
		0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c,
		0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
		0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d,
		0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
	}
	// Go through buf and check that checksum(buf[:end]) is equivalent to
	// an incremental checksum of two chunks of buf[:end].
	for end := 2; end <= len(buf); end++ {
		for start := 1; start < end; start++ {
			t.Run(fmt.Sprintf("end=%d start=%d", end, start), func(t *testing.T) {
				var cs Checksumer
				cs.Add(buf[:end])
				csum := cs.Checksum()
				// Recompute the same range as two Adds split at start.
				cs = Checksumer{}
				cs.Add(buf[:start])
				cs.Add(buf[start:end])
				csumIncremental := cs.Checksum()
				// Both must also agree with the reference implementation.
				if want := old(buf[:end], 0); csum != want {
					t.Fatalf("checksum is wrong: got %x, expected %x", csum, want)
				}
				if csum != csumIncremental {
					t.Errorf("checksums should be the same: %x %x", csum, csumIncremental)
				}
			})
		}
	}
}
// BenchmarkChecksum benchmarks each checksum implementation over a range of
// buffer sizes, so their relative performance can be compared.
func BenchmarkChecksum(b *testing.B) {
	var bufSizes = []int{64, 128, 256, 512, 1024, 1500, 2048, 4096, 8192, 16384, 32767, 32768, 65535, 65536}
	checkSumImpls := []struct {
		fn   func([]byte, uint16) uint16
		name string
	}{
		{old, "checksum_old"},
		{unrolled, "unrolled"},
		{bitsLib, "bitslib"},
		{Checksum, "checksum"},
	}
	for _, csumImpl := range checkSumImpls {
		// Ensure same buffer generation for test consistency.
		rnd := rand.New(rand.NewSource(42))
		for _, bufSz := range bufSizes {
			b.Run(fmt.Sprintf("%s_%d", csumImpl.name, bufSz), func(b *testing.B) {
				tc := struct {
					buf     []byte
					initial uint16
					csum    uint16
				}{
					buf:     make([]byte, bufSz),
					initial: uint16(rnd.Intn(65536)),
				}
				rnd.Read(tc.buf)
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					// Store the result so the call cannot be optimized away.
					tc.csum = csumImpl.fn(tc.buf, tc.initial)
				}
			})
		}
	}
}
// old calculates the checksum (as defined in RFC 1071) of the bytes in
// the given byte array. This function uses a non-optimized implementation. Its
// only retained for reference and to use as a benchmark/test. Most code should
// use the header.Checksum function.
//
// The initial checksum must have been computed on an even number of bytes.
func old(buf []byte, initial uint16) uint16 {
	csum, _ := oldCalculateChecksum(buf, false /* odd */, uint32(initial))
	return csum
}
// oldCalculateChecksum folds buf into the running RFC 1071 sum started by
// initial. odd reports whether an odd number of bytes has been summed so
// far; the updated odd flag is returned with the folded 16-bit checksum.
func oldCalculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) {
	sum := initial
	// A dangling byte from the previous chunk pairs with our first byte.
	if odd {
		sum += uint32(buf[0])
		buf = buf[1:]
	}
	n := len(buf)
	odd = n%2 == 1
	// A trailing odd byte is treated as the high byte of a 16-bit word.
	if odd {
		n--
		sum += uint32(buf[n]) << 8
	}
	// Sum the remaining bytes as big-endian 16-bit words.
	for i := 0; i < n; i += 2 {
		sum += uint32(buf[i])<<8 | uint32(buf[i+1])
	}
	return Combine(uint16(sum), uint16(sum>>16)), odd
}
// unrolled adapts unrolledCalculateChecksum to the common benchmark
// signature, discarding the odd-byte flag.
func unrolled(buf []byte, initial uint16) uint16 {
	csum, _ := unrolledCalculateChecksum(buf, false /* odd */, initial)
	return csum
}
// bitsLib adapts bitsAdd to the common benchmark signature, discarding the
// odd-byte flag.
func bitsLib(buf []byte, initial uint16) uint16 {
	csum, _ := bitsAdd(buf, false /* odd */, initial)
	return csum
}
// bitsAdd is copied from checksum_noasm_unsafe.go so that it can be
// benchmarked. On 64-bit platforms it sums eight bytes at a time using a
// carry-propagating add; any remaining bytes are handled by
// unrolledCalculateChecksum.
func bitsAdd(buf []byte, odd bool, initial uint16) (uint16, bool) {
	if bits.UintSize == 64 {
		// Initialize the accumulator and account for odd byte input.
		acc := uint(initial)
		if odd {
			acc += uint(buf[0])
			buf = buf[1:]
		}
		// It doesn't matter what endianness we use, only that it's
		// consistent throughout the calculation. See RFC 1071: the ones'
		// complement sum is byte-order independent up to a final swap.
		acc = ((acc & 0xff00) >> 8) | ((acc & 0x00ff) << 8)
		// Compute the checksum.
		remaining := len(buf)
		var carry uint
		for remaining >= 8 {
			// NOTE(review): this unsafe word load assumes unaligned reads
			// are permitted on the target architecture — confirm for any
			// new port.
			acc, carry = bits.Add(acc, *(*uint)(unsafe.Pointer(&buf[0])), carry)
			remaining -= 8
			buf = buf[8:]
		}
		acc += carry
		// Fold the checksum into 16 bits.
		for acc > math.MaxUint16 {
			acc = (acc & 0xffff) + acc>>16
		}
		// Swap back to little endian and let unrolledCalculateChecksum
		// handle the remaining bytes.
		acc = ((acc & 0xff00) >> 8) | ((acc & 0x00ff) << 8)
		return unrolledCalculateChecksum(buf, false, uint16(acc))
	}
	return unrolledCalculateChecksum(buf, odd, initial)
}
|
package connector
import (
"fmt"
"time"
core_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetServiceMap gives all services in a map to look them up in
// (namespace)-(service) format.
func (c *Client) GetServiceMap() (map[string]core_v1.Service, error) {
	list, err := c.clientset.CoreV1().Services("").List(meta_v1.ListOptions{})
	if err != nil {
		return nil, err
	}
	services := make(map[string]core_v1.Service, len(list.Items))
	for _, svc := range list.Items {
		meta := svc.GetObjectMeta()
		key := fmt.Sprintf("%s-%s", meta.GetNamespace(), meta.GetName())
		services[key] = svc
	}
	return services, nil
}
// WatchServicesForChanges watches changes on services inside Kubernetes.
// A value is sent on the returned channel by c.watcher when a change is
// observed. NOTE(review): the goroutine loops forever with no cancellation
// mechanism and prints watcher errors to stdout — consider accepting a
// context and using a real logger.
func (c *Client) WatchServicesForChanges() (chan bool, error) {
	changeChan := make(chan bool)
	go func() {
		for {
			// Re-establish the watch whenever it returns.
			err := c.watcher(changeChan, c.clientset.CoreV1().Services(""))
			fmt.Println(err)
			time.Sleep(300 * time.Millisecond) // backoff
		}
	}()
	return changeChan, nil
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package ptstorage
// Placeholder conventions shared by the queries below:
//   $1 = max spans limit, $2 = max total-bytes limit, $3 = span count of the
//   new record, $4 = record id, $5 = ts, $6 = meta_type, $7 = meta,
//   $8 = num_spans, $9 = spans.
const (
	// currentMetaCTE is used by all queries which access the meta row.
	// The query returns a default row if there currently is no meta row.
	// At the time of writing, there will never be a physical row in the meta
	// table with version zero.
	currentMetaCTE = `
SELECT
    version, num_records, num_spans, total_bytes
FROM
    system.protected_ts_meta
UNION ALL
    SELECT 0 AS version, 0 AS num_records, 0 AS num_spans, 0 AS total_bytes
ORDER BY
    version DESC
LIMIT
    1
`
	// protectQuery checks the limits, bumps the meta row, and inserts the new
	// record in one statement, returning whether the checks failed along with
	// the previous meta values.
	protectQuery = `
WITH
    current_meta AS (` + currentMetaCTE + `),
    checks AS (` + protectChecksCTE + `),
    updated_meta AS (` + protectUpsertMetaCTE + `),
    new_record AS (` + protectInsertRecordCTE + `)
SELECT
    failed,
    num_spans AS prev_spans,
    total_bytes AS prev_total_bytes,
    version AS prev_version
FROM
    checks, current_meta;`
	// protectChecksCTE computes the new meta values and sets failed when the
	// span limit ($1) or byte limit ($2) would be exceeded, or when a record
	// with the same id ($4) already exists.
	protectChecksCTE = `
SELECT
    new_version,
    new_num_records,
    new_num_spans,
    new_total_bytes,
    (
        ($1 > 0 AND new_num_spans > $1)
        OR ($2 > 0 AND new_total_bytes > $2)
        OR EXISTS(SELECT * FROM system.protected_ts_records WHERE id = $4)
    ) AS failed
FROM (
    SELECT
        version + 1 AS new_version,
        num_records + 1 AS new_num_records,
        num_spans + $3 AS new_num_spans,
        total_bytes + length($9) + length($6) + coalesce(length($7:::BYTES),0) AS new_total_bytes
    FROM
        current_meta
)
`
	// protectUpsertMetaCTE writes the new meta values, but only when the
	// checks did not fail.
	protectUpsertMetaCTE = `
UPSERT
INTO
    system.protected_ts_meta
(version, num_records, num_spans, total_bytes)
(
    SELECT
        new_version, new_num_records, new_num_spans, new_total_bytes
    FROM
        checks
    WHERE
        NOT failed
)
RETURNING
    version, num_records, num_spans, total_bytes
`
	// protectInsertRecordCTE inserts the new record, but only when the checks
	// did not fail.
	protectInsertRecordCTE = `
INSERT
INTO
    system.protected_ts_records (id, ts, meta_type, meta, num_spans, spans)
(
    SELECT
        $4, $5, $6, $7, $8, $9
    WHERE
        NOT EXISTS(SELECT * FROM checks WHERE failed)
)
RETURNING
    id
`
	// getRecordsQueryBase is shared by the list and point-lookup queries.
	getRecordsQueryBase = `
SELECT
    id, ts, meta_type, meta, spans, verified
FROM
    system.protected_ts_records`
	getRecordsQuery = getRecordsQueryBase + ";"
	getRecordQuery  = getRecordsQueryBase + `
WHERE
    id = $1;`
	// markVerifiedQuery flags the record as verified; the RETURNING clause
	// lets the caller detect whether the row existed.
	markVerifiedQuery = `
UPDATE
    system.protected_ts_records
SET
    verified = true
WHERE
    id = $1
RETURNING
    true
`
	// releaseQuery removes the record and updates the meta row accordingly in
	// one statement.
	releaseQuery = `
WITH
    current_meta AS (` + currentMetaCTE + `),
    record AS (` + releaseSelectRecordCTE + `),
    updated_meta AS (` + releaseUpsertMetaCTE + `)
DELETE FROM
    system.protected_ts_records AS r
WHERE
    EXISTS(SELECT NULL FROM record WHERE r.id = record.id)
RETURNING
    NULL;`
	// Collect the number of spans for the record identified by $1.
	releaseSelectRecordCTE = `
SELECT
    id,
    num_spans AS record_spans,
    length(spans) + length(meta_type) + coalesce(length(meta),0) AS record_bytes
FROM
    system.protected_ts_records
WHERE
    id = $1
`
	// Updates the meta row if there was a record.
	releaseUpsertMetaCTE = `
UPSERT
INTO
    system.protected_ts_meta (version, num_records, num_spans, total_bytes)
(
    SELECT
        version, num_records, num_spans, total_bytes
    FROM
        (
            SELECT
                version + 1 AS version,
                num_records - 1 AS num_records,
                num_spans - record_spans AS num_spans,
                total_bytes - record_bytes AS total_bytes
            FROM
                current_meta RIGHT JOIN record ON true
        )
)
RETURNING
    1
`
	// getMetadataQuery reads the (possibly defaulted) meta row.
	getMetadataQuery = `
WITH
    current_meta AS (` + currentMetaCTE + `)
SELECT
    version, num_records, num_spans, total_bytes
FROM
    current_meta;`
)
|
package commands
import (
"encoding/json"
)
// stringRepresentation converts value to a string: string values are
// returned unchanged, every other value is rendered as its JSON encoding.
// It returns an error only if value cannot be marshaled to JSON.
func stringRepresentation(value interface{}) (string, error) {
	// Pass strings through as-is so they are not quoted by the encoder.
	if s, ok := value.(string); ok {
		return s, nil
	}
	// Any other type becomes JSON text. (The original named this local
	// "json", shadowing the imported package.)
	encoded, err := json.Marshal(value)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
|
package routers
import (
"github.com/astaxie/beego"
"github.com/naokij/gotalk/controllers"
)
// init registers error handlers and all HTTP routes for the forum app.
func init() {
	// Custom error pages.
	beego.Errorhandler("404", controllers.Error404)
	beego.Errorhandler("403", controllers.Error403)
	beego.Errorhandler("500", controllers.Error500)
	beego.Errorhandler("Once", controllers.ErrorOnce)
	beego.Errorhandler("IPBan", controllers.ErrorIPBan)
	beego.Router("/", &controllers.MainController{})
	// Login / registration.
	authController := new(controllers.AuthController)
	beego.Router("/login", authController, "get:Login;post:DoLogin")
	beego.Router("/login/:returnurl(.+)", authController, "get:Login")
	beego.Router("/forget-password", authController, "get:ForgetPassword;post:ForgetPassword")
	beego.Router("/reset-password/:code([0-9a-zA-Z]+)", authController, "get:ResetPassword;post:ResetPassword")
	beego.Router("/logout", authController, "get:Logout")
	beego.Router("/register", authController, "get:Register;post:DoRegister")
	beego.Router("/register/validate-username", authController, "get:ValidateUsername")
	beego.Router("/register/validate-email", authController, "get:ValidateEmail")
	beego.Router("/register/validate-captcha", authController, "get:ValidateCaptcha")
	beego.Router("/activate/:code([0-9a-zA-Z]+)", authController, "get:Activate")
	// Social account login (OAuth).
	beego.InsertFilter("/login/*:/access", beego.BeforeRouter, controllers.OAuthAccess)
	beego.InsertFilter("/login/*:", beego.BeforeRouter, controllers.OAuthRedirect)
	socialAuthController := new(controllers.SocialAuthController)
	beego.Router("/register/connect", socialAuthController, "get:Connect;post:Connect")
	// User profiles and listings.
	userController := new(controllers.UserController)
	beego.Router("/user/:username(.+)/edit", userController, "get:Edit;post:Edit")
	beego.Router("/user/:username(.+)/resend-validation", userController, "get:ResendValidation")
	beego.Router("/user/:username(.+)", userController, "get:Profile")
	beego.Router("/user-followunfollow", userController, "post:FollowUnfollow")
	beego.Router("/users", userController, "get:ListById")
	beego.Router("/users/by-rep", userController, "get:ListByRep")
	beego.Router("/users/by-digests", userController, "get:ListByDigests")
	beego.Router("/users/by-topics", userController, "get:ListByTopics")
	beego.Router("/users/by-comments", userController, "get:ListByComments")
	beego.Router("/users/by-followers", userController, "get:ListByFollowers")
}
|
package lc
import "strings"
// shortestCompletingWord returns the shortest word in words that contains
// every letter of licensePlate (case-insensitive, with multiplicity).
// Ties are broken by the earliest position in words; "" is returned when no
// word completes the plate.
//
// Time: O(n) - n = letters
// Benchmark: 4ms 4.1mb | 100%
func shortestCompletingWord(licensePlate string, words []string) string {
	// Required letter frequencies, folding upper case onto lower case.
	// (The original also maintained an unused total count, removed here.)
	letters := make([]int, 26)
	for _, ch := range licensePlate {
		if ch >= 'a' && ch <= 'z' {
			letters[ch-'a']++
		}
		if ch >= 'A' && ch <= 'Z' {
			letters[ch-'A']++
		}
	}
	// hasLetters reports whether word contains every required letter at
	// least as many times as the plate demands.
	hasLetters := func(word string) bool {
		for ch, total := range letters {
			if total == 0 {
				continue
			}
			if strings.Count(word, string(rune(ch+'a'))) < total {
				return false
			}
		}
		return true
	}
	var word string
	for _, w := range words {
		if hasLetters(w) && (len(w) < len(word) || word == "") {
			word = w
		}
	}
	return word
}
|
package adapters
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/smartcontractkit/chainlink/store"
"github.com/smartcontractkit/chainlink/store/models"
)
// Bridge adapter is responsible for connecting the task pipeline to external
// adapters, allowing for custom computations to be executed and included in runs.
type Bridge struct {
	models.BridgeType
	// Params, when set, is merged into the run data before the request is
	// sent to the external adapter.
	Params *models.JSON
}
// Perform sends a POST request containing the JSON of the input RunResult to
// the external adapter specified in the BridgeType.
// It records the RunResult returned to it, and optionally marks the RunResult pending.
//
// If the Perform is resumed with a pending RunResult, the RunResult is marked
// not pending and the RunResult is returned.
func (ba *Bridge) Perform(input models.RunResult, store *store.Store) models.RunResult {
	switch {
	case input.Status.Finished():
		// Nothing left to do for completed runs.
		return input
	case input.Status.PendingBridge():
		return resumeBridge(input)
	default:
		return ba.handleNewRun(input, store.Config.BridgeResponseURL())
	}
}
// resumeBridge marks a previously pending bridge run as in progress so the
// pipeline can continue with it.
func resumeBridge(input models.RunResult) models.RunResult {
	resumed := input
	resumed.Status = models.RunStatusInProgress
	return resumed
}
// handleNewRun merges the bridge's params into the run data and POSTs the
// run to the external adapter, returning the adapter's response merged into
// the RunResult.
func (ba *Bridge) handleNewRun(input models.RunResult, bridgeResponseURL *url.URL) models.RunResult {
	if ba.Params != nil {
		var err error
		input.Data, err = input.Data.Merge(*ba.Params)
		if err != nil {
			return baRunResultError(input, "handling data param", err)
		}
	}
	// Copy the URL before appending the run-specific path. The original code
	// appended to the shared *url.URL owned by the config, so the path grew
	// on every run.
	responseURL := bridgeResponseURL
	if bridgeResponseURL != nil && *bridgeResponseURL != *zeroURL {
		u := *bridgeResponseURL
		u.Path += fmt.Sprintf("/v2/runs/%s", input.JobRunID)
		responseURL = &u
	}
	body, err := ba.postToExternalAdapter(input, responseURL)
	if err != nil {
		return baRunResultError(input, "post to external adapter", err)
	}
	return responseToRunResult(body, input)
}
// responseToRunResult decodes the external adapter's response body and
// merges it into the input RunResult.
func responseToRunResult(body []byte, input models.RunResult) models.RunResult {
	var brr models.BridgeRunResult
	if err := json.Unmarshal(body, &brr); err != nil {
		return baRunResultError(input, "unmarshaling JSON", err)
	}
	merged, err := input.Merge(brr.RunResult)
	if err != nil {
		return baRunResultError(merged, "Unable to merge received payload", err)
	}
	return merged
}
// postToExternalAdapter POSTs the JSON-encoded run (including the callback
// responseURL) to the bridge's configured URL, authenticated with the
// bridge's outgoing token. It returns the raw response body; any HTTP status
// >= 400 is converted into an error carrying the status and body.
// NOTE(review): the http.Client has no Timeout, so a hung adapter can block
// this call indefinitely — confirm whether a bound is enforced elsewhere.
func (ba *Bridge) postToExternalAdapter(input models.RunResult, bridgeResponseURL *url.URL) ([]byte, error) {
	in, err := json.Marshal(&bridgeOutgoing{
		RunResult:   input,
		ResponseURL: bridgeResponseURL,
	})
	if err != nil {
		return nil, fmt.Errorf("marshaling request body: %v", err)
	}
	request, err := http.NewRequest("POST", ba.URL.String(), bytes.NewBuffer(in))
	if err != nil {
		return nil, fmt.Errorf("building outgoing bridge http post: %v", err)
	}
	request.Header.Set("Authorization", "Bearer "+ba.BridgeType.OutgoingToken)
	request.Header.Set("Content-Type", "application/json")
	client := http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		return nil, fmt.Errorf("POST request: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		// Include the response body in the error for easier debugging.
		b, _ := ioutil.ReadAll(resp.Body)
		err = fmt.Errorf("%v %v", resp.StatusCode, string(b))
		return nil, fmt.Errorf("POST response: %v", err)
	}
	return ioutil.ReadAll(resp.Body)
}
// baRunResultError attaches a bridge-scoped error to the given RunResult.
func baRunResultError(in models.RunResult, str string, err error) models.RunResult {
	wrapped := fmt.Errorf("ExternalBridge %v: %v", str, err)
	return in.WithError(wrapped)
}
// bridgeOutgoing is the payload POSTed to an external adapter: the run
// result plus the URL the adapter should respond to.
type bridgeOutgoing struct {
	models.RunResult
	ResponseURL *url.URL
}
// MarshalJSON renders the outgoing payload with only the job run ID, its
// data, and (when configured) the response callback URL.
func (bp bridgeOutgoing) MarshalJSON() ([]byte, error) {
	anon := struct {
		JobRunID    string      `json:"id"`
		Data        models.JSON `json:"data"`
		ResponseURL string      `json:"responseURL,omitempty"`
	}{
		JobRunID: bp.JobRunID,
		Data:     bp.Data,
	}
	// Guard against a nil URL so marshaling cannot panic; omitempty then
	// simply drops the field. (The original dereferenced unconditionally.)
	if bp.ResponseURL != nil {
		anon.ResponseURL = bp.ResponseURL.String()
	}
	return json.Marshal(anon)
}
// zeroURL is the zero-value URL used to detect an unconfigured bridge
// response URL.
var zeroURL = new(url.URL)
|
package api
import (
"testing"
"ark/store"
)
// mockLoadBalancer counts how many times its routes were updated.
type mockLoadBalancer struct {
	count int // number of Update calls observed
}
// mockStore is an in-memory store.Store implementation for tests.
type mockStore struct {
	rts map[string]*store.Route // routes keyed by name
}
// Update records that the load balancer was asked to refresh its routes.
func (l *mockLoadBalancer) Update([]*store.Route) error {
	l.count = l.count + 1
	return nil
}
// newStore creates an empty in-memory mock store.
func newStore() store.Store {
	s := &mockStore{}
	s.rts = make(map[string]*store.Route)
	return s
}
// Save stores the route under its name, overwriting any previous entry.
func (s *mockStore) Save(r *store.Route) error {
	name := r.Name
	s.rts[name] = r
	return nil
}
// Load copies the named route into r, or returns store.ErrNotFound.
func (s *mockStore) Load(name string, r *store.Route) error {
	found := s.rts[name]
	if found == nil {
		return store.ErrNotFound
	}
	*r = *found
	return nil
}
// LoadAll returns every stored route in unspecified order (nil when empty).
func (s *mockStore) LoadAll() ([]*store.Route, error) {
	var routes []*store.Route
	for _, route := range s.rts {
		routes = append(routes, route)
	}
	return routes, nil
}
// Delete removes the named route, or returns store.ErrNotFound if absent.
func (s *mockStore) Delete(name string) error {
	existing := s.rts[name]
	if existing == nil {
		return store.ErrNotFound
	}
	delete(s.rts, name)
	return nil
}
// Close is a no-op for the in-memory mock.
func (s *mockStore) Close() error { return nil }
// TestPostRoutes is a placeholder for route-posting coverage.
func TestPostRoutes(t *testing.T) {
	// TODO(knorton): Test this.
}
|
package main
import (
crand "crypto/rand"
"fmt"
"math"
"math/big"
"math/rand"
"strings"
"sync"
"time"
)
var (
	// once guards the one-time seeding performed by SeedMathRand.
	once sync.Once
	// SeededSecurely reports whether math/rand was seeded from crypto/rand
	// rather than from the wall clock.
	SeededSecurely bool
)
// init seeds the global math/rand source at program start.
func init() {
	SeedMathRand()
}
// SeedMathRand seeds the global math/rand generator exactly once, preferring
// entropy from crypto/rand and falling back to the current time when the
// system entropy source fails. SeededSecurely records which path was taken.
func SeedMathRand() {
	once.Do(func() {
		n, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
		if err != nil {
			// crypto/rand unavailable: fall back to an insecure time seed.
			rand.Seed(time.Now().UTC().UnixNano())
			return
		}
		rand.Seed(n.Int64())
		SeededSecurely = true
	})
}
// RandomUuid generates a random UUID-shaped identifier: five upper-cased hex
// groups in 8-4-4-4-12 form. The bytes come from math/rand (not
// crypto/rand) and no RFC 4122 version bits are set.
func RandomUuid() (string, error) {
	raw := make([]byte, 16)
	if _, err := rand.Read(raw); err != nil {
		return "", err
	}
	formatted := fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
		raw[:4], raw[4:6], raw[6:8], raw[8:10], raw[10:])
	return strings.ToUpper(formatted), nil
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package store
import (
"database/sql"
"encoding/json"
sq "github.com/Masterminds/squirrel"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
)
// Table names used by the events subsystem.
const (
	subscriptionsTable    = "Subscription"
	eventTable            = "Event"
	eventDeliveryTable    = "EventDelivery"
	stateChangeEventTable = "StateChangeEvent"
)
var (
	// eventDeliveryColumns lists the EventDelivery columns read and written
	// by the queries in this file.
	eventDeliveryColumns = []string{"ID", "EventID", "SubscriptionID", "Status", "LastAttempt", "Attempts"}
	// stateChangeEventSelect joins StateChangeEvent rows with their parent
	// Event row; callers derive concrete queries by chaining Where/OrderBy
	// onto it.
	stateChangeEventSelect = sq.Select("sc.ID, sc.ResourceID, sc.ResourceType, sc.OldState, sc.NewState, sc.EventID, e.Timestamp, e.EventType, e.ExtraData").
		From("StateChangeEvent as sc").
		Join("Event as e on sc.EventID = e.ID")
)
// CreateStateChangeEvent creates new StateChangeEvent and initializes EventDeliveries.
// The event, the state change row, and one delivery per matching subscription
// are written in a single transaction.
func (sqlStore *SQLStore) CreateStateChangeEvent(event *model.StateChangeEventData) error {
	tx, err := sqlStore.beginTransaction(sqlStore.db)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	// Roll back automatically unless Commit below succeeds.
	defer tx.RollbackUnlessCommitted()
	err = sqlStore.createEvent(tx, &event.Event)
	if err != nil {
		return errors.Wrap(err, "failed to create event")
	}
	// Link the state change row to the freshly assigned event ID.
	event.StateChange.EventID = event.Event.ID
	err = sqlStore.createStateChangeEvent(tx, &event.StateChange)
	if err != nil {
		return errors.Wrap(err, "failed to create state change event")
	}
	// Fan the event out to all non-deleted subscriptions of this event type.
	subFilter := model.SubscriptionsFilter{EventType: event.Event.EventType, Paging: model.AllPagesNotDeleted()}
	subscriptions, err := sqlStore.getSubscriptions(tx, &subFilter)
	if err != nil {
		return errors.Wrap(err, "failed to get subscriptions")
	}
	err = sqlStore.createEventDeliveries(tx, &event.Event, subscriptions)
	if err != nil {
		return errors.Wrap(err, "failed to create event deliveries")
	}
	err = tx.Commit()
	if err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}
// createEvent inserts a new Event row, assigning it a fresh ID and
// serializing its extra data to JSON.
func (sqlStore *SQLStore) createEvent(db execer, event *model.Event) error {
	event.ID = model.NewID()
	extraData, err := json.Marshal(event.ExtraData)
	if err != nil {
		return errors.Wrap(err, "failed to marshal events' extra data")
	}
	insert := sq.Insert(eventTable).SetMap(map[string]interface{}{
		"ID":        event.ID,
		"EventType": event.EventType,
		"Timestamp": event.Timestamp,
		"ExtraData": extraData,
	})
	if _, err = sqlStore.execBuilder(db, insert); err != nil {
		return errors.Wrap(err, "failed to create event")
	}
	return nil
}
// createStateChangeEvent inserts a new StateChangeEvent row with a fresh ID.
func (sqlStore *SQLStore) createStateChangeEvent(db execer, event *model.StateChangeEvent) error {
	event.ID = model.NewID()
	insert := sq.Insert(stateChangeEventTable).SetMap(map[string]interface{}{
		"ID":           event.ID,
		"EventID":      event.EventID,
		"ResourceID":   event.ResourceID,
		"ResourceType": event.ResourceType,
		"OldState":     event.OldState,
		"NewState":     event.NewState,
	})
	if _, err := sqlStore.execBuilder(db, insert); err != nil {
		return errors.Wrap(err, "failed to create state change event")
	}
	return nil
}
// createEventDeliveries creates one EventDelivery per subscription for the
// event. Although we do not expect a huge number of subscriptions, the max
// number of prepared-statement tokens is 999, so we insert in batches for
// the sake of future proofing.
func (sqlStore *SQLStore) createEventDeliveries(db dbInterface, event *model.Event, subscriptions []*model.Subscription) error {
	if len(subscriptions) == 0 {
		return nil
	}
	const batchSize = 50
	for start := 0; start < len(subscriptions); start += batchSize {
		end := start + batchSize
		if end > len(subscriptions) {
			end = len(subscriptions)
		}
		if err := sqlStore.insertEventDeliveries(db, event, subscriptions[start:end]); err != nil {
			return err
		}
	}
	return nil
}
// insertEventDeliveries inserts a single batch of EventDelivery rows in the
// not-attempted state.
func (sqlStore *SQLStore) insertEventDeliveries(db dbInterface, event *model.Event, subscriptions []*model.Subscription) error {
	insert := sq.Insert("EventDelivery").Columns(eventDeliveryColumns...)
	for _, sub := range subscriptions {
		insert = insert.Values(model.NewID(), event.ID, sub.ID, model.EventDeliveryNotAttempted, 0, 0)
	}
	if _, err := sqlStore.execBuilder(db, insert); err != nil {
		return errors.Wrap(err, "failed to create event deliveries")
	}
	return nil
}
// stateChangeEventData is a helper struct for querying joined data of Event and StateChangeEvent.
type stateChangeEventData struct {
	EventType model.EventType // from the joined Event row
	Timestamp int64           // from the joined Event row
	ExtraData []byte          // raw JSON from the joined Event row
	model.StateChangeEvent
}
// toStateChangeEventData unpacks the joined row into the model type,
// decoding the JSON-encoded extra data.
func (s stateChangeEventData) toStateChangeEventData() (model.StateChangeEventData, error) {
	var extraData model.EventExtraData
	if err := json.Unmarshal(s.ExtraData, &extraData); err != nil {
		return model.StateChangeEventData{}, errors.Wrapf(err, "failed to unmarshal events' extra data, eventID: %q", s.EventID)
	}
	event := model.Event{
		ID:        s.EventID,
		EventType: s.EventType,
		Timestamp: s.Timestamp,
		ExtraData: extraData,
	}
	return model.StateChangeEventData{
		Event:       event,
		StateChange: s.StateChangeEvent,
	}, nil
}
// GetStateChangeEventsToProcess returns StateChangeEventDeliveryData for given subscription in order of occurrence.
// Only deliveries that are not yet attempted or are retrying are included.
func (sqlStore *SQLStore) GetStateChangeEventsToProcess(subID string) ([]*model.StateChangeEventDeliveryData, error) {
	var eventDeliveries []*model.EventDelivery
	err := sqlStore.selectBuilder(sqlStore.db, &eventDeliveries,
		sq.Select(eventDeliveryColumns...).
			From(eventDeliveryTable).
			Where("SubscriptionID = ?", subID).
			Where(sq.Eq{"Status": []model.EventDeliveryStatus{model.EventDeliveryNotAttempted, model.EventDeliveryRetrying}}),
	)
	if err != nil {
		return nil, errors.Wrap(err, "failed to query event deliveries for subscription")
	}
	eventIDs := make([]string, 0, len(eventDeliveries))
	for _, e := range eventDeliveries {
		eventIDs = append(eventIDs, e.EventID)
	}
	// Load the matching events, oldest first, so deliveries are processed in
	// order of occurrence.
	var eventsData []stateChangeEventData
	err = sqlStore.selectBuilder(sqlStore.db, &eventsData,
		stateChangeEventSelect.
			Where(sq.Eq{"sc.EventID": eventIDs}).
			OrderBy("e.Timestamp ASC"),
	)
	if err != nil {
		return nil, errors.Wrap(err, "failed to query state change events for subscription")
	}
	if len(eventsData) != len(eventDeliveries) {
		return nil, errors.Errorf("number of found events does not match number of deliveries, events: %d, deliveries: %d",
			len(eventsData), len(eventDeliveries))
	}
	deliveryData := make([]*model.StateChangeEventDeliveryData, len(eventsData))
	for i, event := range eventsData {
		delivery, found := model.EventDeliveryForEvent(event.EventID, eventDeliveries)
		if !found {
			// The original wrapped a nil err here (errors.Wrap(nil, ...)
			// returns nil), which made this branch silently return no error.
			return nil, errors.Errorf("failed to find event delivery for event %s", event.EventID)
		}
		eventData, err := event.toStateChangeEventData()
		if err != nil {
			return nil, err
		}
		deliveryData[i] = &model.StateChangeEventDeliveryData{
			EventDelivery: *delivery,
			EventData:     eventData,
		}
	}
	return deliveryData, nil
}
// GetStateChangeEvent fetches StateChangeEventData based on specified event ID.
// It returns (nil, nil) when no such event exists.
func (sqlStore *SQLStore) GetStateChangeEvent(eventID string) (*model.StateChangeEventData, error) {
	var event stateChangeEventData
	err := sqlStore.getBuilder(sqlStore.db, &event,
		stateChangeEventSelect.Where("e.ID = ?", eventID),
	)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		return nil, errors.Wrap(err, "failed to get event")
	}
	eventData, err := event.toStateChangeEventData()
	if err != nil {
		return nil, err
	}
	return &eventData, nil
}
// GetStateChangeEvents fetches StateChangeEventData based on the filter.
func (sqlStore *SQLStore) GetStateChangeEvents(filter *model.StateChangeEventFilter) ([]*model.StateChangeEventData, error) {
	builder := stateChangeEventSelect.OrderBy("e.Timestamp DESC")
	// Apply paging only when the caller did not request the full result set.
	if filter.Paging.PerPage != model.AllPerPage {
		builder = builder.
			Limit(uint64(filter.Paging.PerPage)).
			Offset(uint64(filter.Paging.Page * filter.Paging.PerPage))
	}
	if filter.ResourceType != "" {
		builder = builder.Where("sc.ResourceType = ?", filter.ResourceType)
	}
	if filter.ResourceID != "" {
		builder = builder.Where("sc.ResourceID = ?", filter.ResourceID)
	}
	if filter.OldStates != nil {
		builder = builder.Where(sq.Eq{"sc.OldState": filter.OldStates})
	}
	if filter.NewStates != nil {
		builder = builder.Where(sq.Eq{"sc.NewState": filter.NewStates})
	}
	var rows []stateChangeEventData
	if err := sqlStore.selectBuilder(sqlStore.db, &rows, builder); err != nil {
		return nil, errors.Wrap(err, "failed to query state change events")
	}
	result := make([]*model.StateChangeEventData, len(rows))
	for i := range rows {
		converted, err := rows[i].toStateChangeEventData()
		if err != nil {
			return nil, err
		}
		result[i] = &converted
	}
	return result, nil
}
// UpdateEventDeliveryStatus updates status fields of EventDelivery.
func (sqlStore *SQLStore) UpdateEventDeliveryStatus(delivery *model.EventDelivery) error {
	// Only the mutable status-tracking columns are written; the rest of the
	// row is left untouched.
	updateQuery := sq.Update(eventDeliveryTable).
		SetMap(map[string]interface{}{
			"Status":      delivery.Status,
			"Attempts":    delivery.Attempts,
			"LastAttempt": delivery.LastAttempt,
		}).
		Where("ID = ?", delivery.ID)
	if _, err := sqlStore.execBuilder(sqlStore.db, updateQuery); err != nil {
		return errors.Wrap(err, "failed to update event delivery status")
	}
	return nil
}
// GetDeliveriesForSubscription is a helper function used for some tests.
func (sqlStore *SQLStore) GetDeliveriesForSubscription(subID string) ([]*model.EventDelivery, error) {
	deliveries := []*model.EventDelivery{}
	err := sqlStore.selectBuilder(sqlStore.db, &deliveries,
		sq.Select("*").From(eventDeliveryTable).Where("SubscriptionID = ?", subID),
	)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get event deliveries")
	}
	return deliveries, nil
}
// lockSubscription marks the subscription as locked for exclusive use by the
// caller. It delegates to the shared row-locking helper on the subscriptions
// table and reports whether the lock was acquired.
func (sqlStore *SQLStore) lockSubscription(db execer, subID, lockerID string) (bool, error) {
	return sqlStore.lockRowsTx(db, subscriptionsTable, []string{subID}, lockerID)
}
// UnlockSubscription releases a lock previously acquired against a caller.
// force presumably releases the row regardless of which locker holds it —
// confirm against the unlockRows helper.
func (sqlStore *SQLStore) UnlockSubscription(subID, lockerID string, force bool) (bool, error) {
	return sqlStore.unlockRows(subscriptionsTable, []string{subID}, lockerID, force)
}
|
package main
import (
"fmt"
"net/http"
"squad-manager/routes"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
)
// main loads the .env configuration, registers the application routes, and
// serves HTTP on :8000. Any startup failure is printed and then panics.
func main() {
	r := mux.NewRouter()
	if err := godotenv.Load(); err != nil {
		/* TODO Implement the logger to capture all events and remove the panics */
		fmt.Println("Found err in env load: ", err)
		// err already satisfies the error interface; the previous
		// error(err) conversion was redundant.
		panic(err)
	}
	routes.CreateRoutes(r)
	// ListenAndServe blocks; it only returns when binding or serving fails.
	if err := http.ListenAndServe(":8000", r); err != nil {
		fmt.Println("Found err on listenAndServe:", err)
		panic(err)
	}
}
|
package sgml
import (
"fmt"
"strings"
"unicode"
"github.com/bytesparadise/libasciidoc/pkg/types"
)
// renderStringElement renders a plain string element, converting non-ASCII
// characters to numeric entities unless the rendering context allows raw
// Unicode output. The error result is always nil here; it exists to match
// the renderer function signature.
func (r *sgmlRenderer) renderStringElement(ctx *context, str *types.StringElement) (string, error) {
	// NB: For all SGML flavors we are aware of, the numeric entities from
	// Unicode are supported. We generally avoid named entities.
	result := str.Content
	if !ctx.UseUnicode() {
		// convert to entities
		result = asciiEntify(result)
	}
	return result, nil
}
// asciiEntify rewrites every rune outside the printable/whitespace ASCII
// range as an SGML numeric character reference (e.g. "&#233;"). Printable
// ASCII and ASCII whitespace pass through unchanged. Characters that need
// markup escaping are left alone; run the input through an escaper first if
// that is a concern.
func asciiEntify(source string) string {
	var b strings.Builder
	b.Grow(len(source))
	for _, c := range source {
		if c >= 128 || (!unicode.IsPrint(c) && !unicode.IsSpace(c)) {
			// The uint32 conversion keeps the entity unsigned (runes should
			// always be non-negative anyway).
			fmt.Fprintf(&b, "&#%d;", uint32(c))
			continue
		}
		b.WriteRune(c)
	}
	return b.String()
}
|
/*
MIT License
Copyright (c) 2020-2021 Kazuhito Suda
This file is part of NGSI Go
https://github.com/lets-fiware/ngsi-go
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package ngsicmd
import (
"errors"
"fmt"
"syscall"
)
// ngsiCmdError carries the failing function's name, a numeric error code, a
// human-readable message, and an optional wrapped cause.
type ngsiCmdError struct {
	Function string // name of the function that produced the error
	ErrNo    int    // command error number, rendered as %03d by String
	Message  string // human-readable description, returned by Error
	Err      error  // wrapped cause, exposed via Unwrap
}
// String renders the error as "<Function><3-digit ErrNo> <Message>", and
// appends the errno text when a syscall.Errno is found in the wrapped chain.
func (e *ngsiCmdError) String() string {
	suffix := ""
	var errno syscall.Errno
	// errors.As matches against the Unwrap chain (e.Err), since
	// *ngsiCmdError itself is not a syscall.Errno.
	if errors.As(e, &errno) {
		suffix = fmt.Sprintf(": %s", errno)
	}
	return fmt.Sprintf("%s%03d %s%s", e.Function, e.ErrNo, e.Message, suffix)
}
// Error implements the error interface, returning only the message text.
func (e *ngsiCmdError) Error() string {
	return e.Message
}
// Unwrap exposes the wrapped cause so errors.Is/errors.As can walk the chain.
func (e *ngsiCmdError) Unwrap() error { return e.Err }
// sprintMsg formats a command message as "<funcName><3-digit no> <msg>",
// matching the layout used by ngsiCmdError.String.
func sprintMsg(funcName string, no int, msg string) string {
	return funcName + fmt.Sprintf("%03d", no) + " " + msg
}
/*
func setNewError(funcName string, num int, newErr error, err *error) {
if *err == nil && newErr != nil {
*err = &ngsiCmdError{funcName, num, newErr.Error(), nil}
}
}
*/
|
package checkstyle
import (
"bytes"
"encoding/json"
"go/ast"
"go/format"
"go/parser"
"go/token"
"strconv"
"strings"
)
// ProblemType identifies the category of a style problem found by a check.
type ProblemType string

// The full set of problem categories. These strings also appear in the
// "fatal" list of the JSON configuration (see checker.Fatal / IsFatal).
const (
	FileLine     ProblemType = "file_line"
	FunctionLine ProblemType = "func_line"
	ParamsNum    ProblemType = "params_num"
	ResultsNum   ProblemType = "results_num"
	Formated     ProblemType = "formated"
	PackageName  ProblemType = "pkg_name"
	CamelName    ProblemType = "camel_name"
)

// Problem is a single style finding: where it occurred, a human-readable
// description, and its category.
type Problem struct {
	Position    *token.Position
	Description string
	// SourceLine string
	Type ProblemType
}

// Checker runs all configured style checks over one source file and
// classifies which problem categories are fatal.
type Checker interface {
	Check(fileName string, src []byte) ([]Problem, error)
	IsFatal(p *Problem) bool
}

// checker is the JSON-configurable implementation of Checker. A zero value
// for a numeric limit, or false for a flag, disables that check.
type checker struct {
	FunctionComment bool     `json:"func_comment"`
	FileLine        int      `json:"file_line"`
	FunctionLine    int      `json:"func_line"`
	MaxIndent       int      `json:"max_indent"`
	Formated        bool     `json:"formated"`
	Fatal           []string `json:"fatal"`
	ParamsNum       int      `json:"params_num"`
	ResultsNum      int      `json:"results_num"`
	PackageName     bool     `json:"pkg_name"`
	CamelName       bool     `json:"camel_name"`
}
// New parses the JSON configuration and returns a Checker built from it.
func New(config []byte) (Checker, error) {
	var c checker
	if err := json.Unmarshal(config, &c); err != nil {
		return nil, err
	}
	return &c, nil
}
// Check parses fileName/src (including comments) and runs every configured
// check, returning the accumulated problems.
func (c *checker) Check(fileName string, src []byte) (ps []Problem, err error) {
	fset := token.NewFileSet()
	parsed, parseErr := parser.ParseFile(fset, fileName, src, parser.ParseComments)
	if parseErr != nil {
		return nil, parseErr
	}
	target := &file{
		fileName: fileName,
		src:      src,
		config:   c,
		ast:      parsed,
		fset:     fset,
		problems: []Problem{},
	}
	return target.check(), nil
}
// IsFatal reports whether the problem's category appears in the configured
// "fatal" list.
func (c *checker) IsFatal(p *Problem) bool {
	category := string(p.Type)
	for _, fatal := range c.Fatal {
		if fatal == category {
			return true
		}
	}
	return false
}
// file bundles everything needed to check one source file: its name, raw
// bytes, the active configuration, the parsed AST, the file set for position
// lookups, and the problems accumulated so far.
type file struct {
	fileName string
	src      []byte
	config   *checker
	ast      *ast.File
	fset     *token.FileSet
	problems []Problem
}
// isTest reports whether this is a Go test file (name ends in _test.go);
// several checks are skipped for test files.
func (f *file) isTest() bool {
	return strings.HasSuffix(f.fileName, "_test.go")
}
// check runs the file-level checks in a fixed order (formatting, file
// length, content) and returns the accumulated problems.
func (f *file) check() (ps []Problem) {
	f.checkFormat()
	f.checkFileLine()
	f.checkFileContent()
	return f.problems
}
// checkFormat reports a problem when the source is not gofmt-formatted.
// It is a no-op unless the "formated" option is enabled.
func (f *file) checkFormat() {
	if !f.config.Formated {
		return
	}
	src, err := format.Source(f.src)
	if err != nil {
		// NOTE(review): panicking on unformattable input is harsh for a
		// linter, but kept as-is because callers may rely on it.
		panic(f.fileName + err.Error())
	}
	// bytes.Equal replaces the previous len()+bytes.Compare combination; it
	// already short-circuits on differing lengths.
	if !bytes.Equal(src, f.src) {
		desc := "source is not formated"
		pos := f.fset.Position(f.ast.Pos())
		problem := Problem{Description: desc, Position: &pos, Type: Formated}
		f.problems = append(f.problems, problem)
	}
}
// checkFileLine flags files whose total line count exceeds the configured
// "file_line" limit (0 disables the check). Test files are exempt.
func (f *file) checkFileLine() {
	if f.isTest() {
		return
	}
	limit := f.config.FileLine
	if limit == 0 {
		return
	}
	f.fset.Iterate(func(tf *token.File) bool {
		count := tf.LineCount()
		if count > limit {
			pos := f.fset.Position(f.ast.End())
			f.problems = append(f.problems, Problem{
				Description: strconv.Itoa(count) + " lines more than " + strconv.Itoa(limit),
				Position:    &pos,
				Type:        FileLine,
			})
		}
		return true
	})
}
// genFuncLineProblem builds a FunctionLine problem for a function body that
// exceeds the configured line limit.
func genFuncLineProblem(name string, lineCount, lineLimit int, start token.Position) Problem {
	return Problem{
		Description: "func " + name + "() body lines num " + strconv.Itoa(lineCount) + " more than " + strconv.Itoa(lineLimit),
		Position:    &start,
		Type:        FunctionLine,
	}
}
// genParamsNumProblem builds a ParamsNum problem for a function with more
// parameters than the configured limit.
func genParamsNumProblem(name string, paramsNum, limit int, start token.Position) Problem {
	return Problem{
		Description: "func " + name + "() params num " + strconv.Itoa(paramsNum) + " more than " + strconv.Itoa(limit),
		Position:    &start,
		Type:        ParamsNum,
	}
}
// genResultsNumProblem builds a ResultsNum problem for a function with more
// results than the configured limit.
func genResultsNumProblem(name string, resultsNum, limit int, start token.Position) Problem {
	return Problem{
		Description: "func " + name + "() results num " + strconv.Itoa(resultsNum) + " more than " + strconv.Itoa(limit),
		Position:    &start,
		Type:        ResultsNum,
	}
}
// genFuncBodyProblem builds a problem for a function declared without a body.
// NOTE(review): Type is set to ResultsNum, which looks like a copy-paste from
// genResultsNumProblem — there is no dedicated ProblemType for missing
// bodies. Changing it would alter which findings the "fatal" configuration
// matches, so confirm the intended category before fixing.
func genFuncBodyProblem(name string, start token.Position) Problem {
	desc := "func " + name + " expected block '{}'"
	return Problem{Description: desc, Position: &start, Type: ResultsNum}
}
// checkPkgName validates the package identifier against effective-go naming:
// no underscores and no capital letters.
// ref "http://golang.org/doc/effective_go.html#package-names"
func (f *file) checkPkgName(pkg *ast.Ident) {
	name := pkg.Name
	desc := ""
	switch {
	case strings.Contains(name, "_"):
		suggest := strings.Replace(name, "_", "/", -1)
		desc = "don't use an underscore in package name, " + name + " should be " + suggest
	case strings.ToLower(name) != name:
		desc = "don't use capital letters in package name: " + name
	}
	if desc == "" {
		return
	}
	start := f.fset.Position(pkg.Pos())
	f.problems = append(f.problems, Problem{Description: desc, Position: &start, Type: PackageName})
}
// checkFunctionParams enforces the configured limits on the number of
// parameters and results of funcName, and checks the naming style of every
// named parameter and named result.
func (f *file) checkFunctionParams(fType *ast.FuncType, funcName string) {
	if params := fType.Params; params != nil {
		if limit := f.config.ParamsNum; limit != 0 && params.NumFields() > limit {
			start := f.fset.Position(params.Pos())
			f.problems = append(f.problems, genParamsNumProblem(funcName, params.NumFields(), limit, start))
		}
		for _, field := range params.List {
			for _, name := range field.Names {
				f.checkName(name, "param", true)
			}
		}
	}
	if results := fType.Results; results != nil {
		if limit := f.config.ResultsNum; limit != 0 && results.NumFields() > limit {
			start := f.fset.Position(results.Pos())
			f.problems = append(f.problems, genResultsNumProblem(funcName, results.NumFields(), limit, start))
		}
		for _, field := range results.List {
			for _, name := range field.Names {
				f.checkName(name, "return param", true)
			}
		}
	}
}
// checkFunctionLine flags functions spanning more lines than the configured
// "func_line" limit (non-positive disables the check).
func (f *file) checkFunctionLine(funcDecl *ast.FuncDecl) {
	limit := f.config.FunctionLine
	if limit <= 0 {
		return
	}
	start := f.fset.Position(funcDecl.Pos())
	end := f.fset.Position(funcDecl.End())
	if count := end.Line - start.Line; count > limit {
		f.problems = append(f.problems, genFuncLineProblem(funcDecl.Name.Name, count, limit, start))
	}
}
// checkFunctionBody reports declarations that lack a body block.
func (f *file) checkFunctionBody(funcDecl *ast.FuncDecl) {
	if funcDecl.Body == nil {
		start := f.fset.Position(funcDecl.Pos())
		f.problems = append(f.problems, genFuncBodyProblem(funcDecl.Name.Name, start))
	}
}
// checkFunctionDeclare runs every function-level check: body length, name
// style, parameter/result limits, body presence, and receiver naming.
func (f *file) checkFunctionDeclare(funcDecl *ast.FuncDecl) {
	f.checkFunctionLine(funcDecl)
	f.checkName(funcDecl.Name, "func", false)
	f.checkFunctionParams(funcDecl.Type, funcDecl.Name.Name)
	f.checkFunctionBody(funcDecl)
	if recv := funcDecl.Recv; recv != nil && len(recv.List) != 0 && len(recv.List[0].Names) != 0 {
		f.checkName(recv.List[0].Names[0], "receiver", true)
	}
}
// trimUnderscorePrefix strips a single leading underscore, if present.
// Bug fix: the previous name[0] indexing panicked on the empty string;
// strings.TrimPrefix handles "" safely and is otherwise equivalent.
func trimUnderscorePrefix(name string) string {
	return strings.TrimPrefix(name, "_")
}
// checkName verifies that an identifier follows camelCase conventions: no
// interior underscores, no all-caps names of five or more characters, and —
// when notFirstCap is set (params, locals, receivers) — no leading capital.
// A single leading underscore is ignored. kind labels the identifier in the
// problem message.
func (f *file) checkName(id *ast.Ident, kind string, notFirstCap bool) {
	if !f.config.CamelName {
		return
	}
	name := trimUnderscorePrefix(id.Name)
	if name == "" {
		return
	}
	start := f.fset.Position(id.Pos())
	if strings.Contains(name, "_") {
		desc := "don't use non-prefix underscores in " + kind + " name: " + id.Name + ", please use camel name"
		problem := Problem{Description: desc, Position: &start, Type: CamelName}
		f.problems = append(f.problems, problem)
	} else if len(name) >= 5 && strings.ToUpper(name) == name {
		// Message typo fixed: "captial" -> "capital".
		desc := "don't use all capital letters in " + kind + " name: " + id.Name + ", please use camel name"
		problem := Problem{Description: desc, Position: &start, Type: CamelName}
		f.problems = append(f.problems, problem)
	} else if notFirstCap && name[0:1] == strings.ToUpper(name[0:1]) {
		// Message typo and punctuation fixed ("in function ,don't ... captial").
		desc := "in function, don't use first capital letter in " + kind + " name: " + id.Name + ", please use small letter"
		problem := Problem{Description: desc, Position: &start, Type: CamelName}
		f.problems = append(f.problems, problem)
	}
}
// checkStruct applies naming checks to every named field of a struct type.
func (f *file) checkStruct(st *ast.StructType) {
	if st.Fields == nil {
		return
	}
	for _, field := range st.Fields.List {
		for _, name := range field.Names {
			f.checkName(name, "struct field", false)
		}
	}
}
// checkInterface applies naming and parameter checks to each method declared
// in an interface type. Embedded interfaces carry no Names and a
// non-FuncType Type, so the FuncType assertion skips them.
func (f *file) checkInterface(it *ast.InterfaceType) {
	if it.Methods == nil {
		return
	}
	for _, method := range it.Methods.List {
		for _, name := range method.Names {
			f.checkName(name, "interface method", false)
		}
		if sig, ok := method.Type.(*ast.FuncType); ok {
			f.checkFunctionParams(sig, method.Names[0].Name)
		}
	}
}
// checkValueName applies naming checks to the specs of a generic declaration
// (const/var/type/import blocks). kind labels the declaration in problem
// messages; top marks a package-level declaration, where a leading capital
// is allowed (exported names).
func (f *file) checkValueName(decl *ast.GenDecl, kind string, top bool) {
	for _, spec := range decl.Specs {
		if vSpec, ok := spec.(*ast.ValueSpec); ok {
			// const/var: check each declared name.
			for _, name := range vSpec.Names {
				f.checkName(name, kind, !top)
			}
		} else if tSpec, ok := spec.(*ast.TypeSpec); ok {
			// type: check the type name, then walk the type expression for
			// nested declarations, struct fields, and interface methods.
			f.checkName(tSpec.Name, kind, false)
			ast.Inspect(tSpec.Type, func(node ast.Node) bool {
				switch decl2 := node.(type) {
				case *ast.GenDecl:
					f.checkGenDecl(decl2, false)
				case *ast.FuncDecl:
					f.checkFunctionDeclare(decl2)
				case *ast.StructType:
					f.checkStruct(decl2)
				case *ast.InterfaceType:
					f.checkInterface(decl2)
				}
				return true
			})
		} else if iSpec, ok := spec.(*ast.ImportSpec); ok && iSpec.Name != nil {
			// import: only aliased imports introduce a local name to check.
			f.checkName(iSpec.Name, "import", true)
		}
	}
}
// checkGenDecl dispatches naming checks based on the declaration keyword
// (const, var, type, or import).
func (f *file) checkGenDecl(decl *ast.GenDecl, top bool) {
	switch decl.Tok {
	case token.CONST:
		f.checkValueName(decl, "const", top)
	case token.VAR:
		f.checkValueName(decl, "var", top)
	case token.TYPE:
		f.checkValueName(decl, "type", top)
	case token.IMPORT:
		f.checkValueName(decl, "import", true)
	}
}
// checkAssign checks names introduced by short variable declarations (:=).
// Plain assignments (=) introduce no new names and are ignored.
func (f *file) checkAssign(assign *ast.AssignStmt) {
	if assign.Tok != token.DEFINE {
		return
	}
	for _, lhs := range assign.Lhs {
		if ident, ok := lhs.(*ast.Ident); ok {
			f.checkName(ident, "var", true)
		}
	}
}
// checkFileContent walks every top-level declaration: the package name,
// function declarations (and everything nested in their bodies), and generic
// declarations. Test files are skipped entirely.
func (f *file) checkFileContent() {
	if f.isTest() {
		return
	}
	if f.config.PackageName {
		f.checkPkgName(f.ast.Name)
	}
	for _, v := range f.ast.Decls {
		switch decl := v.(type) {
		case *ast.FuncDecl:
			f.checkFunctionDeclare(decl)
			if decl.Body == nil {
				// Body-less declaration: nothing further to inspect.
				break
			}
			// Walk the body for nested declarations, := assignments, and
			// struct types that introduce names.
			ast.Inspect(decl.Body, func(node ast.Node) bool {
				switch decl2 := node.(type) {
				case *ast.GenDecl:
					f.checkGenDecl(decl2, false)
				case *ast.FuncDecl:
					f.checkFunctionDeclare(decl2)
				case *ast.AssignStmt:
					f.checkAssign(decl2)
				case *ast.StructType:
					f.checkStruct(decl2)
				}
				return true
			})
		case *ast.GenDecl:
			f.checkGenDecl(decl, true)
		}
	}
}
|
package marathon
import (
"encoding/json"
"github.com/stretchr/testify/require"
"github.com/wndhydrnt/proxym/types"
"log"
"net/http"
"net/http/httptest"
"testing"
)
// TestServicesFromMarathon verifies that Generate translates Marathon apps
// and tasks (served from stubbed /v2/apps and /v2/tasks endpoints) into
// proxym services, covering bridge networking, labels, multi-port apps, and
// host networking.
//
// Fix: all require.Equal calls now use the documented testify argument order
// (t, expected, actual); roughly half previously had the arguments swapped,
// which produced misleading failure messages.
func TestServicesFromMarathon(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" && r.RequestURI == "/v2/apps" && r.Header.Get("Accept") == "application/json" {
			marathonApps := Apps{
				Apps: []App{
					App{
						ID: "/redis",
						Container: Container{
							Docker: Docker{
								Network:      "BRIDGE",
								PortMappings: []PortMapping{PortMapping{ContainerPort: 6379, Protocol: "tcp", ServicePort: 41000}},
							},
						},
						Ports: []int{41000},
					},
					App{
						ID: "/registry",
						Container: Container{
							Docker: Docker{
								Network:      "BRIDGE",
								PortMappings: []PortMapping{PortMapping{ContainerPort: 5000, Protocol: "tcp", ServicePort: 42000}},
							},
						},
						Labels: map[string]string{
							"proxym.domains":            "docker-registry.unit.test,registry.unit.test",
							"proxym.port.5000.config":   "option forwardfor\noption httpchk",
							"proxym.port.5000.protocol": "http",
						},
						Ports: []int{42000},
					},
					App{
						ID: "/graphite-statsd",
						Container: Container{
							Docker: Docker{
								Network: "BRIDGE",
								PortMappings: []PortMapping{
									PortMapping{ContainerPort: 80, Protocol: "tcp", ServicePort: 43000},
									PortMapping{ContainerPort: 2003, Protocol: "tcp", ServicePort: 43001},
									PortMapping{ContainerPort: 8125, Protocol: "udp", ServicePort: 43002},
								},
							},
						},
						Labels: map[string]string{
							"proxym.domains":          "graphite.unit.test",
							"proxym.port.80.protocol": "http",
						},
						Ports: []int{43000, 43001, 43002},
					},
					App{
						ID: "/host-networking",
						Container: Container{
							Docker: Docker{
								Network: "HOST",
							},
						},
						Ports: []int{8888},
					},
				},
			}
			data, err := json.Marshal(marathonApps)
			if err != nil {
				log.Fatal("Error marshalling apps")
			}
			w.Write(data)
			return
		}
		if r.Method == "GET" && r.RequestURI == "/v2/tasks" && r.Header.Get("Accept") == "application/json" {
			marathonTasks := Tasks{
				Tasks: []Task{
					Task{AppID: "/redis", Host: "10.10.10.10", Ports: []int{31001}, ServicePorts: []int{41000}},
					Task{AppID: "/redis", Host: "10.10.10.10", Ports: []int{31003}, ServicePorts: []int{41000}},
					Task{AppID: "/registry", Host: "10.10.10.10", Ports: []int{31002}, ServicePorts: []int{42000}},
					Task{AppID: "/graphite-statsd", Host: "10.10.10.11", Ports: []int{31001, 31002, 31003}, ServicePorts: []int{43000, 43001, 43002}},
					Task{AppID: "/host-networking", Host: "10.10.10.10", Ports: []int{31855}, ServicePorts: []int{8888}},
				},
			}
			data, err := json.Marshal(marathonTasks)
			if err != nil {
				log.Fatal("Error marshalling tasks")
			}
			w.Write(data)
			return
		}
	}))
	defer ts.Close()
	c := &http.Client{}
	generator := Generator{
		httpClient:      c,
		marathonServers: []string{ts.URL},
	}
	services, _ := generator.Generate()
	require.IsType(t, []*types.Service{}, services)
	require.Len(t, services, 6)
	// Service 0: redis (bridge, one port, two tasks).
	require.Equal(t, "marathon_redis_6379", services[0].Id)
	require.Equal(t, "", services[0].Config)
	require.Len(t, services[0].Domains, 0)
	require.Equal(t, 6379, services[0].Port)
	require.Equal(t, "tcp", services[0].TransportProtocol)
	require.Equal(t, 41000, services[0].ServicePort)
	require.Equal(t, "Marathon", services[0].Source)
	require.Equal(t, "10.10.10.10", services[0].Hosts[0].Ip)
	require.Equal(t, 31001, services[0].Hosts[0].Port)
	require.Equal(t, "10.10.10.10", services[0].Hosts[1].Ip)
	require.Equal(t, 31003, services[0].Hosts[1].Port)
	// Service 1: registry (labels drive domains, config, protocol).
	require.Equal(t, "marathon_registry_5000", services[1].Id)
	require.Equal(t, "option forwardfor\noption httpchk", services[1].Config)
	require.Len(t, services[1].Domains, 2)
	require.Contains(t, services[1].Domains, "docker-registry.unit.test")
	require.Contains(t, services[1].Domains, "registry.unit.test")
	require.Equal(t, 5000, services[1].Port)
	require.Equal(t, "http", services[1].TransportProtocol)
	require.Equal(t, 42000, services[1].ServicePort)
	require.Equal(t, "Marathon", services[1].Source)
	require.Equal(t, "10.10.10.10", services[1].Hosts[0].Ip)
	require.Equal(t, 31002, services[1].Hosts[0].Port)
	// Services 2-4: graphite-statsd (one service per port mapping).
	require.Equal(t, "marathon_graphite-statsd_80", services[2].Id)
	require.Equal(t, "", services[2].Config)
	require.Len(t, services[2].Domains, 1)
	require.Contains(t, services[2].Domains, "graphite.unit.test")
	require.Equal(t, 80, services[2].Port)
	require.Equal(t, "http", services[2].TransportProtocol)
	require.Equal(t, 43000, services[2].ServicePort)
	require.Equal(t, "Marathon", services[2].Source)
	require.Equal(t, "10.10.10.11", services[2].Hosts[0].Ip)
	require.Equal(t, 31001, services[2].Hosts[0].Port)
	require.Equal(t, "marathon_graphite-statsd_2003", services[3].Id)
	require.Equal(t, "", services[3].Config)
	require.Len(t, services[3].Domains, 1)
	require.Equal(t, 2003, services[3].Port)
	require.Equal(t, "tcp", services[3].TransportProtocol)
	require.Equal(t, 43001, services[3].ServicePort)
	require.Equal(t, "Marathon", services[3].Source)
	require.Equal(t, "10.10.10.11", services[3].Hosts[0].Ip)
	require.Equal(t, 31002, services[3].Hosts[0].Port)
	require.Equal(t, "marathon_graphite-statsd_8125", services[4].Id)
	require.Equal(t, "", services[4].Config)
	require.Len(t, services[4].Domains, 1)
	require.Equal(t, 8125, services[4].Port)
	require.Equal(t, "udp", services[4].TransportProtocol)
	require.Equal(t, 43002, services[4].ServicePort)
	require.Equal(t, "Marathon", services[4].Source)
	require.Equal(t, "10.10.10.11", services[4].Hosts[0].Ip)
	require.Equal(t, 31003, services[4].Hosts[0].Port)
	// Service 5: host networking uses the app port directly.
	require.Equal(t, "marathon_host-networking_8888", services[5].Id)
	require.Equal(t, "", services[5].Config)
	require.Len(t, services[5].Domains, 0)
	require.Equal(t, 8888, services[5].Port)
	require.Equal(t, "tcp", services[5].TransportProtocol)
	require.Equal(t, 8888, services[5].ServicePort)
	require.Equal(t, "Marathon", services[5].Source)
	require.Equal(t, "10.10.10.10", services[5].Hosts[0].Ip)
	require.Equal(t, 8888, services[5].Hosts[0].Port)
}
// TestShouldNotConsiderAppsWithoutPorts verifies that an app whose Docker
// configuration declares no port mappings yields no services, even when
// tasks exist for it.
func TestShouldNotConsiderAppsWithoutPorts(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "GET" {
			return
		}
		switch r.RequestURI {
		case "/v2/apps":
			apps := Apps{
				Apps: []App{
					{
						ID: "/dummy",
						Container: Container{
							Docker: Docker{
								Network:      "BRIDGE",
								PortMappings: []PortMapping{},
							},
						},
					},
				},
			}
			payload, err := json.Marshal(apps)
			if err != nil {
				log.Fatal("Error marshalling apps")
			}
			w.Write(payload)
		case "/v2/tasks":
			tasks := Tasks{
				Tasks: []Task{
					{AppID: "/dummy", Host: "10.10.10.10", Ports: []int{10001}, ServicePorts: []int{31681}},
				},
			}
			payload, err := json.Marshal(tasks)
			if err != nil {
				log.Fatal("Error marshalling tasks")
			}
			w.Write(payload)
		}
	}))
	defer server.Close()
	generator := Generator{
		httpClient:      &http.Client{},
		marathonServers: []string{server.URL},
	}
	services, _ := generator.Generate()
	require.Empty(t, services)
}
|
package entity
import (
"net"
"time"
)
// Traffic is one observed traffic record: the direction, timestamp, local
// process and host, both endpoints, and packet/byte counters.
// NOTE(review): field naming is inconsistent (SourceIP vs TargetIp); the
// Go-idiomatic fix is TargetIP, but renaming an exported field breaks the
// public API, so it is only flagged here.
type Traffic struct {
	Inbound     bool
	Date        time.Time
	ProcessName string
	Hostname    string
	SourceIP    net.IP
	SourcePort  int
	TargetIp    net.IP
	TargetPort  int
	PacketsCnt  uint
	Size        uint
}
|
package file_test
import (
"os"
"testing"
"github.com/matthew-burr/db/file"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// SetupFileTestDat opens a throwaway database file and returns it along with
// a cleanup function that closes and deletes it.
func SetupFileTestDat() (*file.DBFile, func()) {
	const path = "file_test.dat"
	db := file.Open(path)
	cleanup := func() {
		db.File.Close()
		os.Remove(path)
	}
	return db, cleanup
}
// TestDeleteEntry_RemoveEntryFromIndex checks that deleting a key removes it
// from the in-memory index.
func TestDeleteEntry_RemoveEntryFromIndex(t *testing.T) {
	db, cleanup := SetupFileTestDat()
	defer cleanup()
	const key = "test"
	db.Index.Update(file.NewEntry(key, file.Value("record")), 0)
	require.Contains(t, db.Index, key)
	db.DeleteEntry(key)
	require.NotContains(t, db.Index, key)
}
// TestDeleteEntry_ReturnsDeletedDBFileEntry checks that DeleteEntry returns
// the tombstone entry for the deleted key.
func TestDeleteEntry_ReturnsDeletedDBFileEntry(t *testing.T) {
	db, cleanup := SetupFileTestDat()
	defer cleanup()
	const key = "test"
	deleted := db.DeleteEntry(key)
	assert.True(t, deleted.Deleted())
	assert.Equal(t, key, deleted.Key())
}
// TestDeleteEntry_WritesTombstoneToFile checks that deleting a key persists
// a tombstone record (deleted flag set, empty value) to the data file.
func TestDeleteEntry_WritesTombstoneToFile(t *testing.T) {
	d, cleanup := SetupFileTestDat()
	defer cleanup()
	key := "test"
	d.DeleteEntry(key)
	rdr, err := os.Open(d.File.Name())
	require.NoError(t, err)
	// Bug fix: the opened reader was previously never closed, leaking the
	// file handle (and blocking deletion on Windows).
	defer rdr.Close()
	var got file.DBFileEntry
	_, err = file.DecodeFrom(rdr, &got)
	require.NoError(t, err)
	assert.True(t, got.Deleted())
	assert.Equal(t, key, got.Key())
	assert.Equal(t, "", got.Value())
}
// TestWriteEntry_AddsEntryToIndex checks that writing an entry registers its
// key in the index at offset 0 (the first record of a fresh file).
func TestWriteEntry_AddsEntryToIndex(t *testing.T) {
	d, cleanup := SetupFileTestDat()
	defer cleanup()
	key := "test"
	d.WriteEntry(file.NewEntry(key, file.Value("entry")))
	assert.Contains(t, d.Index, key)
	// Fixed argument order: testify expects (t, expected, actual).
	assert.Equal(t, int64(0), d.Index[key])
}
// TestReadEntry_ReturnsNotFound checks that reading a missing key yields an
// entry echoing the key with the sentinel "<not found>" value.
func TestReadEntry_ReturnsNotFound(t *testing.T) {
	db, cleanup := SetupFileTestDat()
	defer cleanup()
	missing := db.ReadEntry("foo")
	assert.Equal(t, "foo", missing.Key())
	assert.Equal(t, "<not found>", missing.Value())
}
|
package persistence
import (
"database/sql"
"fmt"
"log"
"strings"
"github.com/desafios-job/import-data/domain/entity"
)
// InconsistencyRepo persists import inconsistencies (file name plus error
// message) to the public.inconsistency table.
type InconsistencyRepo struct {
	db *sql.DB
}
// NewInconsistencyRepository builds a repository backed by the given
// database handle.
func NewInconsistencyRepository(db *sql.DB) *InconsistencyRepo {
	return &InconsistencyRepo{db}
}
// SaveMany inconsistency
func (r *InconsistencyRepo) SaveMany(incosistencies entity.Inconsistencies) {
sqlInsert := `
INSERT INTO public.inconsistency
(filename, error_message)
VALUES %s`
argumentIndexes := GetStatementArgsIndex(2, len(incosistencies))
argumentValues := getStatementIncosistencyValues(incosistencies)
statement := fmt.Sprintf(sqlInsert, strings.Join(argumentIndexes, ","))
tx, err := r.db.Begin()
if err != nil {
tx.Rollback()
log.Fatal(err)
}
_, err = r.db.Exec(statement, argumentValues...)
if err != nil {
tx.Rollback()
log.Fatal(err)
}
tx.Commit()
}
// GetAll returns a raw row cursor over every record in the inconsistency
// table; the caller is responsible for closing it.
func (r *InconsistencyRepo) GetAll() (*sql.Rows, error) {
	return FindAll(r.db, "select * from inconsistency")
}
// Truncate empties the inconsistency table (and, per the TruncateTable
// helper's contract, restarts its identity sequence).
func (r *InconsistencyRepo) Truncate() error {
	return TruncateTable(r.db, "inconsistency")
}
// getStatementIncosistencyValues flattens the inconsistencies into the
// positional argument list for the multi-row INSERT: two values (filename,
// error message) per record, in input order.
func getStatementIncosistencyValues(inconsistencies []*entity.Inconsistency) ArgumentValues {
	args := make([]interface{}, 0, 2*len(inconsistencies))
	for _, inconsistency := range inconsistencies {
		args = append(args, inconsistency.FileName, inconsistency.ErrorMessage)
	}
	return args
}
|
package main
import (
"fmt"
"testing"
)
// TestgetGreeting exercises getGreeting with a sample name and fails when it
// produces no greeting.
func TestgetGreeting(t *testing.T) {
	name := "Ted"
	greeting := getGreeting(name)
	fmt.Println(greeting)
	// Bug fix: the old assertion referenced undefined variables (pattern,
	// line) — a compile error — and its condition was inverted, erroring
	// precisely when a greeting WAS returned.
	if greeting == "" {
		t.Errorf("getGreeting(%q) returned an empty greeting", name)
	}
}
|
package main
import (
"fmt"
"math/rand"
"net"
"time"
)
// serverList holds the currently known server addresses. It is replaced by
// the watcher goroutine in main and read elsewhere without synchronization —
// NOTE(review): that is a data race; guard it with a mutex or atomic.Value.
var serverList []string
// main connects to ZooKeeper, watches the registered server list and the
// shared /config node, then runs 100 sequential test clients against the
// discovered servers.
func main() {
	conn, err := GetConnect()
	if err != nil {
		fmt.Printf(" connect zk error: %s \n ", err)
		return
	}
	defer conn.Close()
	serverList, err = GetServerList(conn)
	if err != nil {
		fmt.Printf(" get server list error: %s \n", err)
		return
	}
	/*count := len(serverList)
	if count == 0 {
		err = errors.New("server list is empty")
		return
	}*/
	// Watches the services coming online and going offline in real time, so
	// serverList always reflects the servers currently online.
	snapshots, errors := watchServerList(conn, "/go_servers")
	go func() {
		/*for {
			select {
			case serverList = <-snapshots:
				fmt.Printf("1111:%+v\n", serverList)
				go start()
			case err := <-errors:
				fmt.Printf("2222:%+v\n", err)
			}
		}*/
		// NOTE(review): serverList is written here and read by main and
		// startClient without synchronization — a data race; add a mutex or
		// atomic.Value before relying on this in production.
		for list := range snapshots {
			serverList = list
			fmt.Println("11111", serverList)
			//start()
		}
		//<-time.After(time.Second * 100)
	}()
	MakeDir(conn, "/config")
	configs, errors := watchGetDat(conn, "/config")
	go func() {
		// Log config updates and watch errors as they arrive.
		for {
			select {
			case configData := <-configs:
				fmt.Printf("333:%+v\n", string(configData))
			case err := <-errors:
				fmt.Printf("4444:%+v\n", err)
			}
		}
	}()
	/*for {
		time.Sleep(1 * time.Second)
	}*/
	// Block until the watcher goroutine has delivered a non-empty server list.
	for len(serverList) == 0 {
		time.Sleep(1 * time.Second)
	}
	for i := 0; i < 100; i++ {
		fmt.Println("start Client :", i)
		startClient()
		time.Sleep(1 * time.Second)
	}
}
/*func start() {
for i := 0; i < 10; i++ {
fmt.Println("start Client :", i)
startClient()
time.Sleep(1 * time.Second)
}
}*/
// startClient dials one randomly chosen server, sends "timestamp", and
// prints the reply. The deferred recover keeps a single failed client (e.g.
// a checkError panic) from killing the whole run.
func startClient() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("err:", err)
		}
	}()
	// service := "127.0.0.1:8899"
	// Wait for the watcher goroutine to deliver a server list.
	// Bug fix: this was an empty busy-wait loop that pinned a CPU core;
	// sleeping yields the processor between checks.
	// NOTE(review): the read of serverList is still unsynchronized — confirm
	// and add proper locking alongside the writer in main.
	for len(serverList) == 0 {
		time.Sleep(100 * time.Millisecond)
	}
	serverHost, err := getServerHost()
	if err != nil {
		fmt.Printf("get server host fail: %s \n", err)
		return
	}
	//serverHost := "127.0.0.1:8899"
	fmt.Println("connect host: " + serverHost)
	conn, err := net.Dial("tcp", serverHost)
	checkError(err)
	defer conn.Close()
	fmt.Println("connect ok")
	_, err = conn.Write([]byte("timestamp"))
	checkError(err)
	fmt.Println("write ok")
	buf := make([]byte, 1024)
	n, err := conn.Read(buf)
	checkError(err)
	fmt.Println("recv:", string(buf[:n]))
}
// getServerHost picks one address at random from the global serverList.
// Robustness fix: it now returns an error for an empty list instead of
// letting r.Intn(0) panic.
func getServerHost() (host string, err error) {
	length := len(serverList)
	if length == 0 {
		err = fmt.Errorf("server list is empty")
		return
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	host = serverList[r.Intn(length)]
	return
}
|
package unimatrix
import (
"encoding/json"
"fmt"
"strconv"
)
// Parser exposes the decoded "$this" metadata of a unimatrix response
// together with the resolved resources.
type Parser struct {
	Name           string
	TypeName       string
	Keys           []string
	Resources      []Resource
	Count          int
	UnlimitedCount int
	Offset         int
}

// JsonResponse is the raw response: top-level keys mapped to undecoded JSON.
type JsonResponse map[string]*json.RawMessage

// StaticResponse decodes only the well-known sections of a response: the
// "$this" metadata, the "$associations" section, and "errors".
type StaticResponse struct {
	This struct {
		Name           string        `json:"name"`
		TypeName       string        `json:"type_name"`
		Ids            []interface{} `json:"ids"`
		UnlimitedCount int           `json:"unlimited_count"`
		Offset         int           `json:"offset"`
		Count          int           `json:"count"`
	} `json:"$this"`
	AssociationTypes map[string][]*json.RawMessage `json:"$associations"`
	Errors           []ResourceError               `json:"errors"`
}

// ResourceIndex maps resource name -> resource id -> decoded resource.
type ResourceIndex map[string]map[string]Resource

// AssociationIndex maps association type -> outer id -> association name ->
// list of associated ids.
type AssociationIndex map[string]map[string]map[string][]string
// parseIds normalizes a heterogeneous id list to strings: float64 values
// (the default JSON number decoding) are rendered without decimals, ints via
// strconv, and strings pass through. Any other type is silently skipped.
func parseIds(idsInterface []interface{}) []string {
	var ids []string
	for _, raw := range idsInterface {
		if f, ok := raw.(float64); ok {
			ids = append(ids, fmt.Sprintf("%.0f", f))
		} else if n, ok := raw.(int); ok {
			ids = append(ids, strconv.Itoa(n))
		} else if s, ok := raw.(string); ok {
			ids = append(ids, s)
		}
	}
	return ids
}
// buildResourceIndex decodes every non-meta top-level key of the response
// (keys not starting with "$" and not "errors") into a map keyed by resource
// name and id, wiring the association indices into each resource.
// NOTE(review): json.Unmarshal errors are ignored here, as in the original;
// malformed payloads yield empty entries rather than failures.
func buildResourceIndex(jsonResponse JsonResponse, associationIndex AssociationIndex, errors []ResourceError) ResourceIndex {
	var resourceIndex = make(ResourceIndex)
	for responseKey, responseValue := range jsonResponse {
		// Bug fix: the previous []rune(responseKey)[0] check panicked on an
		// empty key; a direct byte test is safe and equivalent since "$" is
		// ASCII. The string(responseKey) conversion was also redundant.
		if responseKey == "" || responseKey[0] == '$' || responseKey == "errors" {
			continue
		}
		var attributesListRaw []*json.RawMessage
		json.Unmarshal(*responseValue, &attributesListRaw)
		resourceIndex[responseKey] = make(map[string]Resource)
		for _, attributesRaw := range attributesListRaw {
			var resourceId ResourceId
			json.Unmarshal(*attributesRaw, &resourceId)
			resource := *NewResource(responseKey, attributesRaw)
			resource.AddAssociationIndices(&resourceIndex, &associationIndex, &errors)
			resourceIndex[responseKey][resourceId.Id] = resource
		}
	}
	return resourceIndex
}
// buildAssociationIndex flattens the "$associations" section into a
// three-level map: association type -> outer id -> association name -> ids.
// NOTE(review): Unmarshal errors and missing "id"/"ids" keys are unchecked —
// a payload lacking them dereferences a nil *json.RawMessage and panics;
// confirm whether the API guarantees their presence.
func buildAssociationIndex(staticResponse StaticResponse) AssociationIndex {
	var associationIndex = make(AssociationIndex)
	associationTypes := staticResponse.AssociationTypes
	// Idiom fix: dropped the redundant ", _" from the range clause.
	for associationType := range associationTypes {
		associationIndex[associationType] = make(map[string]map[string][]string)
		for _, associationList := range associationTypes[associationType] {
			var associationOuter map[string]*json.RawMessage
			var associationOuterId string
			json.Unmarshal(*associationList, &associationOuter)
			json.Unmarshal(*associationOuter["id"], &associationOuterId)
			associationIndex[associationType][associationOuterId] = make(map[string][]string)
			for key, value := range associationOuter {
				if key != "id" {
					var associationInner map[string]*json.RawMessage
					var associationInnerIds []string
					json.Unmarshal(*value, &associationInner)
					json.Unmarshal(*associationInner["ids"], &associationInnerIds)
					associationIndex[associationType][associationOuterId][key] = associationInnerIds
				}
			}
		}
	}
	return associationIndex
}
// resources resolves the ordered id list against the resource index,
// preserving the caller's id order. Ids missing from the index contribute
// zero-value Resources, matching the original behavior; an absent or empty
// index yields nil.
func resources(name string, ids []string, resourceIndex ResourceIndex) []Resource {
	byId := resourceIndex[name]
	// Perf: hoisted the loop-invariant emptiness check out of the loop.
	if len(byId) == 0 {
		return nil
	}
	var result []Resource
	for _, id := range ids {
		result = append(result, byId[id])
	}
	return result
}
// NewParser decodes a raw unimatrix response and resolves its resources.
// It returns an error when the payload is not a JSON object (detected via
// the nil map, which also covers a literal "null" body).
// NOTE(review): individual Unmarshal errors are otherwise ignored, matching
// the original behavior.
func NewParser(rawResponse []byte) (*Parser, error) {
	var staticResponse StaticResponse
	var jsonResponse JsonResponse
	// Idiom fix: rawResponse is already []byte; the previous
	// []byte(rawResponse) conversions were redundant copies.
	json.Unmarshal(rawResponse, &staticResponse)
	json.Unmarshal(rawResponse, &jsonResponse)
	if jsonResponse == nil {
		return nil, NewUnimatrixError("Unable to parse json response")
	}
	this := staticResponse.This
	ids := parseIds(this.Ids)
	resourceErrors := staticResponse.Errors
	associationIndex := buildAssociationIndex(staticResponse)
	resourceIndex := buildResourceIndex(jsonResponse, associationIndex, resourceErrors)
	return &Parser{
		Name:           this.Name,
		TypeName:       this.TypeName,
		Keys:           ids,
		Resources:      resources(this.Name, ids, resourceIndex),
		Count:          this.Count,
		UnlimitedCount: this.UnlimitedCount,
		Offset:         this.Offset,
	}, nil
}
|
package usecase
import (
"errors"
"fmt"
"log"
"os"
"strings"
"text/tabwriter"
"time"
"github.com/maestre3d/bob/common/util"
"github.com/maestre3d/bob/entity"
)
// GenerateService Create a new service inside the matching workspace.
//
// The target workspace is matched by application name or by the current
// working directory. Returns an error when no workspace matches, or when
// a service with the same name already exists in it.
func GenerateService(name, appName, description string) error {
	app := util.GetSettings()
	currentWorkspace := util.GetCurrentWorkspace()
	if appName == "" && currentWorkspace == "" {
		return errors.New("Failed to create service, must be inside an application folder")
	}
	// Find the owning workspace and verify the service name is unique in it.
	serviceID := -1
	for i, workspace := range app.Workspaces {
		if workspace.Name == strings.ToLower(appName) || workspace.Path == currentWorkspace {
			serviceID = i
			for _, serviceWork := range workspace.Services {
				if serviceWork.Name == strings.ToLower(name) {
					return errors.New("Service already exists")
				}
			}
		}
	}
	// Previously an unmatched workspace silently fell through to index 0
	// (and panicked when no workspaces existed); fail explicitly instead.
	if serviceID < 0 {
		return errors.New("Failed to create service, no matching workspace found")
	}
	service := new(entity.Service)
	service.Name = name
	service.Description = description
	service.Path = app.Workspaces[serviceID].Path + name + "/"
	service.CreatedAt = time.Now()
	app.Workspaces[serviceID].Services = append(app.Workspaces[serviceID].Services, service)
	log.Printf("SERVICE: Created %s/%s service\n", app.Workspaces[serviceID].Name, name)
	return util.OverrideSettings(app)
}
// GetAllServiceInfo Get all service(s) information
//
// Prints a tab-aligned table of every service in the workspace whose name
// matches appName (case-insensitive) to stdout.
func GetAllServiceInfo(appName string) error {
	app := util.GetSettings()
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, '\t', tabwriter.Debug|tabwriter.AlignRight)
	fmt.Fprintln(tw, "Service\tPath\tWorkspace\tCreated At")
	target := strings.ToLower(appName)
	for _, workspace := range app.Workspaces {
		if workspace.Name != target {
			continue
		}
		for _, service := range workspace.Services {
			fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", service.Name, service.Path, workspace.Name, service.CreatedAt)
		}
	}
	fmt.Fprintln(tw)
	tw.Flush()
	return nil
}
// GetServiceInfo Get service information
//
// Prints a tab-aligned row for the named service inside the named
// application workspace (both matched case-insensitively) to stdout.
func GetServiceInfo(appName, name string) error {
	app := util.GetSettings()
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, '\t', tabwriter.Debug|tabwriter.AlignRight)
	fmt.Fprintln(tw, "Service\tPath\tWorkspace\tCreated At")
	wantApp, wantService := strings.ToLower(appName), strings.ToLower(name)
	for _, workspace := range app.Workspaces {
		if workspace.Name != wantApp {
			continue
		}
		for _, service := range workspace.Services {
			if service.Name == wantService {
				fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", service.Name, service.Path, workspace.Name, service.CreatedAt)
			}
		}
	}
	fmt.Fprintln(tw)
	tw.Flush()
	return nil
}
// RemoveService Remove an existing service
//
// NOTE(review): this is currently a stub — it only logs the removal and
// never mutates or persists the settings (the unexported removeService
// helper below is not called). Confirm whether that is intentional.
func RemoveService(appName, name string) error {
	log.Printf("SERVICE: Removed %s/%s service\n", appName, name)
	return nil
}
// removeService deletes the element at index i by swapping in the last
// element and truncating — O(1), but it does not preserve order.
// NOTE(review): despite its name it operates on workspaces, not services.
func removeService(s []*entity.Workspace, i int) []*entity.Workspace {
	s[i] = s[len(s)-1]
	return s[:len(s)-1]
}
|
package host
import (
"io"
"os"
)
// RestoreFile overwrite content of a hosts file with the content of a backup.
func RestoreFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.Create(dst)
if err != nil {
return err
}
defer dstFile.Close()
_, err = io.Copy(dstFile, srcFile)
return err
}
|
package common
import (
"errors"
"fmt"
"github.com/go-ini/ini"
"github.com/speedata/gogit"
"os"
"path"
"regexp"
)
// findGitRevision returns the abbreviated (7-character) HEAD commit hash
// of the git repository containing file.
func findGitRevision(file string) (string, error) {
	gitDir, err := findGitDirectory(file)
	if err != nil {
		return "", err
	}
	log.Debugf("Loading revision from git directory '%s'", gitDir)
	repository, err := gogit.OpenRepository(gitDir)
	if err != nil {
		return "", err
	}
	ref, err := repository.LookupReference("HEAD")
	if err != nil {
		return "", err
	}
	ci, err := repository.LookupCommit(ref.Oid)
	if err != nil {
		return "", err
	}
	// String() already yields a string; the previous string(...) wrapper
	// was redundant.
	return ci.Id().String()[:7], nil
}
// findGitRemoteURL returns the URL of the "origin" remote configured for
// the git repository containing file, read from <gitdir>/config.
func findGitRemoteURL(file string) (string, error) {
	gitDir, err := findGitDirectory(file)
	if err != nil {
		return "", err
	}
	log.Debugf("Loading slug from git directory '%s'", gitDir)
	// Git config keys are case-insensitive, hence InsensitiveLoad.
	gitconfig, err := ini.InsensitiveLoad(fmt.Sprintf("%s/config", gitDir))
	if err != nil {
		return "", err
	}
	// The section header in a git config file is literally: [remote "origin"]
	remote, err := gitconfig.GetSection("remote \"origin\"")
	if err != nil {
		return "", err
	}
	urlKey, err := remote.GetKey("url")
	if err != nil {
		return "", err
	}
	url := urlKey.String()
	return url, nil
}
// findGitSlug classifies a git remote URL as GitHub or AWS CodeCommit and
// extracts its repository slug. Unrecognized URLs are returned unchanged
// with an empty provider name and no error.
func findGitSlug(url string) (string, string, error) {
	codeCommitHTTPRegex := regexp.MustCompile(`^http(s?)://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`)
	codeCommitSSHRegex := regexp.MustCompile(`ssh://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`)
	httpRegex := regexp.MustCompile(`^http(s?)://.*github.com.*/(.+)/(.+).git$`)
	sshRegex := regexp.MustCompile(`github.com:(.+)/(.+).git$`)
	if m := codeCommitHTTPRegex.FindStringSubmatch(url); m != nil {
		return "CodeCommit", m[3], nil
	}
	if m := codeCommitSSHRegex.FindStringSubmatch(url); m != nil {
		return "CodeCommit", m[2], nil
	}
	if m := httpRegex.FindStringSubmatch(url); m != nil {
		return "GitHub", fmt.Sprintf("%s/%s", m[2], m[3]), nil
	}
	if m := sshRegex.FindStringSubmatch(url); m != nil {
		return "GitHub", fmt.Sprintf("%s/%s", m[1], m[2]), nil
	}
	return "", url, nil
}
// findGitDirectory walks upward from fromFile looking for a ".git"
// directory and returns its path.
func findGitDirectory(fromFile string) (string, error) {
	log.Debugf("Searching for git directory in %s", fromFile)
	fi, err := os.Stat(fromFile)
	if err != nil {
		return "", err
	}
	var dir string
	if fi.Mode().IsDir() {
		dir = fromFile
	} else {
		dir = path.Dir(fromFile)
	}
	gitPath := path.Join(dir, ".git")
	fi, err = os.Stat(gitPath)
	if err == nil && fi.Mode().IsDir() {
		return gitPath, nil
	}
	// Stop once the directory no longer shrinks. This covers "/" (the old
	// check) and also ".", which path.Dir maps to itself — the previous
	// `dir == "/"` test made relative paths without a repo recurse forever.
	parent := path.Dir(dir)
	if parent == dir {
		return "", errors.New("Unable to find git repo")
	}
	return findGitDirectory(parent)
}
|
package server
import (
"fmt"
"net/http"
"time"
"github.com/izikaj/iziproxy/shared"
)
// waitForResponseParams bundles everything waitForResponse needs to relay
// a proxied response back to the waiting HTTP client.
type waitForResponseParams struct {
	core    *Server              // owning server; guards pool and stats
	req     *shared.Request      // proxied request being waited on
	signal  *CodeSignal          // fires when a reply for req arrives (see waitForResponse)
	w       *http.ResponseWriter // destination for the relayed response
	timeout time.Duration        // how long to wait before answering 504
}
// waitForResponse blocks until either the response signal fires — in which
// case the pooled response is relayed to the client — or the timeout
// elapses, in which case a 504 is written and a timeout is recorded.
//
// NOTE(review): the error returned by processResponse is discarded, and
// the named return err is never assigned, so callers always see nil.
func (server *commonWebHelpers) waitForResponse(params waitForResponseParams) (err error) {
	select {
	case <-*params.signal:
		server.processResponse(params)
	case <-time.After(params.timeout):
		params.core.Stats.timeout()
		server.writeFailResponse(params.w, http.StatusGatewayTimeout, "TIMEOUT ERROR")
	}
	return
}
// processResponse looks up the pooled response for params.req, relays its
// status, headers and body to the waiting HTTP client, and removes it from
// the pool. Missing or empty (status 0) responses are reported as 502s.
//
// NOTE(review): the bare writeFailResponse calls below differ from the
// server.writeFailResponse method call in waitForResponse — confirm both
// resolve to the intended function. The named return err is never set.
func (server *commonWebHelpers) processResponse(params waitForResponseParams) (err error) {
	// Fetch the entry under the lock; release before writing to the client.
	params.core.Lock()
	d, ok := params.core.pool[params.req.ID]
	params.core.Unlock()
	if ok {
		resp := d.Response
		if resp.Status == 0 {
			// Status 0 means the client never filled in a response.
			params.core.Stats.fail()
			writeFailResponse(params.w, http.StatusBadGateway, "EMPTY RESPONSE FROM CLIENT")
			return
		}
		fmt.Printf("> [%d] %s\n", resp.Status, (*d).Request.Path)
		w := *params.w
		// Copy all proxied headers before the status line is written.
		for _, header := range resp.Headers {
			for _, value := range header.Value {
				w.Header().Set(header.Name, value)
			}
		}
		w.WriteHeader(resp.Status)
		w.Write(resp.Body)
		// Drop the delivered entry from the pool.
		params.core.Lock()
		delete(params.core.pool, params.req.ID)
		params.core.Unlock()
		params.core.Stats.complete()
	} else {
		params.core.Stats.fail()
		writeFailResponse(params.w, http.StatusBadGateway, "NO RESPONSE FROM CLIENT")
	}
	return
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
import (
"context"
"sync"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// SeparatedIntentsEnabled controls whether separated intents are written. A
// true setting is also gated on clusterversion.SeparatedIntents. After all
// nodes in a cluster are at or beyond clusterversion.SeparatedIntents,
// different nodes will see the version state transition at different times.
// Even nodes that have not yet seen the transition need to be able to read
// separated intents and to write over separated intents (due to a lease
// transfer from a node that has seen the transition to one that has not).
// Therefore, the clusterversion and the value of this setting do not affect
// whether intentDemuxWriter or intentInterleavingReader are used. They only
// affect whether intentDemuxWriter will write separated intents. As expected,
// this value can be set to false to disable writing of separated intents.
//
// Currently there is no long-running migration to replace all interleaved
// intents with separated intents, but we expect that when a cluster has been
// running with this flag set to true for some time, most ranges will only
// have separated intents. Similarly, setting this to false will gradually
// cause most ranges to only have interleaved intents.
//
// The registered default is false (interleaved intents).
var SeparatedIntentsEnabled = settings.RegisterBoolSetting(
	"storage.transaction.separated_intents.enabled",
	"if enabled, intents will be written to a separate lock table, instead of being "+
		"interleaved with MVCC values",
	false,
)
// This file defines wrappers for Reader and Writer, and functions to do the
// wrapping, which depend on the configuration settings above.

// intentDemuxWriter implements 3 methods from the Writer interface:
// PutIntent, ClearIntent, ClearMVCCRangeAndIntents.
type intentDemuxWriter struct {
	w Writer
	// Must be non-nil if this intentDemuxWriter is used. We do the checking
	// lazily when methods are called since the clients of intentDemuxWriter
	// initialize it up-front, but don't know if they are being used by code
	// that cares about intents (e.g. a temporary Engine used for disk-spilling
	// during query execution will never read-write intents).
	settings *cluster.Settings
	// cachedSettingsAreValid reports whether the two cached fields below were
	// populated at construction time (see wrapIntentWriter).
	cachedSettingsAreValid             bool
	clusterVersionIsRecentEnoughCached bool
	writeSeparatedIntentsCached        bool
}
// wrapIntentWriter wraps w in an intentDemuxWriter. For short-lived writers
// (isLongLived == false) with non-nil settings, the cluster version gate and
// the SeparatedIntentsEnabled setting are read once here and cached.
func wrapIntentWriter(
	ctx context.Context, w Writer, settings *cluster.Settings, isLongLived bool,
) intentDemuxWriter {
	idw := intentDemuxWriter{w: w, settings: settings}
	if !isLongLived && settings != nil {
		// Cache the settings for performance.
		idw.cachedSettingsAreValid = true
		// Be resilient to the version not yet being initialized.
		idw.clusterVersionIsRecentEnoughCached = !idw.settings.Version.ActiveVersionOrEmpty(ctx).Less(
			clusterversion.ByKey(clusterversion.SeparatedIntents))
		idw.writeSeparatedIntentsCached =
			SeparatedIntentsEnabled.Get(&idw.settings.SV)
	}
	return idw
}
// ClearIntent has the same behavior as Writer.ClearIntent. buf is used as
// scratch-space to avoid allocations -- its contents will be overwritten and
// not appended to, and a possibly different buf returned.
//
// separatedIntentCountDelta is -1 when an existing separated intent was
// cleared, 0 otherwise.
func (idw intentDemuxWriter) ClearIntent(
	key roachpb.Key,
	state PrecedingIntentState,
	txnDidNotUpdateMeta bool,
	txnUUID uuid.UUID,
	buf []byte,
) (_ []byte, separatedIntentCountDelta int, _ error) {
	if idw.settings == nil {
		// Typo fixed in the error message ("Setttings" -> "Settings").
		return nil, 0, errors.AssertionFailedf("intentDemuxWriter not configured with cluster.Settings")
	}
	switch state {
	case ExistingIntentInterleaved:
		return buf, 0, idw.w.ClearUnversioned(key)
	case ExistingIntentSeparated:
		var engineKey EngineKey
		engineKey, buf = LockTableKey{
			Key:      key,
			Strength: lock.Exclusive,
			TxnUUID:  txnUUID[:],
		}.ToEngineKey(buf)
		// txnDidNotUpdateMeta allows the cheaper single-clear.
		if txnDidNotUpdateMeta {
			return buf, -1, idw.w.SingleClearEngineKey(engineKey)
		}
		return buf, -1, idw.w.ClearEngineKey(engineKey)
	default:
		return buf, 0, errors.AssertionFailedf("ClearIntent: invalid preceding state %d", state)
	}
}
// PutIntent has the same behavior as Writer.PutIntent. buf is used as
// scratch-space to avoid allocations -- its contents will be overwritten and
// not appended to, and a possibly different buf returned.
//
// separatedIntentCountDelta reflects the net change in the number of
// separated intents implied by this write (-1, 0, or +1).
func (idw intentDemuxWriter) PutIntent(
	ctx context.Context,
	key roachpb.Key,
	value []byte,
	state PrecedingIntentState,
	txnDidNotUpdateMeta bool,
	txnUUID uuid.UUID,
	buf []byte,
) (_ []byte, separatedIntentCountDelta int, _ error) {
	if idw.settings == nil {
		// Typo fixed in the error message ("Setttings" -> "Settings").
		return nil, 0, errors.AssertionFailedf("intentDemuxWriter not configured with cluster.Settings")
	}
	var writeSeparatedIntents bool
	if idw.cachedSettingsAreValid {
		// Fast-path
		writeSeparatedIntents = idw.clusterVersionIsRecentEnoughCached && idw.writeSeparatedIntentsCached
	} else {
		// Slow-path, when doing writes on the Engine directly. This should not be
		// performance sensitive code.
		writeSeparatedIntents =
			// Be resilient to the version not yet being initialized.
			!idw.settings.Version.ActiveVersionOrEmpty(ctx).Less(
				clusterversion.ByKey(clusterversion.SeparatedIntents)) &&
				SeparatedIntentsEnabled.Get(&idw.settings.SV)
	}
	var engineKey EngineKey
	if state == ExistingIntentSeparated || writeSeparatedIntents {
		engineKey, buf = LockTableKey{
			Key:      key,
			Strength: lock.Exclusive,
			TxnUUID:  txnUUID[:],
		}.ToEngineKey(buf)
	}
	if state == ExistingIntentSeparated && !writeSeparatedIntents {
		// Switching this intent from separated to interleaved.
		if txnDidNotUpdateMeta {
			if err := idw.w.SingleClearEngineKey(engineKey); err != nil {
				return buf, 0, err
			}
		} else {
			if err := idw.w.ClearEngineKey(engineKey); err != nil {
				return buf, 0, err
			}
		}
	} else if state == ExistingIntentInterleaved && writeSeparatedIntents {
		// Switching this intent from interleaved to separated.
		if err := idw.w.ClearUnversioned(key); err != nil {
			return buf, 0, err
		}
	}
	// Else, staying separated or staying interleaved or there was no preceding
	// intent, so don't need to explicitly clear.
	if state == ExistingIntentSeparated {
		separatedIntentCountDelta = -1
	}
	// Write intent
	if writeSeparatedIntents {
		separatedIntentCountDelta++
		return buf, separatedIntentCountDelta, idw.w.PutEngineKey(engineKey, value)
	}
	return buf, separatedIntentCountDelta, idw.w.PutUnversioned(key, value)
}
// ClearMVCCRangeAndIntents has the same behavior as
// Writer.ClearMVCCRangeAndIntents. buf is used as scratch-space to avoid
// allocations -- its contents will be overwritten and not appended to, and a
// possibly different buf returned.
func (idw intentDemuxWriter) ClearMVCCRangeAndIntents(
	start, end roachpb.Key, buf []byte,
) ([]byte, error) {
	if idw.settings == nil {
		// Typo fixed in the error message ("Setttings" -> "Settings").
		return nil, errors.AssertionFailedf("intentDemuxWriter not configured with cluster.Settings")
	}
	// Clear the MVCC key range first, then the corresponding span of the
	// lock table.
	err := idw.w.ClearRawRange(start, end)
	if err != nil {
		return buf, err
	}
	lstart, buf := keys.LockTableSingleKey(start, buf)
	lend, _ := keys.LockTableSingleKey(end, nil)
	return buf, idw.w.ClearRawRange(lstart, lend)
}
// safeToWriteSeparatedIntents reports whether the cluster version gate
// (clusterversion.SeparatedIntents) permits writing separated intents. It
// deliberately does not consult the SeparatedIntentsEnabled setting.
func (idw intentDemuxWriter) safeToWriteSeparatedIntents(ctx context.Context) (bool, error) {
	if idw.settings == nil {
		return false,
			errors.Errorf(
				"intentDemuxWriter without cluster.Settings does not support SafeToWriteSeparatedIntents")
	}
	if idw.cachedSettingsAreValid {
		return idw.clusterVersionIsRecentEnoughCached, nil
	}
	// Be resilient to the version not yet being initialized.
	return !idw.settings.Version.ActiveVersionOrEmpty(ctx).Less(
		clusterversion.ByKey(clusterversion.SeparatedIntents)), nil
}
// wrappableReader is used to implement a wrapped Reader. A wrapped Reader
// should be used and immediately discarded. It maintains no state of its own
// between calls.
// Why do we not keep the wrapped reader as a member in the caller? Because
// different methods on Reader can need different wrappings depending on what
// they want to observe.
//
// TODO(sumeer): for allocation optimization we could expose a scratch space
// struct that the caller keeps on behalf of the wrapped reader. But can only
// do such an optimization when know that the wrappableReader will be used
// with external synchronization that prevents preallocated buffers from being
// modified concurrently. pebbleBatch.{MVCCGet,MVCCGetProto} have MVCCKey
// serialization allocation optimizations which we can't do below. But those
// are probably not performance sensitive, since the performance sensitive
// code probably uses an MVCCIterator.
type wrappableReader interface {
	Reader
	// rawGet returns the value stored at an already-encoded engine key, if
	// any, without any intent interleaving.
	rawGet(key []byte) (value []byte, err error)
}
// wrapReader wraps the provided reader, to return an implementation of MVCCIterator
// that supports MVCCKeyAndIntentsIterKind. The returned reader comes from a
// sync.Pool; release it by calling Free.
func wrapReader(r wrappableReader) *intentInterleavingReader {
	iiReader := intentInterleavingReaderPool.Get().(*intentInterleavingReader)
	*iiReader = intentInterleavingReader{wrappableReader: r}
	return iiReader
}
// intentInterleavingReader is a thin wrapper around a wrappableReader.
// Instances are obtained via wrapReader and returned to the pool via Free.
type intentInterleavingReader struct {
	wrappableReader
}

var _ Reader = &intentInterleavingReader{}

// intentInterleavingReaderPool recycles readers so wrapReader does not
// allocate on every call.
var intentInterleavingReaderPool = sync.Pool{
	New: func() interface{} {
		return &intentInterleavingReader{}
	},
}
// MVCCGet implements the Reader interface. When the raw lookup finds
// nothing and the key has an empty timestamp, the lock table is also
// consulted for a separated intent.
func (imr *intentInterleavingReader) MVCCGet(key MVCCKey) ([]byte, error) {
	val, err := imr.wrappableReader.rawGet(EncodeKey(key))
	if val != nil || err != nil || !key.Timestamp.IsEmpty() {
		return val, err
	}
	// The meta could be in the lock table. Constructing an Iterator for each
	// Get is not efficient, but this function is deprecated and only used for
	// tests, so we don't care.
	ltKey, _ := keys.LockTableSingleKey(key.Key, nil)
	iter := imr.wrappableReader.NewEngineIterator(IterOptions{Prefix: true, LowerBound: ltKey})
	defer iter.Close()
	valid, err := iter.SeekEngineKeyGE(EngineKey{Key: ltKey})
	if !valid || err != nil {
		return nil, err
	}
	val = iter.Value()
	return val, nil
}
// MVCCGetProto implements the Reader interface by delegating to the
// pebbleGetProto helper with this reader, so the lookup goes through the
// intent-aware MVCCGet above.
func (imr *intentInterleavingReader) MVCCGetProto(
	key MVCCKey, msg protoutil.Message,
) (ok bool, keyBytes, valBytes int64, err error) {
	return pebbleGetProto(imr, key, msg)
}
// NewMVCCIterator implements the Reader interface. The
// intentInterleavingReader can be freed once this method returns.
func (imr *intentInterleavingReader) NewMVCCIterator(
	iterKind MVCCIterKind, opts IterOptions,
) MVCCIterator {
	// Timestamp hints are incompatible with surfacing interleaved intents.
	if (!opts.MinTimestampHint.IsEmpty() || !opts.MaxTimestampHint.IsEmpty()) &&
		iterKind == MVCCKeyAndIntentsIterKind {
		panic("cannot ask for interleaved intents when specifying timestamp hints")
	}
	// Plain MVCC iteration needs no interleaving; delegate directly.
	if iterKind == MVCCKeyIterKind {
		return imr.wrappableReader.NewMVCCIterator(MVCCKeyIterKind, opts)
	}
	return newIntentInterleavingIterator(imr.wrappableReader, opts)
}
// Free zeroes the reader (dropping its reference to the wrapped reader)
// and returns it to the pool for reuse.
func (imr *intentInterleavingReader) Free() {
	*imr = intentInterleavingReader{}
	intentInterleavingReaderPool.Put(imr)
}
|
package main
import (
"bufio"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
// pthFactor returns the p-th smallest divisor of n, or 0 when n has fewer
// than p divisors.
//
// Fixes over the original: the leftover debug fmt.Println that polluted
// stdout on every call is removed, and the sqrt bound is hoisted out of
// the loop condition instead of being recomputed each iteration.
func pthFactor(n int64, p int64) int64 {
	// Collect divisors in pairs (i, n/i): the small halves arrive in
	// ascending order, the large cofactors in descending order.
	var small []int64
	var large []int64
	limit := int64(math.Sqrt(float64(n)))
	for i := int64(1); i <= limit; i++ {
		if n%i == 0 {
			small = append(small, i)
			if n/i != i {
				large = append(large, n/i)
			}
		}
	}
	// Reverse the cofactors so the combined list is fully ascending.
	for i, j := 0, len(large)-1; i < j; i, j = i+1, j-1 {
		large[i], large[j] = large[j], large[i]
	}
	small = append(small, large...)
	if p > int64(len(small)) {
		return 0
	}
	return small[p-1]
}
// main reads n and p from stdin (HackerRank-style harness), writes the
// p-th factor of n to the file named by the OUTPUT_PATH environment
// variable, and flushes the buffered writer before exiting.
func main() {
	reader := bufio.NewReaderSize(os.Stdin, 16*1024*1024)
	stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
	checkError(err)
	defer stdout.Close()
	writer := bufio.NewWriterSize(stdout, 16*1024*1024)
	n, err := strconv.ParseInt(strings.TrimSpace(readLine(reader)), 10, 64)
	checkError(err)
	p, err := strconv.ParseInt(strings.TrimSpace(readLine(reader)), 10, 64)
	checkError(err)
	result := pthFactor(n, p)
	fmt.Fprintf(writer, "%d\n", result)
	writer.Flush()
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
// checkError panics when err is non-nil; used for fail-fast I/O handling
// in this command-line program.
func checkError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package main
import (
"fmt"
"github.com/mdegaris/go-learning/factorial"
"github.com/mdegaris/go-learning/greeting"
"github.com/mdegaris/go-learning/primes"
)
// main greets in three languages, then prints a primes list and a
// factorial computed by the demo packages.
func main() {
	greeting.Greet(greeting.ENGLISH)
	greeting.Greet(greeting.FRENCH)
	greeting.Greet(greeting.SPANISH)
	p := 300 // upper bound passed to the prime generator
	f := 7   // argument for the factorial
	fmt.Println("Primes list", p, primes.GeneratePrimes(p))
	fmt.Println("Factorial of", f, factorial.Factorial(f))
}
|
package progress
import "os"
// progressLogger provides a wrapper around an os.File that can either
// write to the file or ignore all writes completely.
type progressLogger struct {
	writeData bool     // when false, Write becomes a no-op (see Shutdown)
	log       *os.File // destination file; may be nil, in which case Close is a no-op
}
// Write will write to the file and perform a Sync() if writing succeeds.
// Writes are silently dropped once the logger has been shut down.
func (l *progressLogger) Write(b []byte) error {
	if !l.writeData {
		return nil
	}
	_, err := l.log.Write(b)
	if err == nil {
		err = l.log.Sync()
	}
	return err
}
// Close will call Close() on the underlying file, when one is set.
func (l *progressLogger) Close() error {
	if l.log == nil {
		return nil
	}
	return l.log.Close()
}
// Shutdown will cause the logger to ignore any further writes. It should
// be used when writing causes an error.
// NOTE(review): this flips writeData without synchronization; confirm the
// logger is only used from a single goroutine.
func (l *progressLogger) Shutdown() {
	l.writeData = false
}
|
package ui
import (
"github.com/galaco/lambda-client/engine"
vguiCore "github.com/galaco/lambda-core/loader/vgui"
"github.com/galaco/lambda-core/vgui"
"github.com/galaco/tinygametools"
"github.com/galaco/filesystem"
)
// Gui is the engine manager that draws VGUI panels each frame.
type Gui struct {
	engine.Manager
	window      *tinygametools.Window // window supplied at construction; not used by the methods here
	masterPanel vgui.MasterPanel      // root panel owning all loaded VGUI children
}

// Register is a no-op; the Gui requires no registration work.
func (ui *Gui) Register() {
}

// Update draws the UI each frame; dt is currently unused.
func (ui *Gui) Update(dt float64) {
	ui.Render()
}

// Render draws the master panel (and therefore its children).
func (ui *Gui) Render() {
	ui.masterPanel.Draw()
}

// LoadVGUIResource loads a VGUI definition from the filesystem and
// attaches it as a child of the master panel.
func (ui *Gui) LoadVGUIResource(fs *filesystem.FileSystem, filename string) error {
	p, err := vguiCore.LoadVGUI(fs, filename)
	if err != nil {
		return err
	}
	ui.masterPanel.AddChild(p)
	return nil
}

// MasterPanel returns a pointer to the root panel.
func (ui *Gui) MasterPanel() *vgui.MasterPanel {
	return &ui.masterPanel
}

// NewGUIManager returns a Gui bound to the provided window.
func NewGUIManager(win *tinygametools.Window) *Gui {
	return &Gui{
		window: win,
	}
}
|
package main
import (
"fmt"
"runtime"
"sync"
"time"
)
// main demonstrates goroutine scheduling and loop-variable capture under
// GOMAXPROCS(1). The two loops intentionally contrast closure capture of
// the loop variable with passing it as an argument.
func main() {
	runtime.GOMAXPROCS(1)
	wg := sync.WaitGroup{}
	// 10 goroutines from the first loop + 10 from the second.
	wg.Add(20)
	for i := 0; i < 10; i++ {
		// This closure captures the loop variable i itself. Before Go 1.22
		// all iterations share one variable, so the value printed depends on
		// when each goroutine runs relative to the loop.
		go func() {
			fmt.Println("first i: ", i)
			wg.Done()
		}()
		// Busy printing gives the single-threaded scheduler opportunities to
		// run the goroutine mid-loop.
		for j := 0; j < 10000; j++ {
			fmt.Println("first sleep")
		}
		//time.Sleep(time.Microsecond)
	}
	for i := 0; i < 10; i++ {
		// Passing i as an argument snapshots its value per iteration, so
		// each goroutine prints its own i.
		go func(i int) {
			fmt.Println("second i: ", i)
			wg.Done()
		}(i)
	}
	wg.Wait()
	time.Sleep(time.Microsecond)
}
|
package tree
import "fmt"
// treeNode is a node in a binary tree of ints.
type treeNode struct {
	val       int       // value stored at this node
	leftNode  *treeNode // left child, nil when absent
	rightNode *treeNode // right child, nil when absent
}
// Preorder prints the tree in root-left-right (pre-order) traversal,
// one value per line. A nil root prints nothing.
func Preorder(root *treeNode) {
	if root == nil {
		return
	}
	fmt.Println(root.val)
	Preorder(root.leftNode)
	Preorder(root.rightNode)
}
// Middleorder prints the tree in left-root-right (in-order) traversal,
// one value per line. A nil root prints nothing.
func Middleorder(root *treeNode) {
	if root == nil {
		return
	}
	Middleorder(root.leftNode)
	fmt.Println(root.val)
	Middleorder(root.rightNode)
}
// Postorder prints the tree in left-right-root (post-order) traversal,
// one value per line. A nil root prints nothing.
func Postorder(root *treeNode) {
	if root == nil {
		return
	}
	Postorder(root.leftNode)
	Postorder(root.rightNode)
	fmt.Println(root.val)
}
|
package lc
// countPrimes returns the number of primes strictly less than n, using a
// sieve of Eratosthenes that only scans odd candidates.
// Time: O(n log log n)
func countPrimes(n int) int {
	if n <= 2 {
		return 0
	}
	// Indices range over [0, n); the previous n+1 sizing wasted a byte.
	sieve := make([]byte, n)
	count := 1 // accounts for the only even prime, 2
	for i := 3; i < n; i += 2 {
		if sieve[i] == 1 {
			continue
		}
		count++
		// Start crossing off at i*i: smaller multiples of i were already
		// marked via their smaller prime factors (the old 2*i start redid
		// that work).
		for j := i * i; j < n; j += i {
			sieve[j] = 1
		}
	}
	return count
}
|
/*
This is an example application to demonstrate parsing an ID Token.
*/
package main
import (
"Charles/charles-email-test/services"
"net/http"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/cors"
)
// main wires a mux router with a Prometheus /metrics endpoint, sends a
// test HTML email at startup, and serves HTTP on :8080 behind a fully
// permissive CORS handler.
//
// NOTE(review): the error from http.ListenAndServe is discarded, so a
// failed bind exits silently. prometheus.Handler() is deprecated in newer
// client_golang releases (promhttp.Handler is the replacement) — confirm
// the pinned dependency version.
func main() {
	router := mux.NewRouter()
	router.Handle("/metrics", prometheus.Handler())
	//services.Send("Su cita para Charlesfisio ha sido confirmada.")
	services.SendHTML()
	c := cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		AllowCredentials: true,
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"},
		AllowedHeaders:   []string{"*"},
		ExposedHeaders:   []string{"*"},
	})
	handler := c.Handler(router)
	http.ListenAndServe(":8080", handler)
}
|
//
// Package tcx reads garmin XML format files (.tcx file extension) and converts
// them into .fit format Go structures.
//
package tcx
import (
"fmt"
"github.com/jezard/fit"
"math"
"time"
)
// DeviceInfo converts GPS device information from the TCXDB structure to
// strings. When multiple activities are present, the creator of the last
// activity wins, since each iteration overwrites the results.
func DeviceInfo(db *TCXDB) (DevName string, DevUnitId string, DevProdID string) {
	for _, item := range db.Acts.Act {
		creator := item.Creator
		DevName = creator.Name
		DevUnitId = fmt.Sprint(creator.UnitId)
		DevProdID = creator.ProductID
	}
	return
}
// CvtToFitRecs converts timestamp, position (latitude/longitude), altitude,
// distance, speed and cadence (e.g. tracks) from the TCXDB structure to the
// fit.Record structure.
//
// When the TCX input lacks cumulative distance or speed, both are derived
// in place (mutating db) from lat/long and timestamps. Track points whose
// lat AND long are both zero are treated as missing fixes and skipped —
// note this would also drop a legitimate point at exactly (0, 0).
func CvtToFitRecs(db *TCXDB) (runRecs []fit.Record) {
	var hasspeed bool
	var hasdist bool
	// Determine if TCX file supplied cumulative distance and speed.
	// Any single nonzero value is taken as evidence the field was supplied.
	for i := range db.Acts.Act {
		for j := range db.Acts.Act[i].Laps {
			for k := range db.Acts.Act[i].Laps[j].Trk.Pt {
				if db.Acts.Act[i].Laps[j].Trk.Pt[k].Speed != 0.0 {
					hasspeed = true
				}
				if db.Acts.Act[i].Laps[j].Trk.Pt[k].Dist != 0.0 {
					hasdist = true
				}
			}
		}
	}
	// Calculate cumulative distance in meters using latitude and longitude if not supplied in XML.
	if !hasdist {
		var lat0, long0, lat1, long1, totalDist, newDist float64
		for i := range db.Acts.Act {
			for j := range db.Acts.Act[i].Laps {
				for k := range db.Acts.Act[i].Laps[j].Trk.Pt {
					if i == 0 && j == 0 && k == 0 {
						// Seed the previous-point pair with the very first fix.
						lat0 = db.Acts.Act[0].Laps[0].Trk.Pt[0].Lat
						long0 = db.Acts.Act[0].Laps[0].Trk.Pt[0].Long
					} else {
						lat1 = db.Acts.Act[i].Laps[j].Trk.Pt[k].Lat
						long1 = db.Acts.Act[i].Laps[j].Trk.Pt[k].Long
						newDist = Distance(lat0, long0, lat1, long1)
						totalDist = totalDist + newDist
						lat0 = lat1
						long0 = long1
						db.Acts.Act[i].Laps[j].Trk.Pt[k].Dist = totalDist
					}
				}
			}
		}
	}
	// Calculate segment speed in meters/sec using distance and timestamp if not supplied in XML.
	if !hasspeed {
		var lasttime, thistime time.Time
		var deltaT time.Duration
		var dist, lastdist float64 //meters
		for i := range db.Acts.Act {
			for j := range db.Acts.Act[i].Laps {
				for k := range db.Acts.Act[i].Laps[j].Trk.Pt {
					if i == 0 && j == 0 && k == 0 {
						lasttime = db.Acts.Act[i].Laps[j].Trk.Pt[k].Time
						lastdist = 0.0
						db.Acts.Act[i].Laps[j].Trk.Pt[k].Speed = 0.0
					} else {
						thistime = db.Acts.Act[i].Laps[j].Trk.Pt[k].Time
						dist = db.Acts.Act[i].Laps[j].Trk.Pt[k].Dist
						deltaT = thistime.Sub(lasttime)
						// Guard against (near-)zero time deltas to avoid a
						// division blow-up on duplicate timestamps.
						if deltaT.Seconds() > 0.01 {
							db.Acts.Act[i].Laps[j].Trk.Pt[k].Speed = (dist - lastdist) / deltaT.Seconds()
						} else {
							db.Acts.Act[i].Laps[j].Trk.Pt[k].Speed = 0.0
						}
						lasttime = thistime
						lastdist = dist
					}
				}
			}
		}
	}
	// Create the run records.
	for i := range db.Acts.Act {
		for j := range db.Acts.Act[i].Laps {
			for k := range db.Acts.Act[i].Laps[j].Trk.Pt {
				var newRec fit.Record
				newRec.Timestamp = db.Acts.Act[i].Laps[j].Trk.Pt[k].Time.Unix()
				newRec.Position_lat = db.Acts.Act[i].Laps[j].Trk.Pt[k].Lat
				newRec.Position_long = db.Acts.Act[i].Laps[j].Trk.Pt[k].Long
				newRec.Altitude = db.Acts.Act[i].Laps[j].Trk.Pt[k].Alt
				newRec.Distance = db.Acts.Act[i].Laps[j].Trk.Pt[k].Dist
				newRec.Speed = db.Acts.Act[i].Laps[j].Trk.Pt[k].Speed
				newRec.Cadence = uint8(db.Acts.Act[i].Laps[j].Trk.Pt[k].Cad)
				// Skip points without a GPS fix (both coordinates zero).
				if newRec.Position_lat != 0.0 && newRec.Position_long != 0.0 {
					runRecs = append(runRecs, newRec)
				}
			}
		}
	}
	return runRecs
}
// CvtToFitLaps converts lap-specific values (elapsed time, distance, calories)
// from the TCXDB structure to the fit.Lap structure.
//
// NOTE(review): lap timestamps are not populated — the commented-out parse
// below never worked (see the TODO). The resulting fit.Lap values carry a
// zero Timestamp.
func CvtToFitLaps(db *TCXDB) (runLaps []fit.Lap) {
	for i := range db.Acts.Act {
		for _, lap := range db.Acts.Act[i].Laps {
			var newLap fit.Lap
			//TODO This doesn't seem to work. Maybe time.RFC3339 isn't right...
			//t, err := time.Parse(lap.Start, time.RFC3339)
			//if err != nil {newLap.Timestamp = t.Unix()} else {break}
			newLap.Total_elapsed_time = lap.TotalTime
			newLap.Total_distance = lap.Dist
			newLap.Total_calories = uint16(lap.Calories)
			runLaps = append(runLaps, newLap)
		}
	}
	return runLaps
}
// Distance returns the great-circle distance in METERS between two
// latitude/longitude points given in degrees, computed with the haversine
// formula on a spherical Earth of radius 6378100 m. It is accurate for
// small distances.
// http://en.wikipedia.org/wiki/Haversine_formula
func Distance(lat1, lon1, lat2, lon2 float64) float64 {
	const earthRadiusM = 6378100.0
	// Convert each coordinate from degrees to radians.
	la1 := lat1 * math.Pi / 180.0
	lo1 := lon1 * math.Pi / 180.0
	la2 := lat2 * math.Pi / 180.0
	lo2 := lon2 * math.Pi / 180.0
	// Coordinate differences feed the haversine terms.
	dlat := la2 - la1
	dlon := lo2 - lo1
	a := math.Pow(math.Sin(dlat/2.0), 2) + math.Cos(la1)*math.Cos(la2)*math.Pow(math.Sin(dlon/2.0), 2)
	// Great-circle central angle in radians.
	c := 2.0 * math.Atan2(math.Sqrt(a), math.Sqrt(1.0-a))
	return c * earthRadiusM
}
// haversin(θ) function
func hsin(theta float64) float64 {
return math.Pow(math.Sin(theta/2), 2)
}
|
package arithmetic
import (
"fmt"
)
// ExampleRegisterVariable shows how to register a custom variable and use
// it in a parsed arithmetic expression.
func ExampleRegisterVariable() {
	// Register a new variable.
	RegisterVariable("dayInYear", 365)
	v, err := Parse("dayInYear * 2")
	if err != nil {
		// Error handling elided for brevity in this example.
	}
	fmt.Println(v)
	// Output: 730
}
|
package main
import (
"fmt"
"strings"
"testing"
)
// TestHighestScore checks highestScore against a table of inputs: each
// input is pipe-separated rows of space-separated ints, and the expected
// output is the column-wise maximum as a space-joined string.
func TestHighestScore(t *testing.T) {
	for k, v := range map[string]string{
		"72 64 150 | 100 18 33 | 13 250 -6":                    "100 250 150",
		"10 25 -30 44 | 5 16 70 8 | 13 1 31 12":                "13 25 70 44",
		"100 6 300 20 10 | 5 200 6 9 500 | 1 10 3 400 143":     "100 200 300 400 500"} {
		if r := highestScore(k); r != v {
			t.Errorf("failed: highestScore %s is %s, got %s",
				k, v, r)
		}
	}
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a >= b {
		return a
	}
	return b
}
// highestScore takes pipe-separated rows of space-separated integers and
// returns the column-wise maximum across rows, space-separated.
//
// Fix over the original: a later row longer than the first no longer
// causes an index-out-of-range panic — extra columns are appended instead.
func highestScore(s string) string {
	var best []int
	for rowIdx, row := range strings.Split(s, " | ") {
		for colIdx, field := range strings.Fields(row) {
			var n int
			// Parse errors leave n at 0, matching the original's ignored
			// fmt.Sscan error.
			fmt.Sscan(field, &n)
			if rowIdx == 0 || colIdx >= len(best) {
				best = append(best, n)
			} else if n > best[colIdx] {
				best[colIdx] = n
			}
		}
	}
	out := make([]string, len(best))
	for i, v := range best {
		out[i] = fmt.Sprint(v)
	}
	return strings.Join(out, " ")
}
|
package rego
// HaltError is an error type to return from a custom function implementation
// that will abort the evaluation process (analogous to topdown.Halt).
type HaltError struct {
	err error // wrapped cause, surfaced through Error()
}

// Error delegates to the wrapped error.
func (h *HaltError) Error() string {
	return h.err.Error()
}

// NewHaltError wraps an error such that the evaluation process will stop
// when it occurs.
func NewHaltError(err error) error {
	return &HaltError{err: err}
}
|
package psql
import (
"context"
storage "github.com/adhistria/auth-movie-app/infrastructure/storage"
"github.com/adhistria/auth-movie-app/internal/domain"
log "github.com/sirupsen/logrus"
)
// userRepository is the PostgreSQL-backed implementation of the
// domain.UserRepository interface.
type userRepository struct {
	DB *storage.Database // open database handle used for all queries
}
// Create add new user to database.
//
// NOTE(review): ctx is accepted but unused — NamedExec takes no context;
// consider NamedExecContext if the underlying driver supports it.
func (r *userRepository) Create(ctx context.Context, user *domain.User) error {
	query := `INSERT INTO users (name, email, password) VALUES(:name, :email, :password);`
	_, err := r.DB.Conn.NamedExec(query, user)
	if err != nil {
		log.Warnf("Can't create new user: %v", err)
		return err
	}
	return nil
}
// FindByEmail find user by email.
//
// Only user.Email is read from the argument; the matching row is returned
// as a freshly allocated domain.User.
// NOTE(review): ctx is accepted but unused — Get takes no context;
// consider GetContext if the underlying driver supports it.
func (r *userRepository) FindByEmail(ctx context.Context, user *domain.User) (*domain.User, error) {
	newUser := domain.User{}
	query := `SELECT * FROM users WHERE email = $1`
	err := r.DB.Conn.Get(&newUser, query, user.Email)
	if err != nil {
		log.Warnf("Can't find user with email: %v", user.Email)
		return nil, err
	}
	return &newUser, nil
}
// NewUserRepository returns the psql-backed implementation of
// domain.UserRepository using the given database handle.
func NewUserRepository(db *storage.Database) domain.UserRepository {
	return &userRepository{DB: db}
}
|
package main
// Version is the lltsv version string.
const Version string = "0.7.0"
|
package renter
import (
"errors"
"sync/atomic"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
var (
	// ErrUnknownNickname indicates that no file matches the given nickname.
	ErrUnknownNickname = errors.New("no file known by that nickname")
	// ErrNicknameOverload indicates that the proposed nickname is taken.
	ErrNicknameOverload = errors.New("a file with the proposed nickname already exists")
)
// A file is a single file that has been uploaded to the network.
type file struct {
	Name     string
	Checksum crypto.Hash // checksum of the decoded file.

	// Erasure coding variables:
	// piecesRequired <= optimalRecoveryPieces <= totalPieces
	ErasureScheme         string
	PiecesRequired        int
	OptimalRecoveryPieces int
	TotalPieces           int
	Pieces                []filePiece

	// DEPRECATED - the new renter scheme has the renter pre-making contracts
	// with hosts uploading new contracts through diffs.
	UploadParams modules.FileUploadParams

	// The file needs to access the renter's lock. This variable is not
	// exported so that the persistence functions won't save the whole renter.
	renter *Renter
}
// A filePiece contains information about an individual file piece that has
// been uploaded to a host, including information about the host and the health
// of the file piece.
type filePiece struct {
	// Implementation note: 'Transferred' is declared first to ensure that it
	// is 64-byte aligned. This is necessary to ensure that atomic operations
	// work correctly on ARM and x86-32.
	Transferred uint64

	Active    bool               // True if the host has the file and has been online somewhat recently.
	Repairing bool               // True if the piece is currently being uploaded.
	Contract  types.FileContract // The contract being enforced.
	ContractID types.FileContractID // The ID of the contract.

	HostIP     modules.NetAddress // Where to find the file piece.
	StartIndex uint64             // byte offset of the piece within the file
	EndIndex   uint64             // end byte offset (exclusive) of the piece
	PieceSize  uint64             // total size of this piece in bytes

	PieceIndex    int // Indicates the erasure coding index of this piece.
	EncryptionKey crypto.TwofishKey // key used to encrypt this piece
	Checksum      crypto.Hash       // checksum of the encrypted piece data
}
// Available indicates whether the file is ready to be downloaded, i.e.
// whether at least PiecesRequired pieces are active on the network.
func (f *file) Available() bool {
	lockID := f.renter.mu.RLock()
	defer f.renter.mu.RUnlock(lockID)

	// Iterate by index on purpose: ranging over the slice would copy each
	// piece while atomic operations are concurrently updating it, creating
	// a race condition against those atomics.
	count := 0
	for i := 0; i < len(f.Pieces); i++ {
		if f.Pieces[i].Active {
			count++
		}
		if count >= f.PiecesRequired {
			return true
		}
	}
	return false
}
// UploadProgress indicates how close the file is to being available,
// expressed as a percentage. With full replication this is simply the
// progress of the most-uploaded piece.
func (f *file) UploadProgress() float32 {
	lockID := f.renter.mu.RLock()
	defer f.renter.mu.RUnlock(lockID)

	// Iterate by index on purpose: ranging over the slice would copy each
	// piece while atomic operations are concurrently updating it, creating
	// a race condition against those atomics.
	var best float32
	for i := 0; i < len(f.Pieces); i++ {
		transferred := atomic.LoadUint64(&f.Pieces[i].Transferred)
		progress := float32(transferred) / float32(f.Pieces[i].PieceSize)
		if progress > best {
			best = progress
		}
	}
	return 100 * best
}
// Nickname returns the nickname of the file, read under the renter's lock.
func (f *file) Nickname() string {
	lockID := f.renter.mu.RLock()
	defer f.renter.mu.RUnlock(lockID)
	return f.Name
}
// Filesize returns the size of the file, taken from the first piece whose
// contract records a nonzero file size; zero if no such piece exists.
func (f *file) Filesize() uint64 {
	lockID := f.renter.mu.RLock()
	defer f.renter.mu.RUnlock(lockID)

	// TODO: this will break when we switch to erasure coding.
	for i := 0; i < len(f.Pieces); i++ {
		if size := f.Pieces[i].Contract.FileSize; size != 0 {
			return size
		}
	}
	return 0
}
// Repairing returns whether or not the file is actively being repaired,
// i.e. whether any piece is currently flagged as uploading.
func (f *file) Repairing() bool {
	lockID := f.renter.mu.RLock()
	defer f.renter.mu.RUnlock(lockID)

	for i := 0; i < len(f.Pieces); i++ {
		if f.Pieces[i].Repairing {
			return true
		}
	}
	return false
}
// TimeRemaining returns the number of blocks until the file's last-expiring
// contract reaches its proof window; already-expired contracts contribute
// nothing.
func (f *file) TimeRemaining() types.BlockHeight {
	lockID := f.renter.mu.RLock()
	defer f.renter.mu.RUnlock(lockID)

	var largest types.BlockHeight
	for i := 0; i < len(f.Pieces); i++ {
		windowStart := f.Pieces[i].Contract.WindowStart
		if windowStart < f.renter.blockHeight {
			// Contract has already expired.
			continue
		}
		if remaining := windowStart - f.renter.blockHeight; remaining > largest {
			largest = remaining
		}
	}
	return largest
}
// DeleteFile removes a file entry from the renter and persists the change.
// Returns ErrUnknownNickname if no file has the given nickname.
func (r *Renter) DeleteFile(nickname string) error {
	// Write lock required: this method mutates r.files and calls r.save().
	// (Previously used RLock, permitting a concurrent map mutation race.)
	lockID := r.mu.Lock()
	defer r.mu.Unlock(lockID)

	_, exists := r.files[nickname]
	if !exists {
		return ErrUnknownNickname
	}
	delete(r.files, nickname)
	r.save()
	return nil
}
// FileList returns all of the files that the renter is tracking.
func (r *Renter) FileList() []modules.FileInfo {
	lockID := r.mu.RLock()
	defer r.mu.RUnlock(lockID)

	var files []modules.FileInfo
	for _, f := range r.files {
		files = append(files, f)
	}
	return files
}
// RenameFile takes an existing file and changes the nickname. The original
// file must exist, and there must not be any file that already has the
// replacement nickname.
func (r *Renter) RenameFile(currentName, newName string) error {
	lockID := r.mu.Lock()
	defer r.mu.Unlock(lockID)

	// The current nickname must be known...
	file, exists := r.files[currentName]
	if !exists {
		return ErrUnknownNickname
	}
	// ...and the replacement must be free.
	if _, taken := r.files[newName]; taken {
		return ErrNicknameOverload
	}

	// Perform the rename and persist it.
	delete(r.files, currentName)
	file.Name = newName
	r.files[newName] = file
	r.save()
	return nil
}
|
package exec
import (
"bufio"
"fmt"
"os"
"github.com/aergoio/aergo/cmd/brick/context"
"github.com/mattn/go-colorable"
)
// init registers the batch command with the global exec command registry.
func init() {
	registerExec(&batch{})
}

// batch implements the "batch" brick command: run commands from a file.
type batch struct{}
// Command returns the command keyword used to invoke batch.
func (c *batch) Command() string {
	return "batch"
}
// Syntax returns the argument syntax hint shown in help output.
// NOTE(review): fmt.Sprintf("%s", x) is redundant if context.PathSymbol is
// already a string — confirm its type and return it directly if so.
func (c *batch) Syntax() string {
	return fmt.Sprintf("%s", context.PathSymbol)
}
// Usage returns the human-readable usage string for the batch command.
func (c *batch) Usage() string {
	// fmt.Sprintf was unnecessary: the string contains no formatting verbs
	// and no arguments were supplied (staticcheck S1039).
	return "batch `<batch_file_path>`"
}
// Describe returns a one-line description of the batch command.
func (c *batch) Describe() string {
	return "batch run"
}
// Validate checks that args can be parsed as a single existing batch file path.
func (c *batch) Validate(args string) error {
	if _, err := c.parse(args); err != nil {
		return err
	}
	return nil
}
// parse extracts the single batch-file path from args and verifies that the
// file exists on disk, returning the path or a descriptive error.
func (c *batch) parse(args string) (string, error) {
	parts := context.SplitSpaceAndAccent(args, false)
	if len(parts) != 1 {
		return "", fmt.Errorf("invalid format. usage: %s", c.Usage())
	}
	path := parts[0]
	if _, statErr := os.Stat(path.Text); os.IsNotExist(statErr) {
		return "", fmt.Errorf("fail to read a brick batch file %s: %s", path.Text, statErr.Error())
	}
	return path.Text, nil
}
// Run reads the batch file named in args and executes each line as a brick
// command, echoing line numbers (and comments) to the colorized stdout.
func (c *batch) Run(args string) (string, error) {
	// Re-validate here: previously the parse error was discarded, so a bad
	// path reached os.Open with an empty string.
	batchFilePath, err := c.parse(args)
	if err != nil {
		return "", err
	}
	batchFile, err := os.Open(batchFilePath)
	if err != nil {
		return "", err
	}
	var cmdLines []string
	scanner := bufio.NewScanner(batchFile)
	for scanner.Scan() {
		cmdLines = append(cmdLines, scanner.Text())
	}
	scanErr := scanner.Err() // previously unchecked: a read error silently truncated the batch
	batchFile.Close()
	if scanErr != nil {
		return "", scanErr
	}

	// One colorized writer for the whole batch (was rebuilt per line).
	stdOut := colorable.NewColorableStdout()
	for i, line := range cmdLines {
		lineNum := i + 1
		cmd, cmdArgs := context.ParseFirstWord(line)
		if len(cmd) == 0 {
			// Blank line: print the line number only.
			fmt.Fprintf(stdOut, "\x1B[0;37m%3d\x1B[0m\n", lineNum)
			continue
		} else if context.Comment == cmd {
			// Comment line: print it in green without executing.
			fmt.Fprintf(stdOut, "\x1B[0;37m%3d \x1B[32m%s\x1B[0m\n", lineNum, line)
			continue
		}
		fmt.Fprintf(stdOut, "\x1B[0;37m%3d \x1B[34;1m%s \x1B[0m%s\n", lineNum, cmd, cmdArgs)
		Broker(line)
	}
	return "batch exec is finished", nil
}
|
package pando
import (
"encoding/json"
"fmt"
"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
"github.com/kenlabs/pando/pkg/api/types"
"github.com/kenlabs/pando/pkg/api/v1/model"
. "github.com/smartystreets/goconvey/convey"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"testing"
)
// TestPandoInfo unit-tests the pandoInfo handler by monkey-patching the
// controller's PandoInfo method for both the success and error paths.
func TestPandoInfo(t *testing.T) {
	Convey("TestPandoInfo", t, func() {
		responseRecorder := httptest.NewRecorder()
		testContext, _ := gin.CreateTestContext(responseRecorder)
		Convey("Given valid pando info without error, should return the pando info resp", func() {
			testPandoInfo := model.PandoInfo{
				PeerID: "12D3KooWDhanS6yHjR4CjbtnRtrMFgbzb3YZLGAqn87m442MpEEK",
				Addresses: model.APIAddresses{
					HttpAPI:      "/ip4/127.0.0.1/tcp/9001",
					GraphQLAPI:   "/ip4/127.0.0.1/tcp/9002",
					GraphSyncAPI: "/ip4/127.0.0.1/tcp/9003",
				},
			}
			patch := gomonkey.ApplyMethodFunc(reflect.TypeOf(mockAPI.controller), "PandoInfo",
				func() (*model.PandoInfo, error) {
					return &testPandoInfo, nil
				})
			defer patch.Reset()
			mockAPI.pandoInfo(testContext)
			respBody, err := ioutil.ReadAll(responseRecorder.Result().Body)
			if err != nil {
				t.Error(err)
			}
			var resp types.ResponseJson
			if err = json.Unmarshal(respBody, &resp); err != nil {
				t.Error(err)
			}
			// Round-trip resp.Data through JSON to decode it into the model type.
			var actualPandoInfo model.PandoInfo
			respData, err := json.Marshal(resp.Data)
			if err != nil {
				t.Error(err)
			}
			if err = json.Unmarshal(respData, &actualPandoInfo); err != nil {
				t.Errorf("unmarshal pandoInfoData failed, err: %v", err)
			}
			So(actualPandoInfo, ShouldResemble, testPandoInfo)
			So(resp.Code, ShouldEqual, http.StatusOK)
			So(resp.Message, ShouldEqual, "OK")
		})
		Convey("Given a monkey error, should return a monkey error resp", func() {
			patch := gomonkey.ApplyMethodFunc(reflect.TypeOf(mockAPI.controller), "PandoInfo",
				func() (*model.PandoInfo, error) {
					return nil, fmt.Errorf("monkey error")
				})
			defer patch.Reset()
			mockAPI.pandoInfo(testContext)
			respBody, err := ioutil.ReadAll(responseRecorder.Result().Body)
			// Fixed: the ReadAll error was previously left unchecked.
			if err != nil {
				t.Error(err)
			}
			var resp types.ResponseJson
			if err = json.Unmarshal(respBody, &resp); err != nil {
				t.Error(err)
			}
			So(resp.Message, ShouldEqual, "monkey error")
			So(resp.Code, ShouldEqual, http.StatusBadRequest)
			So(resp.Data, ShouldBeNil)
		})
	})
}
|
package main
import (
"github.com/bengtrj/cfcr-cluster-diagram/infra-diagram/generator/deployment"
"github.com/bengtrj/cfcr-cluster-diagram/infra-diagram/generator"
"os"
)
// main loads a fixed CFCR deployment manifest and writes the generated
// infrastructure diagram to stdout.
func main() {
	// NOTE(review): this absolute path only exists on the original author's
	// machine — consider taking the manifest path from os.Args.
	d, err := deployment.Load("/Users/bengthammarlund/go/src/github.com/bengtrj/cfcr-cluster-diagram/infra-diagram/fixture/cfcr-v0.18.0-manifest.yml")
	if err != nil {
		// Propagate the underlying error instead of discarding it in a
		// generic "could not read the file" message.
		panic(err)
	}
	// Renamed locals so they no longer shadow the imported package names.
	gen := generator.Generator{
		Deployment: d,
	}
	gen.Generate(os.Stdout)
}
|
package memjudge
import (
"encoding/json"
"fmt"
"github.com/RemmargorP/memjudge/judge"
"github.com/RemmargorP/memjudge/web"
"gopkg.in/mgo.v2"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/http/httputil"
"net/url"
"os"
"runtime"
"strconv"
"time"
)
// MasterPort is the listen address of the master control HTTP API.
const MasterPort = ":45100"

// WebPort is the public listen address of the load-balancing reverse proxy.
const WebPort = ":8080"
// ServerConfig describes how many worker goroutines of each kind the
// server runs.
type ServerConfig struct {
	NumJudges       int // concurrent judge workers
	NumWebInstances int // web front-end instances behind the proxy
}

// SpreadThreads splits the available threads between web instances
// (roughly one third) and judges (the remainder). With fewer than 3
// threads it falls back to one of each.
func (s *ServerConfig) SpreadThreads(threads int) {
	if threads < 3 {
		s.NumWebInstances = 1
		s.NumJudges = 1
		return
	}
	s.NumWebInstances = threads / 3
	// Removed the leftover debug override (NumWebInstances = 2) that was
	// explicitly marked "TODO DELETE (TESTING)".
	s.NumJudges = threads - s.NumWebInstances
}
// DefaultServerConfig builds a ServerConfig sized for the number of CPUs
// available on this machine.
func DefaultServerConfig() *ServerConfig {
	s := &ServerConfig{}
	s.SpreadThreads(runtime.NumCPU())
	return s
}
// Server owns the database connection, the judge and web worker goroutines,
// and the reverse proxy that load-balances across the web instances.
type Server struct {
	Config       *ServerConfig
	Judges       map[int]chan bool // chans used to kill specified judges
	WebInstances map[int]chan bool // or web instances
	lastThreadId int               // next worker id to hand out
	DB           *mgo.Database
	Proxy        *httputil.ReverseProxy
}
// init connects to MongoDB using credentials from DB_auth.json, starts the
// configured number of judge and web-instance goroutines, and builds a
// randomized reverse proxy over the web instances. Any setup failure is fatal.
func (s *Server) init() {
	dbAuthJSON, err := ioutil.ReadFile("DB_auth.json")
	if err != nil {
		log.Fatal(err)
	}
	var dbAuth struct {
		Url             string
		DB              string
		User            string
		Pass            string
		CookieStoreSalt string
	}
	err = json.Unmarshal(dbAuthJSON, &dbAuth)
	if err != nil {
		log.Fatal(err)
	}
	var session *mgo.Session
	session, err = mgo.Dial(dbAuth.Url)
	if err != nil {
		log.Fatal(err)
	}
	session.SetSafe(&mgo.Safe{})
	s.DB = session.DB(dbAuth.DB)
	err = s.DB.Login(dbAuth.User, dbAuth.Pass)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Successfully connected to the DB.\n")
	s.Config = DefaultServerConfig()
	log.Printf("Using Default Server Config:\n judges: %d\n web instances: %d\n total threads: %d\n",
		s.Config.NumJudges, s.Config.NumWebInstances, s.Config.NumJudges+s.Config.NumWebInstances)
	s.Judges = make(map[int]chan bool)
	s.WebInstances = make(map[int]chan bool)
	for i := 0; i < s.Config.NumJudges; i++ {
		j := &judge.Judge{}
		stop := make(chan bool, 1)
		go j.Start(s.lastThreadId, stop, s.DB)
		s.Judges[s.lastThreadId] = stop
		s.lastThreadId++
	}
	var proxyTargets []*url.URL
	for i := 0; i < s.Config.NumWebInstances; i++ {
		wi := &web.WebInstance{}
		stop := make(chan bool, 1)
		go wi.Start(s.lastThreadId, 9000+s.lastThreadId, stop, s.DB)
		s.WebInstances[s.lastThreadId] = stop
		// Renamed from "url": the local previously shadowed the net/url package.
		target, err := url.Parse("http://127.0.0.1:" + strconv.Itoa(9000+s.lastThreadId))
		if err != nil {
			log.Fatal(err)
		}
		proxyTargets = append(proxyTargets, target)
		s.lastThreadId++
	}
	s.Proxy = &httputil.ReverseProxy{
		Director: func(r *http.Request) {
			// Pick a random backend per request (randomized load balancing);
			// rand.Intn replaces the non-idiomatic rand.Int()%n.
			target := proxyTargets[rand.Intn(len(proxyTargets))]
			r.URL.Scheme = target.Scheme
			r.URL.Host = target.Host
		},
	}
}
// Stop signals every judge and web instance to shut down, waits the given
// number of seconds for them to finish, then terminates the process.
func (s *Server) Stop(seconds int64) {
	for _, stopCh := range s.Judges {
		stopCh <- true
	}
	for _, stopCh := range s.WebInstances {
		stopCh <- true
	}
	time.Sleep(time.Duration(seconds) * time.Second)
	os.Exit(0)
}
// Serve redirects the standard logger to a local "log" file, initializes the
// server, starts the public reverse proxy, and serves the master control API.
// It blocks until a listener fails.
func (s *Server) Serve() {
	w, err := os.Create("log")
	if err != nil {
		// Previously ignored: on failure w would be nil and all later
		// logging (via log.SetOutput) would be broken.
		log.Fatal(err)
	}
	log.SetOutput(w)
	s.init()
	go func() { // PROXY (RANDOMIZED LOAD BALANCER)
		log.Fatal(http.ListenAndServe(WebPort, s.Proxy))
	}()
	mux := http.NewServeMux()
	mux.HandleFunc("/", s.handler)
	httpServer := &http.Server{
		Addr:           MasterPort,
		Handler:        mux,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	log.Fatal(httpServer.ListenAndServe())
}
// handler serves the master control endpoint. The only supported query
// option is "stop", which shuts the server down asynchronously; any other
// value is echoed back as unknown.
func (s *Server) handler(rw http.ResponseWriter, req *http.Request) {
	// Previously the ParseForm error was silently discarded.
	if err := req.ParseForm(); err != nil {
		http.Error(rw, err.Error(), http.StatusBadRequest)
		return
	}
	option := req.Form.Get("option")
	// Fprint (not Fprintf) for the constant fragments: they are not format
	// strings, and go vet flags non-constant/verb-free Fprintf usage.
	fmt.Fprint(rw, "<html><title>Memjudge Control</title><body>")
	switch option {
	case "stop":
		fmt.Fprint(rw, "<p><strong>Server gonna be stopped now.</strong></p>")
		log.Println("Shutdown initiated...")
		go s.Stop(1)
	default:
		// NOTE(review): option is user input echoed into HTML unescaped —
		// consider html.EscapeString to avoid reflected XSS.
		fmt.Fprintf(rw, "<p>Unknown option: <strong>%s</strong></p>", option)
	}
	fmt.Fprint(rw, "</body></html>")
}
|
package humanize
import (
"go/ast"
"go/token"
)
var (
	// lastConst remembers the most recent explicit constant type so that
	// later untyped entries in the same const block inherit it (see NewConstant).
	// NOTE(review): package-level mutable state — not safe for concurrent use.
	lastConst Type
)
// Constant is a string representation of a single declared constant.
type Constant struct {
	Name  string // constant identifier
	Type  Type   // declared or inferred type
	Docs  Docs   // attached documentation comments
	Value string // literal value text, if any
	caller *ast.CallExpr // call-expression initializer, when the value is a call
	indx  int           // index of this name within its ValueSpec
}
// constantFromValue builds a Constant named name from the value expressions
// of a const declaration, inferring a Go default type from the literal kind
// of e[indx]. If the first expression is a call, the call is recorded and
// type inference is skipped.
func constantFromValue(name string, indx int, e []ast.Expr, src string, f *File, p *Package) *Constant {
	var t Type
	var caller *ast.CallExpr
	var ok bool
	if len(e) == 0 {
		return &Constant{
			Name: name,
		}
	}
	first := e[0]
	if caller, ok = first.(*ast.CallExpr); !ok {
		switch data := e[indx].(type) {
		case *ast.BasicLit:
			switch data.Kind {
			case token.INT:
				t = &IdentType{
					srcBase{p, getSource(data, src)},
					"int",
				}
			case token.FLOAT:
				t = &IdentType{
					srcBase{p, getSource(data, src)},
					"float64",
				}
			case token.IMAG:
				// Fixed: an untyped imaginary constant defaults to
				// complex128 in Go, not complex64.
				t = &IdentType{
					srcBase{p, getSource(data, src)},
					"complex128",
				}
			case token.CHAR:
				// Fixed: Go has no "char" type; an untyped rune constant
				// defaults to rune.
				t = &IdentType{
					srcBase{p, getSource(data, src)},
					"rune",
				}
			case token.STRING:
				t = &IdentType{
					srcBase{p, getSource(data, src)},
					"string",
				}
			}
		case *ast.Ident:
			t = &IdentType{
				srcBase{p, getSource(data, src)},
				nameFromIdent(data),
			}
			// default:
		}
	}
	return &Constant{
		Name:   name,
		Type:   t,
		caller: caller,
		indx:   indx,
	}
}
// constantFromExpr builds a Constant whose type is taken from an explicit
// type expression in the const declaration.
func constantFromExpr(name string, e ast.Expr, src string, f *File, p *Package) *Constant {
	return &Constant{
		Name: name,
		Type: getType(e, src, f, p),
	}
}
func getConstantValue(a []ast.Expr) string {
if len(a) == 0 {
return ""
}
switch first := a[0].(type) {
case *ast.BasicLit:
return first.Value
default:
return "NotSupportedYet"
}
}
// NewConstant returns the constants declared by a single ValueSpec in the
// current scope. Entries with an explicit type record it; untyped entries
// infer a type from their value, and entries with neither inherit the
// previous constant's type via the package-level lastConst.
func NewConstant(v *ast.ValueSpec, c *ast.CommentGroup, src string, f *File, p *Package) []*Constant {
	var res []*Constant
	for i := range v.Names {
		name := nameFromIdent(v.Names[i])
		var n *Constant
		if v.Type != nil {
			// Explicit type: const x T = ...
			n = constantFromExpr(name, v.Type, src, f, p)
		} else {
			// No explicit type: infer from the i-th value expression.
			n = constantFromValue(name, i, v.Values, src, f, p)
		}
		n.Value = getConstantValue(v.Values)
		if n.Type == nil {
			// Bare name in a const block (e.g. iota continuation):
			// inherit the most recently seen type.
			n.Type = lastConst
		} else {
			lastConst = n.Type
		}
		n.Name = name
		n.Docs = docsFromNodeDoc(c, v.Doc)
		res = append(res, n)
	}
	return res
}
|
package types
import (
"github.com/hyperhq/hyper/lib/docker/cliconfig"
)
// ImagePushConfig carries the inputs for pushing an image: forwarded HTTP
// headers, registry credentials, and the tag to push.
type ImagePushConfig struct {
	MetaHeaders map[string][]string
	AuthConfig  *cliconfig.AuthConfig
	Tag         string
}
// ImagePullConfig carries the inputs for pulling an image: forwarded HTTP
// headers and registry credentials.
type ImagePullConfig struct {
	MetaHeaders map[string][]string
	AuthConfig  *cliconfig.AuthConfig
}
|
/**
* Copyright 2017 authors
*
* Licensed under the Apache License, Version 2.0 (the "License"): you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http: *www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
// Created by xuning on 2017/1/2
package main
import (
"log"
"github.com/l0vest0rm/gostream"
)
// MyBolt is an example bolt that counts every message it receives.
type MyBolt struct {
	*gostream.BaseBolt
	sum int64 // number of messages processed by this instance
}
// NewMyBolt constructs a MyBolt backed by a fresh BaseBolt.
func NewMyBolt() gostream.IBolt {
	return &MyBolt{BaseBolt: gostream.NewBaseBolt()}
}
// NewInstance clones this bolt for another executor, copying the BaseBolt
// state while starting with a zero message count.
func (t *MyBolt) NewInstance() gostream.IBolt {
	clone := &MyBolt{}
	clone.BaseBolt = t.BaseBolt.Copy()
	return clone
}
// Prepare delegates executor setup to the embedded BaseBolt.
func (t *MyBolt) Prepare(index int, context gostream.TopologyContext, collector gostream.IOutputCollector) {
	t.BaseBolt.Prepare(index, context, collector)
}
// Cleanup logs the final message count for this bolt instance at shutdown.
func (t *MyBolt) Cleanup() {
	log.Printf("Cleanup,boltid:%s,index:%d,sum:%d\n", t.Context.GetThisComponentId(), t.Index, t.sum)
}
// Execute counts each incoming message. The discarded type assertion
// enforces that the message is a MyMsg and panics on any other type.
func (t *MyBolt) Execute(message gostream.Message) {
	_ = message.(MyMsg)
	t.sum += 1
}
package main
import (
"bufio"
"fmt"
"log"
"net"
"strings"
)
func request(c net.Conn) string {
rd := bufio.NewScanner(c)
i := 0
var uri string
for rd.Scan() {
line := rd.Text()
fmt.Println(line)
if i == 0 {
uri = strings.Fields(line)[1]
fmt.Println("***URI IS", uri)
}
if line == "" {
break
}
i++
}
return uri
}
func response(c net.Conn, uri string) {
body := "<!DOCTYPE html><html lang='en'><head><meta charet='UTF-8'><title></title></head><body><strong>" + uri + "</strong></body></html"
fmt.Fprintf(c, "HTTP/1.1 200 OK\r\n")
fmt.Fprintf(c, "Content-Length: %d\r\n", len(body))
fmt.Fprintf(c, "Content-Type: text/html\r\n")
fmt.Fprintf(c, "\r\n")
fmt.Fprintf(c, body)
}
// handle serves one connection: read the request, write the echo response,
// and close the connection when done.
func handle(c net.Conn) {
	// Close the connection after use.
	defer c.Close()
	uri := request(c)
	response(c, uri)
}
// main runs a minimal TCP HTTP server on :8080, handling each connection
// in its own goroutine.
func main() {
	// Note: the first parameter is the protocol.
	li, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Panic(err.Error())
	}
	// Close the listener when main returns.
	defer li.Close()
	// Accept connections forever from the listening port; each accepted
	// connection is handled concurrently.
	for {
		conn, err := li.Accept()
		if err != nil {
			log.Panic(err.Error())
		}
		go handle(conn)
	}
}
|
package main
// person holds basic identifying details for a single individual.
type person struct {
	first    string // given name
	last     string // family name
	location string // free-form location description
}

// main is a placeholder entry point; the program currently does nothing.
func main() {
}
|
package main
import "fmt"
// main demonstrates the greeting and getSum helpers by printing their results.
func main() {
	fmt.Println(greeting("Lukas"))
	fmt.Println(getSum(56, 102))
}
// greeting returns a friendly hello message for the given name.
func greeting(name string) string {
	const prefix = "Hello, "
	return prefix + name
}
// getSum returns the sum of its two integer arguments.
func getSum(num1, num2 int) int {
	sum := num1 + num2
	return sum
}
|
// Copyright 2020 Tim Shannon. All rights reserved.
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package reflex
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"reflect"
"reflex/client"
"github.com/gorilla/websocket"
"golang.org/x/net/html"
)
// Page defines the Data and events used to run the reflex Template.
// If Upgrader is not set, websocket buffers default to 512 bytes.
// If ErrorHandler is not set, it defaults to http.Error 500 plus a log entry.
type Page struct {
	ElementID    string      // id of the DOM element that reflex re-renders
	Events       EventFuncs  // event name -> handler function
	Data         interface{} // template data; must be a pointer (checked in Setup)
	Upgrader     websocket.Upgrader
	ErrorHandler func(w http.ResponseWriter, r *http.Request, err error)
}
// defaultErrorHandler logs the error and replies with a generic 500; used
// when a Page does not supply its own ErrorHandler.
func defaultErrorHandler(w http.ResponseWriter, r *http.Request, err error) {
	log.Print(err)
	http.Error(w, "Internal Server Error", http.StatusInternalServerError)
}
// EventFuncs maps event names to handler functions. Handlers may declare
// Event, *http.Request, or value parameters filled from the client call.
type EventFuncs map[string]interface{}

// requestType caches the reflect.Type of *http.Request for matching
// handler parameters during event dispatch.
var requestType = reflect.TypeOf(&http.Request{})

// SetupFunc is the function that builds and returns the Page elements for
// use with the reflex template; it is invoked once per incoming request.
type SetupFunc func() *Page
// Template defines a reflex template that updates and responds to DOM events.
type Template struct {
	text string // raw template source
}
// Parse creates a reflex template from the passed in text.
func Parse(text string) *Template {
	return &Template{text: text}
}
// ParseFile creates a reflex template from the contents of the named file.
func ParseFile(file string) (*Template, error) {
	contents, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	return &Template{text: string(contents)}, nil
}
// Must is similar to core Go template.Must: it panics if err is non-nil and
// otherwise returns t unchanged, so Parse/ParseFile can be used in
// package-level variable initializers.
func Must(t *Template, err error) *Template {
	if err == nil {
		return t
	}
	panic(err)
}
// Setup sets up the reflex template for use as a standard http.Handler.
// It validates the Page returned by setup, registers a template func per
// declared event (each emitting the reflex.event(...) JS call), plus a
// "client" func that injects the client script, then parses the template.
// Panics on invalid Page data (non-pointer Data or empty ElementID).
func (t *Template) Setup(setup SetupFunc) http.Handler {
	pg := setup()
	if reflect.TypeOf(pg.Data).Kind() != reflect.Ptr {
		panic("Page Data must be a pointer")
	}
	if pg.ElementID == "" {
		panic("Element ID must be set")
	}
	funcs := map[string]interface{}{
		// {{client}} injects the reflex client-side script into the page.
		"client": func() template.HTML {
			return template.HTML(client.Inject)
		},
	}
	for name := range pg.Events {
		eventName := name // per-iteration copy captured by the closure below
		funcs[eventName] = func(in ...interface{}) (template.JS, error) {
			// Build the JS call: reflex.event(event, '<name>'[, args]);
			js := "reflex.event(event, '" + eventName + "'"
			if len(in) > 0 {
				// Event and *http.Request parameters are supplied at
				// dispatch time, so they are stripped from the JS args.
				args := make([]interface{}, 0, len(in))
				for i := range in {
					argType := reflect.TypeOf(in[i])
					if argType != eventType && argType != requestType {
						args = append(args, in[i])
					}
				}
				param, err := json.Marshal(args)
				if err != nil {
					return "", err
				}
				js += "," + string(param)
			}
			js += ");"
			return template.JS(js), nil
		}
	}
	tmpl := template.Must(template.New("reflex-template").Funcs(funcs).Parse(t.text))
	return &handler{
		setup:    setup,
		template: tmpl,
	}
}
// handler serves a parsed reflex template, rebuilding the Page via setup
// for every request.
type handler struct {
	setup    SetupFunc
	template *template.Template
}
// ServeHTTP builds a fresh Page for each request, then dispatches websocket
// upgrade requests to the event loop and all others to template rendering.
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	p := h.setup()
	if p.ErrorHandler == nil {
		p.ErrorHandler = defaultErrorHandler
	}
	if !websocket.IsWebSocketUpgrade(r) {
		h.handleTemplate(p, w, r)
		return
	}
	h.handleWebsocket(p, w, r)
}
// eventCall is the JSON payload the client sends for each DOM event.
type eventCall struct {
	Name  string        `json:"name"`  // registered event name
	Args  []interface{} `json:"args"`  // extra arguments from the template
	Event Event         `json:"event"` // serialized DOM event data
}
// handleWebsocket runs the event loop for one client: upgrade the
// connection, send the target element ID, then for each received eventCall
// look up the handler, build its arguments by reflection (Event and
// *http.Request parameters are injected; remaining args are matched or
// converted by type), invoke it, re-render the template, and push the
// updated element's HTML back over the socket.
func (h *handler) handleWebsocket(p *Page, w http.ResponseWriter, r *http.Request) {
	ws, err := p.Upgrader.Upgrade(w, r, nil)
	if err != nil {
		p.ErrorHandler(w, r, err)
		return
	}
	defer func() {
		ws.Close()
	}()
	// send inital page data
	// NOTE(review): WriteJSON error is ignored here — confirm intentional.
	ws.WriteJSON(struct {
		ElementID string `json:"elementID"`
	}{
		ElementID: p.ElementID,
	})
	for {
		e := &eventCall{}
		err = websocket.ReadJSON(ws, e)
		if err != nil {
			if err == websocket.ErrCloseSent {
				return
			}
			// TODO: Handle websocket disconnects
			// preserve template state and try to reconnect?
			// send a UUID on first connect, and if reconnect with same UUID load template state from memory?
			p.ErrorHandler(w, r, err)
			return
		}
		// Unknown event names are silently ignored.
		fn, ok := p.Events[e.Name]
		if !ok {
			continue
		}
		fnVal := reflect.ValueOf(fn)
		fnType := reflect.TypeOf(fn)
		args := make([]reflect.Value, 0, fnType.NumIn())
		for i := 0; i < cap(args); i++ {
			inType := fnType.In(i)
			if inType == eventType {
				// Handler wants the DOM event payload.
				args = append(args, reflect.ValueOf(e.Event))
			} else if inType == requestType {
				// Handler wants the originating HTTP request.
				args = append(args, reflect.ValueOf(r))
			} else {
				// Consume the first client-sent arg that matches (or can be
				// converted to) the parameter type, removing it from e.Args.
				for j := range e.Args {
					val := reflect.ValueOf(e.Args[j])
					if val.Type() == inType {
						args = append(args, val)
						e.Args = append(e.Args[:j], e.Args[j+1:]...)
						break
					} else if val.Type().ConvertibleTo(inType) {
						args = append(args, val.Convert(inType))
						e.Args = append(e.Args[:j], e.Args[j+1:]...)
						break
					}
				}
			}
		}
		out := fnVal.Call(args)
		if len(out) > 0 {
			// A handler returning a non-nil error aborts the connection.
			err, ok := out[0].Interface().(error)
			if ok {
				p.ErrorHandler(w, r, err)
				return
			}
		}
		// TODO: ignore all other func returns? Error?
		var b bytes.Buffer
		// NOTE(review): Execute's error is discarded — a failed render
		// proceeds to html.Parse on a partial buffer. Confirm intentional.
		h.template.Execute(&b, p.Data)
		doc, err := html.Parse(&b)
		if err != nil {
			p.ErrorHandler(w, r, err)
			return
		}
		b.Reset()
		// Re-render only the element the client is bound to.
		el := findElement(p.ElementID, doc)
		if el == nil {
			p.ErrorHandler(w, r, fmt.Errorf("No element found with an id of %s in the template", p.ElementID))
			return
		}
		err = html.Render(&b, el)
		if err != nil {
			p.ErrorHandler(w, r, err)
			return
		}
		// NOTE(review): WriteMessage error is ignored — confirm intentional.
		ws.WriteMessage(websocket.TextMessage, b.Bytes())
	}
}
// handleTemplate renders the full page template into a buffer and copies it
// to the response, so a render failure never sends a partial body.
func (h *handler) handleTemplate(p *Page, w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	var buf bytes.Buffer
	if err := h.template.Execute(&buf, p.Data); err != nil {
		p.ErrorHandler(w, r, err)
		return
	}
	if _, err := io.Copy(w, &buf); err != nil {
		log.Printf("Error Copying template data to template writer: %s", err)
	}
}
// findElement depth-first searches the parse tree rooted at parent for the
// element whose "id" attribute equals id, returning nil when none exists.
func findElement(id string, parent *html.Node) *html.Node {
	for child := parent.FirstChild; child != nil; child = child.NextSibling {
		if child.Type == html.ElementNode {
			for _, attr := range child.Attr {
				if attr.Key == "id" && attr.Val == id {
					return child
				}
			}
		}
		if match := findElement(id, child); match != nil {
			return match
		}
	}
	return nil
}
|
/**
括号匹配
查看左右括号是否匹配
假设只存在这[{(三种括号,并且只可能有一共六种字符可能
1. 如果是左半边,直接进栈
2. 如果是右半边,先比较栈顶是不是和其对应的左半边,不是则不匹配
*/
package stack
var (
	// leftHalf is the set of opening brackets; membership means "push".
	// (gofmt -s: the redundant struct{}{} element literals were simplified.)
	leftHalf = map[string]struct{}{
		"(": {},
		"[": {},
		"{": {},
	}
	// rightToLeft maps each closing bracket to its matching opening bracket.
	rightToLeft = map[string]string{
		")": "(",
		"]": "[",
		"}": "{",
	}
)
// Parentheses checks bracket matching over a string using an array stack.
type Parentheses struct {
	*ArrayStack        // stack of pending (unmatched) opening brackets
	s   string // input string to examine
	cnt int    // number of leading characters of s to check
}
// NewParentheses builds a checker over s with a stack sized for cnt
// characters (the worst case of all-opening brackets).
func NewParentheses(s string, cnt int) *Parentheses {
	return &Parentheses{
		ArrayStack: NewArrayStack(cnt),
		s:          s,
		cnt:        cnt,
	}
}
// match reports whether the first cnt characters of s form a balanced
// bracket sequence over ()[]{}: opening brackets are pushed, and each
// closing bracket must pair with the current stack top, which is popped.
func (pth *Parentheses) match() bool {
	for i := 0; i < pth.cnt; i++ {
		ele := string(pth.s[i])
		if _, ok := leftHalf[ele]; ok {
			// Opening bracket: push and move on.
			pth.Push(ele)
			continue
		}
		// Closing bracket: it must pair with the top of the stack.
		top := pth.Top()
		if top == nil || top.(string) != rightToLeft[ele] {
			return false
		}
		pth.Pop()
	}
	// Balanced iff every opening bracket was matched and popped
	// (simplified from an if/else that returned literal booleans, S1008).
	return pth.Top() == nil
}
|
package tests
import (
"testing"
)
/**
* [745] Find Smallest Letter Greater Than Target
*
*
* Given a list of sorted characters letters containing only lowercase letters, and given a target letter target, find the smallest element in the list that is larger than the given target.
*
* Letters also wrap around. For example, if the target is target = 'z' and letters = ['a', 'b'], the answer is 'a'.
*
*
* Examples:
*
* Input:
* letters = ["c", "f", "j"]
* target = "a"
* Output: "c"
*
* Input:
* letters = ["c", "f", "j"]
* target = "c"
* Output: "f"
*
* Input:
* letters = ["c", "f", "j"]
* target = "d"
* Output: "f"
*
* Input:
* letters = ["c", "f", "j"]
* target = "g"
* Output: "j"
*
* Input:
* letters = ["c", "f", "j"]
* target = "j"
* Output: "c"
*
* Input:
* letters = ["c", "f", "j"]
* target = "k"
* Output: "c"
*
*
*
* Note:
*
* letters has a length in range [2, 10000].
* letters consists of lowercase letters, and contains at least 2 unique letters.
* target is a lowercase letter.
*
*
*/
// TestFindSmallestLetterGreaterThanTarget exercises nextGreatestLetter with
// in-range targets and a wrap-around case.
func TestFindSmallestLetterGreaterThanTarget(t *testing.T) {
	testCases := []struct {
		input  []byte
		target byte
		output byte
	}{
		{input: []byte{'c', 'f', 'j'}, target: 'a', output: 'c'},
		{input: []byte{'c', 'f', 'j'}, target: 'c', output: 'f'},
		{input: []byte{'a', 'b'}, target: 'z', output: 'a'},
	}
	for _, tc := range testCases {
		if got := nextGreatestLetter(tc.input, tc.target); got != tc.output {
			t.Fail()
		}
	}
}
// submission codes start here
// nextGreatestLetter binary-searches the sorted letters for the smallest
// letter strictly greater than target, wrapping around to the first letter
// when target is >= every element.
func nextGreatestLetter(letters []byte, target byte) byte {
	lo, hi := 0, len(letters)
	for lo < hi {
		mid := lo + (hi-lo)/2
		if letters[mid] > target {
			hi = mid
		} else {
			lo = mid + 1
		}
	}
	// lo == len(letters) means every letter is <= target: wrap to index 0.
	return letters[lo%len(letters)]
}
// submission codes end
|
package main
import (
"reflect"
"testing"
)
// splitargtest tabulates inputs and the token slices splitarg is expected
// to produce, covering empty/whitespace input, bare words, single and
// double quoting, embedded spaces, and adjacent quoted/unquoted runs.
var splitargtest = []struct {
	in  string
	out []string
}{
	{"", nil},
	{" ", nil},
	{"  ", nil},
	{"x", []string{"x"}},
	{"x x", []string{"x", "x"}},
	{"'x' 'x'", []string{"x", "x"}},
	{"'x'  'x'", []string{"x", "x"}},
	{"' x ' 'x'", []string{" x ", "x"}},
	{" ' x ' 'x'", []string{" x ", "x"}},
	{" ' x ' 'x' ", []string{" x ", "x"}},
	{" ' x ' 'x' x", []string{" x ", "x", "x"}},
	{" \" x \" 'x' x", []string{" x ", "x", "x"}},
	{" \" x \" 'x' x", []string{" x ", "x", "x"}},
	{" \" x \"x 'x'x xx", []string{" x x", "xx", "xx"}},
}
// TestSplitarg runs splitarg over the quoting table above and reports any
// mismatch against the expected token slice.
func TestSplitarg(t *testing.T) {
	for _, tc := range splitargtest {
		got := splitarg(tc.in)
		if !reflect.DeepEqual(got, tc.out) {
			t.Errorf("splitquoted(%q) => %q, want %q", tc.in, got, tc.out)
		}
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package inputs
import (
"context"
"strings"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bundles/cros/inputs/fixture"
"chromiumos/tast/local/bundles/cros/inputs/pre"
"chromiumos/tast/local/bundles/cros/inputs/testserver"
"chromiumos/tast/local/bundles/cros/inputs/util"
"chromiumos/tast/local/chrome/ime"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/touch"
"chromiumos/tast/local/chrome/uiauto/vkb"
"chromiumos/tast/local/chrome/useractions"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the VirtualKeyboardMultitouch test with the tast framework,
// including a lacros variant gated on lacros_stable.
func init() {
	testing.AddTest(&testing.Test{
		Func:         VirtualKeyboardMultitouch,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks typing on virtual keyboard with multiple simultaneous touches",
		Contacts:     []string{"essential-inputs-gardener-oncall@google.com", "essential-inputs-team@google.com"},
		Attr:         []string{"group:mainline", "group:input-tools"},
		SoftwareDeps: []string{"chrome", "google_virtual_keyboard"},
		HardwareDeps: hwdep.D(pre.InputsStableModels),
		SearchFlags:  util.IMESearchFlags([]ime.InputMethod{ime.EnglishUSWithInternationalKeyboard}),
		Timeout:      5 * time.Minute,
		Params: []testing.Param{
			{
				Fixture: fixture.TabletVKWithMultitouch,
			},
			{
				Name:              "lacros",
				Fixture:           fixture.LacrosTabletVKWithMultitouch,
				ExtraSoftwareDeps: []string{"lacros_stable"},
				ExtraAttr:         []string{"informational"},
			},
		},
	})
}
// VirtualKeyboardMultitouch verifies typing on the virtual keyboard while
// several touches are held simultaneously: basic two-pointer typing on an
// auto-shift field, hold-shift typing with and without caps lock, and
// holding backspace while tapping other keys.
func VirtualKeyboardMultitouch(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn
	uc := s.FixtValue().(fixture.FixtData).UserContext
	// Reserve time for cleanup (UI dump/screenshot) after the test body.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree")
	its, err := testserver.LaunchBrowser(ctx, s.FixtValue().(fixture.FixtData).BrowserType, cr, tconn)
	if err != nil {
		s.Fatal("Failed to launch inputs test server: ", err)
	}
	defer its.CloseAll(cleanupCtx)
	vkbCtx := vkb.NewContext(cr, tconn)
	ui := uiauto.New(tconn)
	tsw, tcc, err := touch.NewTouchscreenAndConverter(ctx, tconn)
	if err != nil {
		s.Fatal("Fail to get touch screen: ", err)
	}
	defer tsw.Close()
	inputMethod := ime.EnglishUSWithInternationalKeyboard
	if err := inputMethod.InstallAndActivateUserAction(uc)(ctx); err != nil {
		s.Fatal("Failed to set input method: ", err)
	}
	uc.SetAttribute(useractions.AttributeInputMethod, inputMethod.Name)
	inputField := testserver.TextAreaAutoShiftInSentence
	stw, err := tsw.NewSingleTouchWriter()
	if err != nil {
		s.Fatal("Failed to get touch writer: ", err)
	}
	defer stw.Close()
	// touchAndHold starts a touch on the node's center and keeps it down.
	touchAndHold := func(finder *nodewith.Finder) uiauto.Action {
		return func(ctx context.Context) error {
			loc, err := ui.Location(ctx, finder)
			if err != nil {
				return errors.Wrapf(err, "failed to get the location of the node %v", finder)
			}
			x, y := tcc.ConvertLocation(loc.CenterPoint())
			if err := stw.Move(x, y); err != nil {
				return errors.Wrap(err, "failed to move the touch")
			}
			// Fixed: the error returned by testing.Sleep was discarded.
			if err := testing.Sleep(ctx, 50*time.Millisecond); err != nil {
				return errors.Wrap(err, "failed to sleep after touching the node")
			}
			return nil
		}
	}
	// mousePressAndHold presses the left button on the node and keeps it down.
	mousePressAndHold := func(finder *nodewith.Finder) uiauto.Action {
		return uiauto.Combine("mouse press on node center point and hold",
			ui.MouseMoveTo(finder, 10*time.Millisecond),
			mouse.Press(tconn, mouse.LeftButton),
			uiauto.Sleep(50*time.Millisecond),
		)
	}
	// releaseTouch lifts the single-touch pointer.
	releaseTouch := func() uiauto.Action {
		return func(ctx context.Context) error {
			return stw.End()
		}
	}
	waitUntilLowercase := vkbCtx.WaitForKeysExist([]string{"a"})
	waitUntilUppercase := vkbCtx.WaitForKeysExist([]string{"A"})
	shiftKeyFinder := nodewith.Name("shift").Ancestor(vkb.NodeFinder.HasClass("key_pos_shift_left"))
	backspaceKeyFinder := nodewith.Name("backspace")
	zKeyFinder := vkb.KeyByNameIgnoringCase("z")
	xKeyFinder := vkb.KeyByNameIgnoringCase("x")
	vKeyFinder := vkb.KeyByNameIgnoringCase("v")
	validateAction := uiauto.NamedCombine("Verify multitouch typing on VK",
		// Basic multitouch typing.
		its.ClickFieldUntilVKShown(inputField),
		touchAndHold(zKeyFinder),
		mousePressAndHold(xKeyFinder),
		// First character should be uppercase for autoshifted text field.
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "Z"),
		releaseTouch(),
		touchAndHold(vKeyFinder),
		// VK should now be deshifted, so remaining characters should be lowercase.
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "Zx"),
		mouse.Release(tconn, mouse.LeftButton),
		releaseTouch(),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "Zxv"),
		// Holding shift while typing.
		touchAndHold(shiftKeyFinder),
		waitUntilUppercase,
		vkbCtx.TapKeys(strings.Split("AB", "")),
		releaseTouch(),
		waitUntilLowercase,
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "ZxvAB"),
		// Holding shift while typing with caps lock on.
		vkbCtx.TapKey("caps lock"),
		waitUntilUppercase,
		touchAndHold(shiftKeyFinder),
		waitUntilLowercase,
		vkbCtx.TapKeys(strings.Split("cd", "")),
		releaseTouch(),
		waitUntilUppercase,
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "ZxvABcd"),
		vkbCtx.TapKey("caps lock"),
		waitUntilLowercase,
		vkbCtx.HideVirtualKeyboard(),
		its.Clear(inputField),
		its.ClickFieldUntilVKShown(inputField),
		// Holding backspace while typing other keys.
		vkbCtx.TapKey("A"),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "A"),
		touchAndHold(backspaceKeyFinder),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), ""),
		vkbCtx.TapKey("B"),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "B"),
		// Backspace should be cancelled and not delete any more characters.
		uiauto.Sleep(2*time.Second),
		releaseTouch(),
		util.WaitForFieldTextToBe(tconn, inputField.Finder(), "B"),
	)
	if err := uiauto.UserAction("Multitouch typing on virtual keyboard",
		validateAction,
		uc,
		&useractions.UserActionCfg{
			Attributes: map[string]string{
				useractions.AttributeFeature:    useractions.FeatureVKTyping,
				useractions.AttributeInputField: string(inputField),
			},
		},
	)(ctx); err != nil {
		s.Fatal("Failed to multitouch type on virtual keyboard: ", err)
	}
}
|
// Copyright 2018 The go-Dacchain Authors
// This file is part of the go-Dacchain library.
//
// The go-Dacchain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-Dacchain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-Dacchain library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"bytes"
"crypto/ecdsa"
"encoding/json"
"math/big"
"testing"
"encoding/binary"
"fmt"
"github.com/Dacchain/go-Dacchain/common"
"github.com/Dacchain/go-Dacchain/common/hexutil"
"github.com/Dacchain/go-Dacchain/crypto"
"github.com/Dacchain/go-Dacchain/rlp"
"io"
"sync/atomic"
"regexp"
)
// The values in those tests are from the Transaction Tests
var (
	// emptyTx is a minimal fixture: nonce 0, zero value, zero gas price,
	// no payload and all optional (vote/asset) fields unset.
	emptyTx = NewTransaction(
		0,
		common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
		big.NewInt(0), 0, big.NewInt(0),
		nil, 0, nil, nil, nil, nil, "")
	// rightvrsTx is kept for reference from the upstream test suite;
	// presumably disabled because the signature scheme here diverged —
	// TODO confirm before re-enabling.
	//rightvrsTx, _ = NewTransaction(
	//	3,
	//	common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b"),
	//	big.NewInt(10),
	//	2000,
	//	big.NewInt(1),
	//	common.FromHex("5544"), 0, nil, nil,nil,nil,make([]byte,0),
	//).WithSignature(
	//	HomesteadSigner{},
	//	common.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"),
	//)
)
// TestTransactionSigHash originally verified the Homestead signer hashes of
// the fixture transactions. The body is entirely commented out, presumably
// because this chain's transaction format no longer matches the upstream
// expected hashes — TODO confirm before re-enabling.
func TestTransactionSigHash(t *testing.T) {
	//var homestead HomesteadSigner
	//if homestead.Hash(emptyTx) != common.HexToHash("c775b99e7ad12f50d819fcd602390467e28141316969f4b57f0626f74fe3b386") {
	//	t.Errorf("empty transaction hash mismatch, got %x", emptyTx.Hash())
	//}
	//if homestead.Hash(rightvrsTx) != common.HexToHash("fe7a79529ed5f7c3375d06b26b186a8644e0e16c373d7a12be41c62d6042b77a") {
	//	t.Errorf("RightVRS transaction hash mismatch, got %x", rightvrsTx.Hash())
	//}
}
// TestTransactionEncode originally checked the exact RLP encoding of the
// rightvrsTx fixture. The body is entirely commented out along with that
// fixture (see the var block above) — presumably obsolete after the
// transaction format changed; TODO confirm before re-enabling.
func TestTransactionEncode(t *testing.T) {
	//txb, err := rlp.EncodeToBytes(rightvrsTx)
	//if err != nil {
	//	t.Fatalf("encode error: %v", err)
	//}
	//should := common.FromHex("f86603018207d094b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a8255441ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa08887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a380808080c0")
	//if !bytes.Equal(txb, should) {
	//	t.Errorf("encoded RLP mismatch, got %x", txb)
	//}
}
// decodeTx RLP-decodes raw transaction bytes into a Transaction.
// The returned pointer is always non-nil; callers must check err.
func decodeTx(data []byte) (*Transaction, error) {
	tx := new(Transaction)
	err := rlp.Decode(bytes.NewReader(data), tx)
	return tx, err
}
// defaultTestKey returns a fixed private key and its derived address,
// giving the tests a deterministic signer.
func defaultTestKey() (*ecdsa.PrivateKey, common.Address) {
	const hexKey = "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
	key, _ := crypto.HexToECDSA(hexKey)
	return key, crypto.PubkeyToAddress(key.PublicKey)
}
// TestRecipientEmpty checks that the sender address can be recovered from a
// signed raw transaction whose recipient field is empty.
func TestRecipientEmpty(t *testing.T) {
	_, addr := defaultTestKey()
	tx, err := decodeTx(common.Hex2Bytes("f8498080808080011ca09b16de9d5bdee2cf56c28d16275a4da68cd30273e2525f3959f5d62557489921a0372ebd8fb3345f7db7b5a86d42e24d36e983e259b0664ceb8c227ec9af572f3d"))
	if err != nil {
		// t.Fatal replaces the original t.Error + t.FailNow pair — it is the
		// idiomatic single call with identical semantics.
		t.Fatal(err)
	}
	from, err := Sender(DavinciSigner{}, tx)
	if err != nil {
		t.Fatal(err)
	}
	if addr != from {
		t.Error("derived address doesn't match")
	}
}
// TestRecipientNormal checks that the sender address can be recovered from a
// signed raw transaction with a normal (non-empty) recipient.
func TestRecipientNormal(t *testing.T) {
	_, addr := defaultTestKey()
	tx, err := decodeTx(common.Hex2Bytes("f85d80808094000000000000000000000000000000000000000080011ca0527c0d8f5c63f7b9f41324a7c8a563ee1190bcbf0dac8ab446291bdbf32f5c79a0552c4ef0a09a04395074dab9ed34d3fbfb843c2f2546cc30fe89ec143ca94ca6"))
	if err != nil {
		// t.Fatal replaces the original t.Error + t.FailNow pair — it is the
		// idiomatic single call with identical semantics.
		t.Fatal(err)
	}
	from, err := Sender(DavinciSigner{}, tx)
	if err != nil {
		t.Fatal(err)
	}
	if addr != from {
		t.Error("derived address doesn't match")
	}
}
// Tests that transactions can be correctly sorted according to their price in
// decreasing order, but at the same time with increasing nonces when issued by
// the same account.
func TestTransactionPriceNonceSort(t *testing.T) {
	// Generate a batch of accounts to start with
	keys := make([]*ecdsa.PrivateKey, 25)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
	}
	signer := DavinciSigner{}
	// Generate a batch of transactions with overlapping values, but shifted nonces.
	// Account #start issues nonces start..start+24 with gas price start+i, so
	// both nonces and prices overlap across accounts.
	groups := map[common.Address]Transactions{}
	for start, key := range keys {
		addr := crypto.PubkeyToAddress(key.PublicKey)
		for i := 0; i < 25; i++ {
			tx, _ := SignTx(NewTransaction(uint64(start+i), common.Address{}, big.NewInt(100), 100, big.NewInt(int64(start+i)), nil, 0, nil, nil, nil, nil, ""), signer, key)
			groups[addr] = append(groups[addr], tx)
		}
	}
	// Sort the transactions and cross check the nonce ordering
	txset := NewTransactionsByPriceAndNonce(signer, groups)
	txs := Transactions{}
	// Drain the ordered set: Peek returns the current best transaction,
	// Shift advances past it.
	for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
		txs = append(txs, tx)
		txset.Shift()
	}
	// All 25 accounts x 25 transactions must come back out.
	if len(txs) != 25*25 {
		t.Errorf("expected %d transactions, found %d", 25*25, len(txs))
	}
	for i, txi := range txs {
		fromi, _ := Sender(signer, txi)
		// Make sure the nonce order is valid: no later transaction from the
		// same account may carry a smaller nonce.
		for j, txj := range txs[i+1:] {
			fromj, _ := Sender(signer, txj)
			if fromi == fromj && txi.Nonce() > txj.Nonce() {
				t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
			}
		}
		// Find the previous and next nonce of this account
		prev, next := i-1, i+1
		for j := i - 1; j >= 0; j-- {
			if fromj, _ := Sender(signer, txs[j]); fromi == fromj {
				prev = j
				break
			}
		}
		for j := i + 1; j < len(txs); j++ {
			if fromj, _ := Sender(signer, txs[j]); fromi == fromj {
				next = j
				break
			}
		}
		// Make sure that in between the neighbor nonces, the transaction is correctly positioned price wise:
		// everything before txi (within the window) must have a gas price >= txi's,
		// everything after must have a gas price <= txi's.
		for j := prev + 1; j < next; j++ {
			fromj, _ := Sender(signer, txs[j])
			if j < i && txs[j].GasPrice().Cmp(txi.GasPrice()) < 0 {
				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
			}
			if j > i && txs[j].GasPrice().Cmp(txi.GasPrice()) > 0 {
				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) > tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
			}
		}
	}
}
// TestTransactionJSON tests serializing/de-serializing to/from JSON.
func TestTransactionJSON(t *testing.T) {
	key, err := crypto.GenerateKey()
	if err != nil {
		t.Fatalf("could not generate key: %v", err)
	}
	signer := DavinciSigner{chainId: common.Big1}
	for i := uint64(0); i < 25; i++ {
		var tx *Transaction
		// Alternate between plain transfers and contract creations so both
		// JSON shapes get exercised.
		switch i % 2 {
		case 0:
			tx = NewTransaction(i, common.Address{1}, common.Big0, 1, common.Big2, []byte("abcdef"), 0, nil, nil, nil, nil, "")
		case 1:
			tx = NewContractCreation(i, common.Big0, 1, common.Big2, []byte("abcdef"), `[{"constant":true,"inputs":[],"name":"mybalance","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"payable":true,"stateMutability":"payable","type":"fallback"}]`,nil)
		}
		tx, err := SignTx(tx, signer, key)
		if err != nil {
			t.Fatalf("could not sign transaction: %v", err)
		}
		data, err := json.Marshal(tx)
		if err != nil {
			t.Errorf("json.Marshal failed: %v", err)
		}
		var parsedTx *Transaction
		if err := json.Unmarshal(data, &parsedTx); err != nil {
			t.Errorf("json.Unmarshal failed: %v", err)
		}
		// compare nonce, price, gaslimit, recipient, amount, payload, V, R, S
		// via the transaction hash, which covers all signed fields.
		if tx.Hash() != parsedTx.Hash() {
			t.Errorf("parsed tx differs from original tx, want %v, got %v", tx, parsedTx)
		}
		if tx.ChainId().Cmp(parsedTx.ChainId()) != 0 {
			t.Errorf("invalid chain id, want %d, got %d", tx.ChainId(), parsedTx.ChainId())
		}
	}
}
// TestTransaction_DecodeRLP decodes a raw transaction into the local
// (unexported) transaction mirror type and prints the outcome.
// NOTE(review): this test only prints and never fails regardless of whether
// decoding succeeds — confirm whether it should assert instead.
func TestTransaction_DecodeRLP(t *testing.T) {
	// errSign: a raw signed transaction expected to be malformed; the
	// commented-out rightSign is presumably a well-formed counterpart.
	errSign := "0xf872808502540be40083030d40943e106d2004a5bdc48be21c28e46c9e0c2d28d69f8ad3c21bcecceda1000000801ca0b2df725d4f5647ea4199d375e0fc1bb17363ee551a3f96842cb4817b5e35b57ca055c8ab5061c3865768838ac8f5734071327dce29042a595a4088df5379a973f2808080"
	// rightSign := "0xf872098502540be40083030d40943e106d2004a5bdc48be21c28e46c9e0c2d28d69f8ad3c21bcecceda1000000801ca0828dafbff984029dea5c8fb69e0d82a54958d542680a322cc834425c71fadf59a00c0a5777be5acfeadc00e23d183b5d88262ebf4eaa19e5e74c2c3fe4a2df016b80c080"
	encodedTx, _ := hexutil.Decode(errSign)
	fmt.Println(encodedTx)
	tx := new(transaction)
	if err := rlp.DecodeBytes(encodedTx, tx); err != nil {
		fmt.Printf("rlp decode error:%v trx:%v\n", err, tx)
	} else {
		fmt.Println(tx)
	}
}
// TestTxDifference — despite its name — exercises binary little-endian
// encoding of Vote values, not any transaction-set difference logic.
// NOTE(review): binary.Write errors are only printed, so the test never
// fails; with a nil first field in Vote the write presumably errors —
// confirm whether this is intentional exploration code.
func TestTxDifference(t *testing.T) {
	v1 := Vote{nil, 1}
	json.Marshal(v1) // result deliberately discarded — TODO confirm purpose
	v2 := Vote{nil, 2}
	v3 := Vote{nil, 3}
	buf := new(bytes.Buffer)
	var data = []interface{}{
		v1,
		v2,
		v3,
	}
	for _, v := range data {
		err := binary.Write(buf, binary.LittleEndian, v)
		if err != nil {
			fmt.Println("binary.Write failed:", err)
		}
	}
	fmt.Printf("%x", buf.Bytes())
	fmt.Println(buf.Bytes())
}
// transaction is a local mirror of the exported Transaction type, used by
// the RLP decoding experiments in this test file.
type transaction struct {
	data tdata
	// caches
	hash atomic.Value // cached transaction hash
	size atomic.Value // cached encoded size (stored as common.StorageSize in DecodeRLP)
	from atomic.Value // cached sender address
}
// tdata is the RLP/JSON payload of the local transaction mirror type.
type tdata struct {
	AccountNonce uint64          `json:"nonce"    gencodec:"required"`
	Price        *big.Int        `json:"gasPrice" gencodec:"required"`
	GasLimit     uint64          `json:"gas"      gencodec:"required"`
	Recipient    *common.Address `json:"to"       rlp:"nil"` // nil means contract creation
	Amount       *big.Int        `json:"value"    gencodec:"required"`
	Payload      []byte          `json:"input"    gencodec:"required"`
	// Signature values
	V *big.Int `json:"v" gencodec:"required"`
	R *big.Int `json:"r" gencodec:"required"`
	S *big.Int `json:"s" gencodec:"required"`
	// Extra action: 0 (default) none, 1 = proxy/agent registration,
	// 2 = vote operation. For votes, carries the vote action (i.e. which
	// candidate is voted for).
	Action uint   `json:"action" gencodec:"required"`
	Vote   []byte `json:"vote" rlp:"nil"`
	Nickname []byte `json:"nickname" rlp:"nil"`
	// This is only used when marshaling to JSON.
	Hash *common.Hash `json:"hash" rlp:"-"`
	// Asset symbol, the unique identifier of an asset. Only meaningful when
	// Action is ActionTrans.
	AssetSymbol string `json:"assetSymbol,omitempty" rlp:"nil"`
	// Asset information; only meaningful when Action is ActionPublishAsset.
	AssetInfo *AssetInfo `json:"assetInfo,omitempty" rlp:"nil"`
}
// EncodeRLP implements rlp.Encoder. Only the payload (tx.data) is encoded;
// the atomic cache fields are excluded.
func (tx *transaction) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, &tx.data)
}
// DecodeRLP implements rlp.Decoder. On success it also caches the encoded
// size of the transaction.
func (tx *transaction) DecodeRLP(s *rlp.Stream) error {
	// Peek at the next item's size before decoding so it can be cached.
	// The error from Kind is deliberately ignored; Decode below will
	// surface any stream error.
	_, size, _ := s.Kind()
	err := s.Decode(&tx.data)
	if err == nil {
		tx.size.Store(common.StorageSize(rlp.ListSize(size)))
	}
	return err
}
// TestDeriveSha round-trips a []string through JSON marshal/unmarshal and
// verifies the decoded value matches the input.
func TestDeriveSha(t *testing.T) {
	testString := []string{"12", "23", "23"}
	fmt.Println(testString)
	enc, err := json.Marshal(testString)
	if err != nil {
		// Fail the test instead of silently printing and returning, so a
		// broken round trip is actually reported.
		t.Fatalf("json.Marshal failed: %v", err)
	}
	fmt.Println(enc)
	var dec []string
	// Bug fix: json.Unmarshal requires a pointer to the destination. The
	// original passed dec by value, which always fails with
	// "json: Unmarshal(non-pointer []string)".
	if err := json.Unmarshal(enc, &dec); err != nil {
		t.Fatalf("json.Unmarshal failed: %v", err)
	}
	if len(dec) != len(testString) {
		t.Errorf("round trip mismatch: want %v, got %v", testString, dec)
	}
	fmt.Println(dec)
}
// TestDecode RLP-decodes a hex-encoded raw transaction and prints it.
func TestDecode(t *testing.T) {
	enc, err := hexutil.Decode("0xf8b10784ee6b2800830186a0940dff1151bb88110b679babcfd4599656203df7cc80b8441d834a1b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003206808080808080819ba0a00cbcfe542320ed91cbfafb52f9a420e4909110d7aaf80d53c163f0d1f58e78a034b5057cc30bdd8a48afcd2890c7bacd91e2d76aefee2284492390027307578e")
	if err != nil {
		// Fail instead of print-and-return (which made the test pass
		// silently); matches the error handling of TestRecoverFromAddress.
		t.Fatal(err)
	}
	tx := new(Transaction)
	if err := rlp.DecodeBytes(enc, tx); err != nil {
		t.Fatal(err)
	}
	fmt.Println(tx)
}
// TestRecoverFromAddress decodes a hex-encoded raw signed transaction and
// prints it. The actual sender-recovery steps are commented out below —
// presumably work in progress; TODO confirm before re-enabling.
func TestRecoverFromAddress(t *testing.T) {
	enc, err := hexutil.Decode("0xf8738084ee6b280083015f90945d17e0168071c56882b403451966a31cb508d6c9880b1a2bc2ec500000808080808080808025a030abd95402a9df641009e6a8a805df9795fc8704e2b576eaf0b6361eae4389aba0146818e069e3c3fdad2c798a2e3c9569e45bcf3608345bbbf7bbe8018ca53b81")
	if err != nil {
		t.Fatal(err)
	}
	tx := new(Transaction)
	err = rlp.DecodeBytes(enc, tx)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(tx)
	//
	//recoverPlain(s.Hash(tx), tx.data.R, tx.data.S, V, true)
	//var f DavinciSigner
	//addresses, err := f.Sender(tx)
	//if err != nil {
	//	t.Fatal(err)
	//}
	//fmt.Println(addresses.Hex())
}
// TestAssetInfoToBytes round-trips an AssetInfo value through its byte
// serialization helpers and prints the decoded result.
func TestAssetInfoToBytes(t *testing.T) {
	info := AssetInfo{
		Desc:   "aa",
		Issuer: &common.Address{},
		Name:   "DAC",
		Symbol: "A",
		Supply: big.NewInt(1111),
	}
	encoded, err := AssetInfoToBytes(info)
	if err != nil {
		t.Fatal(err)
	}
	decoded, err := BytesToAssetInfo(encoded)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(decoded)
}
// TestDacAddress verifies that a well-formed DAC address ("DAC" prefix plus a
// 40-character hex body) matches the address validation pattern.
func TestDacAddress(t *testing.T) {
	to := "DAC60aac5adbb14ea09b3a01f04b56aa8b5db420f55"
	match, err := regexp.MatchString("(?i:^DAC|0x)[0-9a-f]{40}[0-9A-Za-z]{0,32}$", to)
	if err != nil {
		t.Fatal(err)
	}
	// The original only printed the result, so the test passed even if the
	// pattern stopped matching; assert it instead.
	if !match {
		t.Errorf("expected %q to match the DAC address pattern", to)
	}
	fmt.Println(match)
	// NOTE(review): the "0x" alternative inside (?i:^DAC|0x) is not anchored
	// to the start of the string, so inputs like "zz0x<40 hex>" also match —
	// confirm whether that is intended.
}
|
// Copyright 2020 Adobe. All rights reserved.
// This file is licensed to you under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy
// of the License at http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under
// the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
// OF ANY KIND, either express or implied. See the License for the specific language
// governing permissions and limitations under the License.
package ims
import (
"fmt"
"log"
"github.com/adobe/ims-go/ims"
)
// validateGetOrganizationsConfig verifies that the fields required by
// GetOrganizations are populated and that the requested organizations API
// version is one of the supported values (v1 through v6).
func (i Config) validateGetOrganizationsConfig() error {
	supported := false
	for _, v := range []string{"v1", "v2", "v3", "v4", "v5", "v6"} {
		if i.OrgsApiVersion == v {
			supported = true
			break
		}
	}
	if !supported {
		return fmt.Errorf("invalid API version parameter, use something like v5")
	}
	if i.AccessToken == "" {
		return fmt.Errorf("missing access token parameter")
	}
	if i.URL == "" {
		return fmt.Errorf("missing IMS base URL parameter")
	}
	log.Println("all needed parameters verified not empty")
	return nil
}
// GetOrganizations requests the user's organizations using an access token.
// It returns the raw response body as a string.
func (i Config) GetOrganizations() (string, error) {
	// Validate required configuration before doing any network work.
	err := i.validateGetOrganizationsConfig()
	if err != nil {
		return "", fmt.Errorf("invalid parameters for organizations: %v", err)
	}
	httpClient, err := i.httpClient()
	if err != nil {
		return "", fmt.Errorf("error creating the HTTP Client: %v", err)
	}
	c, err := ims.NewClient(&ims.ClientConfig{
		URL:    i.URL,
		Client: httpClient,
	})
	if err != nil {
		return "", fmt.Errorf("error creating the client: %v", err)
	}
	organizations, err := c.GetOrganizations(&ims.GetOrganizationsRequest{
		AccessToken: i.AccessToken,
		ApiVersion:  i.OrgsApiVersion,
	})
	if err != nil {
		return "", err
	}
	return string(organizations.Body), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.