text stringlengths 11 4.05M |
|---|
package webknest
import (
"time"
)
// User contains login credentials and details about their profile including
// subscription type, which will dictate certain capabilities.
type User struct {
	ID       int    `json:"id"`
	Username string `json:"username"`
	// Password is excluded from JSON serialization ("-" tag).
	Password         string `json:"password" json:"-"`
	FirstName        string `json:"first_name"`
	LastName         string `json:"last_name"`
	Email            string `json:"email"`
	SubscriptionType int    `json:"sub_type"`
}
// DetailUpdate allows for easy updating of user details.
// It carries only the mutable profile fields of a User.
type DetailUpdate struct {
	FirstName        string `json:"first_name"`
	LastName         string `json:"last_name"`
	SubscriptionType int    `json:"sub_type"`
}
// PasswordUpdate allows for easy sole modification of passwords.
// The current password is included so the change can be authorized.
type PasswordUpdate struct {
	CurrentPassword string `json:"current_password"`
	NewPassword     string `json:"new_password"`
}
// UserService is the interface through which the handlers will interact with
// the user data model.
type UserService interface {
	// ListBySubscriptionType returns all users with the given subscription type.
	ListBySubscriptionType(int) ([]*User, error)
	// GetByID fetches a single user by numeric ID.
	GetByID(int) (*User, error)
	// GetByUsername fetches a single user by username.
	GetByUsername(string) (*User, error)
	// Create stores a new user and returns the assigned ID.
	Create(*User) (int, error)
	// UpdateDetails applies a DetailUpdate to the given user.
	UpdateDetails(*User, *DetailUpdate) error
	// ChangePassword updates a user's password given user ID,
	// current password, and new password (order assumed — TODO confirm
	// against the implementation).
	ChangePassword(int, string, string) error
	// ChangeEmail updates the email for the given user ID.
	ChangeEmail(int, string) error
	// Delete removes the user with the given ID.
	Delete(int) error
}
// Folder is the main construct that will be associated with users.
// It records where the folder's contents live in S3 and its
// processing/download state.
type Folder struct {
	OwnerID    int       `json:"owner"`
	FolderName string    `json:"foldername"`
	S3Path     string    `json:"s3_path"`
	UploadTime time.Time `json:"upload_time"`
	// NumElements is the number of items stored in the folder.
	NumElements int  `json:"num_elements"`
	Completed   bool `json:"completed"`
	Downloaded  bool `json:"downloaded"`
}
// FolderUpdate allows for easy modification of the two important flags
// (completed and downloaded) without touching other folder fields.
type FolderUpdate struct {
	Completed  bool `json:"completed"`
	Downloaded bool `json:"downloaded"`
}
// FolderService is the interface through which the handlers will interact with
// the folder data model.
type FolderService interface {
	// ListByUser returns folder data for a user ID.
	// NOTE(review): despite the "List" name it returns a single *Folder,
	// not a slice — confirm whether []*Folder was intended.
	ListByUser(int) (*Folder, error)
	// GetByName fetches a folder by owner ID and folder name.
	GetByName(int, string) (*Folder, error)
	// Create stores a new folder and returns the assigned ID.
	Create(*Folder) (int, error)
	// Update applies a FolderUpdate's flags to the folder.
	Update(*Folder, *FolderUpdate) error
	// Delete removes a folder identified by owner ID and name.
	Delete(int, string) error
}
|
package main
import (
"fmt"
"strings"
)
/*
There are N dominoes in a line, and we place each domino vertically upright.
In the beginning, we simultaneously push some of the dominoes either to the left or to the right.
After each second, each domino that is falling to the left pushes the adjacent domino on the left.
Similarly, the dominoes falling to the right push their adjacent dominoes standing on the right.
When a vertical domino has dominoes falling on it from both sides, it stays still due to the balance of the forces.
For the purposes of this question, we will consider that a falling domino expends no additional force to a falling or already fallen domino.
Given a string "S" representing the initial state. S[i] = 'L', if the i-th domino has been pushed to the left; S[i] = 'R', if the i-th domino has been pushed to the right; S[i] = '.', if the i-th domino has not been pushed.
Return a string representing the final state.
Example 1:
Input: ".L.R...LR..L.."
Output: "LL.RR.LLRRLL.."
Example 2:
Input: "RR.L"
Output: "RR.L"
Explanation: The first domino expends no additional force on the second domino.
Note:
0 <= N <= 10^5
String dominoes contains only 'L', 'R' and '.'
*/
// pushDominoes returns the final state of a row of dominoes after every
// pushed domino ('L' or 'R') has finished falling. '.' marks a standing
// domino; opposing forces meeting over an odd gap leave the middle
// domino upright.
//
// The original implementation never terminated (the first loop only
// advanced the index for '.' characters), indexed out of range, and
// returned the input unchanged. This version sweeps between consecutive
// force characters in O(n), using virtual 'L'/'R' sentinels at the ends
// so every segment is bounded by a force on both sides.
func pushDominoes(dominoes string) string {
	// Sentinels: a leading 'L' and trailing 'R' push nothing inward, so
	// they leave the outer '.' runs intact unless matched by a real
	// force of the same direction.
	s := "L" + dominoes + "R"
	var sb strings.Builder
	sb.Grow(len(dominoes))
	left := 0
	for right := 1; right < len(s); right++ {
		if s[right] == '.' {
			continue
		}
		gap := right - left - 1 // standing dominoes between the two forces
		switch {
		case s[left] == s[right]:
			// Same direction: everything in between falls that way.
			sb.WriteString(strings.Repeat(string(s[right]), gap))
		case s[left] == 'R':
			// 'R'...'L': fall inward; with an odd gap the center stays up.
			sb.WriteString(strings.Repeat("R", gap/2))
			if gap%2 == 1 {
				sb.WriteByte('.')
			}
			sb.WriteString(strings.Repeat("L", gap/2))
		default:
			// 'L' then 'R': the forces point away from the gap.
			sb.WriteString(strings.Repeat(".", gap))
		}
		// Emit the force itself unless it is the trailing sentinel.
		if right < len(s)-1 {
			sb.WriteByte(s[right])
		}
		left = right
	}
	return sb.String()
}
// main demonstrates pushDominoes on a tiny example.
func main() {
	final := pushDominoes(".RL")
	fmt.Println(final)
}
|
package dbServer
import (
"testing"
)
// TestMysqlApi_GetWxApp verifies that GetWxApp resolves each known
// WeChat app ID to its stored app secret.
//
// Improvements over the original: the error return of GetWxApp is no
// longer silently discarded, and failure messages include the app ID,
// the actual value, and the expected value so a failing case can be
// identified directly from the test output.
func TestMysqlApi_GetWxApp(t *testing.T) {
	tests := []struct{ appId, appSec string }{
		{"wx3be7b35d2d7a8256", "2"},
		{"wx293dbb0f011bcac3", "3"},
	}
	mysqlApi := CreateMysqlApi()
	for _, tt := range tests {
		appSec, err := mysqlApi.GetWxApp(tt.appId)
		if err != nil {
			t.Errorf("GetWxApp(%q) returned error: %v", tt.appId, err)
			continue
		}
		if appSec != tt.appSec {
			t.Errorf("GetWxApp(%q) = %q, want %q", tt.appId, appSec, tt.appSec)
		}
	}
}
|
package packet
// Metadata carries the raw bytes of a packet.
type Metadata struct {
	// Packet holds the raw packet payload; nil when reset.
	Packet []byte
}
// Reset clears the metadata, dropping the packet buffer so it can be
// garbage collected.
func (m *Metadata) Reset() {
	m.Packet = nil
}
|
package jsonutils
import (
"encoding/json"
"github.com/joshprzybyszewski/cribbage/model"
)
// UnmarshalGame deserializes the JSON encoding of a model.Game.
// Each action is round-tripped through UnmarshalPlayerAction so that it
// is decoded into its concrete action type rather than a generic
// interface{} value, and the per-player maps are initialized when
// absent so callers never observe a nil map.
func UnmarshalGame(b []byte) (model.Game, error) {
	var game model.Game
	if err := json.Unmarshal(b, &game); err != nil {
		return model.Game{}, err
	}

	// Re-decode every action into its concrete PlayerAction type.
	for i, action := range game.Actions {
		raw, err := json.Marshal(action)
		if err != nil {
			return model.Game{}, err
		}
		concrete, err := UnmarshalPlayerAction(raw)
		if err != nil {
			return model.Game{}, err
		}
		game.Actions[i] = concrete
	}

	// Guarantee non-nil maps, sized for the player count.
	if game.Hands == nil {
		game.Hands = make(map[model.PlayerID][]model.Card, len(game.Players))
	}
	if game.BlockingPlayers == nil {
		game.BlockingPlayers = make(map[model.PlayerID]model.Blocker, len(game.Players))
	}
	if game.PlayerColors == nil {
		game.PlayerColors = make(map[model.PlayerID]model.PlayerColor, len(game.Players))
	}
	return game, nil
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bricker
import (
"github.com/dirkjabl/bricker/connector"
)
// Attach adds a named connector to the bricker.
// The name must be unique; attaching under an already-registered name
// returns an error. The first connector ever attached becomes the
// default ("first") connector.
func (b *Bricker) Attach(c connector.Connector, n string) error {
	if _, exists := b.connection[n]; exists {
		return NewError(ErrorConnectorNameExists)
	}
	b.connection[n] = c
	if b.first == "" {
		b.first = n // remember the first attached connector as the default
	}
	// Start a worker goroutine that handles incoming events from c.
	go b.read(c, n)
	return nil
}
// Release takes a connector from the bricker.
// It returns an error when no connector is registered under n.
// If the released connector was the default ("first") connector,
// another registered connector (if any remains) is promoted to be the
// new default, so lookups that fall back to the default keep working.
// This implements the TODO the original left open (it set b.first to ""
// even while other connectors were still attached).
func (b *Bricker) Release(n string) error {
	if _, ok := b.connection[n]; !ok { // name does not exist
		return NewError(ErrorNoConnectorToRelease)
	}
	delete(b.connection, n)
	if n == b.first {
		// Promote an arbitrary remaining connector; map iteration order
		// is unspecified, but any registered connector is a valid default.
		b.first = ""
		for name := range b.connection {
			b.first = name
			break
		}
	}
	return nil
}
// computeConnectorsName resolves d to a connector name.
// d may be the name itself (string), a device uid (uint32) mapped to a
// connector, or anything else (including nil), in which case the first
// registered connector's name is returned.
func (b *Bricker) computeConnectorsName(d interface{}) string {
	if name, ok := d.(string); ok {
		return name
	}
	if uid, ok := d.(uint32); ok {
		if name, known := b.uids[uid]; known {
			return name
		}
		// Unknown uid: fall through to the default connector.
	}
	return b.first
}
|
// Copyright 2018 SumUp Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package content
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/sumup-oss/go-pkgs/os/ostest"
"github.com/sumup-oss/vaulted/pkg/aes"
testAes "github.com/sumup-oss/vaulted/pkg/aes/test"
"github.com/sumup-oss/vaulted/pkg/base64"
"github.com/sumup-oss/vaulted/pkg/base64/test"
"github.com/sumup-oss/vaulted/pkg/pkcs7"
"github.com/sumup-oss/vaulted/pkg/rsa"
"github.com/sumup-oss/vaulted/pkg/vaulted/passphrase"
)
// TestNewLegacyEncryptedContentService verifies that the constructor
// stores the supplied base64 and AES services in the returned struct.
func TestNewLegacyEncryptedContentService(t *testing.T) {
	t.Run(
		"it creates a new LegacyEncryptedContentService with 'base64Service' and 'aesService' arguments",
		func(t *testing.T) {
			t.Parallel()
			base64Svc := base64.NewBase64Service()
			aesSvc := aes.NewAesService(
				pkcs7.NewPkcs7Service(),
			)
			actual := NewLegacyEncryptedContentService(
				base64Svc,
				aesSvc,
			)
			// The exact instances must be wired into the service.
			assert.Equal(t, base64Svc, actual.base64Service)
			assert.Equal(t, aesSvc, actual.aesService)
		},
	)
}
// TestEncryptedContentService_Serialize covers Serialize: the base64
// failure path (with a mocked base64 service) and the success path
// (compared against an independent real base64 encoding).
func TestEncryptedContentService_Serialize(t *testing.T) {
	t.Run(
		"when base64 encoding of 'encryptedContent' fails, it returns an error",
		func(t *testing.T) {
			t.Parallel()
			mockBase64Svc := &test.MockBase64Service{}
			encContent := NewEncryptedContent(
				[]byte("1a2b3c4d"),
			)
			fakeErr := errors.New("serializeErr")
			// The mock fails, so svc.Serialize must propagate the error as-is.
			mockBase64Svc.On(
				"Serialize",
				encContent.Ciphertext,
			).Return(nil, fakeErr)
			svc := NewLegacyEncryptedContentService(
				mockBase64Svc,
				aes.NewAesService(
					pkcs7.NewPkcs7Service(),
				),
			)
			actualReturn, actualErr := svc.Serialize(encContent)
			require.Nil(t, actualReturn)
			assert.Equal(t, fakeErr, actualErr)
			mockBase64Svc.AssertExpectations(t)
		},
	)
	t.Run(
		"when base64 encoding of 'encryptedContent' succeeds, it returns it base64 encoded",
		func(t *testing.T) {
			t.Parallel()
			// Separate service instance used to compute the expected encoding.
			b64Service := base64.NewBase64Service()
			svc := NewLegacyEncryptedContentService(
				base64.NewBase64Service(),
				aes.NewAesService(
					pkcs7.NewPkcs7Service(),
				),
			)
			encryptedContent := NewEncryptedContent(
				[]byte(
					"1a2b3c4d"),
			)
			expectedReturn, err := b64Service.Serialize(encryptedContent.Ciphertext)
			require.Nil(t, err)
			actualReturn, actualErr := svc.Serialize(encryptedContent)
			require.Nil(t, actualErr)
			assert.Equal(t, expectedReturn, actualReturn)
		},
	)
}
// TestEncryptedContentService_Deserialize covers Deserialize: the
// base64 decoding failure path (mocked, asserting on the wrapped error
// message) and the round-trip success path.
func TestEncryptedContentService_Deserialize(t *testing.T) {
	t.Run(
		"when base64 decoding of 'encoded' fails, it returns an error",
		func(t *testing.T) {
			t.Parallel()
			mockBase64Svc := &test.MockBase64Service{}
			encodedArg := []byte("1a2b3c4d")
			fakeErr := errors.New("serializeErr")
			mockBase64Svc.On(
				"Deserialize",
				encodedArg,
			).Return(nil, fakeErr)
			svc := NewLegacyEncryptedContentService(
				mockBase64Svc,
				aes.NewAesService(
					pkcs7.NewPkcs7Service(),
				),
			)
			actualReturn, actualErr := svc.Deserialize(encodedArg)
			require.Nil(t, actualReturn)
			// Deserialize wraps the underlying error with context.
			assert.Contains(
				t,
				actualErr.Error(),
				"failed to deserialize base64 encoded encrypted content",
			)
			mockBase64Svc.AssertExpectations(t)
		},
	)
	t.Run(
		"when base64 decoding of 'encoded' succeeds, it returns it encrypted content",
		func(t *testing.T) {
			t.Parallel()
			b64Service := base64.NewBase64Service()
			svc := NewLegacyEncryptedContentService(
				b64Service,
				aes.NewAesService(
					pkcs7.NewPkcs7Service(),
				),
			)
			ciphertext := []byte("1a2b3c4d")
			encryptedPassphrase := NewEncryptedContent(ciphertext)
			// Encode with the same base64 service, then decode via the
			// service under test and expect the original ciphertext back.
			encoded, err := b64Service.Serialize(encryptedPassphrase.Ciphertext)
			require.Nil(t, err)
			actualReturn, actualErr := svc.Deserialize(encoded)
			require.Nil(t, actualErr)
			assert.Equal(t, ciphertext, actualReturn.Ciphertext)
		},
	)
}
// TestEncryptedContentService_Encrypt covers Encrypt: AES failure
// (mocked EncryptCBC) and success (real AES service; asserts the
// ciphertext does not leak the plaintext).
func TestEncryptedContentService_Encrypt(t *testing.T) {
	t.Run(
		"when encryption of 'content' fails, it returns error",
		func(t *testing.T) {
			t.Parallel()
			b64Svc := base64.NewBase64Service()
			osExecutor := ostest.NewFakeOsExecutor(t)
			encPassphraseSvc := passphrase.NewEncryptedPassphraseService(
				b64Svc,
				rsa.NewRsaService(osExecutor),
			)
			// 16-byte passphrase, as the AES service would expect.
			passphraseArg, err := encPassphraseSvc.GeneratePassphrase(16)
			require.Nil(t, err)
			contentArg := NewContent(
				[]byte("hello"),
			)
			fakeErr := errors.New("fakeEncryptError")
			mockAesSvc := &testAes.MockAesService{}
			mockAesSvc.Test(t)
			mockAesSvc.On(
				"EncryptCBC",
				passphraseArg.Content,
				[]byte(
					contentArg.Plaintext,
				),
			).Return(
				nil,
				fakeErr,
			)
			encryptedContentSvc := NewLegacyEncryptedContentService(
				b64Svc,
				mockAesSvc,
			)
			actualReturn, actualErr := encryptedContentSvc.Encrypt(
				passphraseArg,
				contentArg,
			)
			require.Nil(t, actualReturn)
			// Encrypt wraps the AES error; only containment is asserted.
			assert.Contains(t, actualErr.Error(), fakeErr.Error())
			mockAesSvc.AssertExpectations(t)
		},
	)
	t.Run(
		"when encryption of 'content' succeeds, it returns encrypted content",
		func(t *testing.T) {
			t.Parallel()
			b64Svc := base64.NewBase64Service()
			osExecutor := ostest.NewFakeOsExecutor(t)
			encPassphraseSvc := passphrase.NewEncryptedPassphraseService(
				b64Svc,
				rsa.NewRsaService(osExecutor),
			)
			passphraseArg, err := encPassphraseSvc.GeneratePassphrase(16)
			require.Nil(t, err)
			contentArg := NewContent(
				[]byte("hello"),
			)
			aesSvc := aes.NewAesService(
				pkcs7.NewPkcs7Service(),
			)
			encryptedContentSvc := NewLegacyEncryptedContentService(
				b64Svc,
				aesSvc,
			)
			actualReturn, actualErr := encryptedContentSvc.Encrypt(
				passphraseArg,
				contentArg,
			)
			require.Nil(t, actualErr)
			// The ciphertext must not contain the plaintext verbatim.
			assert.NotContains(
				t,
				string(
					actualReturn.Ciphertext,
				),
				string(
					contentArg.Plaintext,
				),
			)
			assert.IsType(
				t,
				actualReturn,
				&EncryptedContent{},
			)
		},
	)
}
// TestEncryptedContentService_Decrypt covers Decrypt: AES failure
// (mocked DecryptCBC) and a full encrypt-then-decrypt round trip.
func TestEncryptedContentService_Decrypt(t *testing.T) {
	t.Run(
		"when decryption of 'encryptedContent' fails, it returns error",
		func(t *testing.T) {
			t.Parallel()
			b64Svc := base64.NewBase64Service()
			osExecutor := ostest.NewFakeOsExecutor(t)
			encPassphraseSvc := passphrase.NewEncryptedPassphraseService(
				b64Svc,
				rsa.NewRsaService(osExecutor),
			)
			passphraseArg, err := encPassphraseSvc.GeneratePassphrase(16)
			require.Nil(t, err)
			encryptedContentArg := NewEncryptedContent(
				[]byte("1a2b3c4"),
			)
			fakeErr := errors.New("fakeDecryptError")
			mockAesSvc := &testAes.MockAesService{}
			mockAesSvc.Test(t)
			mockAesSvc.On(
				"DecryptCBC",
				passphraseArg.Content,
				[]byte(
					encryptedContentArg.Ciphertext,
				),
			).Return(
				nil,
				fakeErr,
			)
			encryptedContentSvc := NewLegacyEncryptedContentService(
				b64Svc,
				mockAesSvc,
			)
			actualReturn, actualErr := encryptedContentSvc.Decrypt(
				passphraseArg,
				encryptedContentArg,
			)
			require.Nil(t, actualReturn)
			// Decrypt wraps the AES error; only containment is asserted.
			assert.Contains(t, actualErr.Error(), fakeErr.Error())
			mockAesSvc.AssertExpectations(t)
		},
	)
	t.Run(
		"when decryption of 'encryptedContent' succeeds, it returns decrypted content",
		func(t *testing.T) {
			t.Parallel()
			aesSvc := aes.NewAesService(
				pkcs7.NewPkcs7Service(),
			)
			b64Svc := base64.NewBase64Service()
			osExecutor := ostest.NewFakeOsExecutor(t)
			encPassphraseSvc := passphrase.NewEncryptedPassphraseService(
				b64Svc,
				rsa.NewRsaService(osExecutor),
			)
			passphraseArg, err := encPassphraseSvc.GeneratePassphrase(16)
			require.Nil(t, err)
			contentArg := NewContent(
				[]byte("hello"),
			)
			encryptedContentSvc := NewLegacyEncryptedContentService(
				b64Svc,
				aesSvc,
			)
			// Round trip: encrypt with the real service, then decrypt and
			// expect the original plaintext back.
			encryptedContentArg, err := encryptedContentSvc.Encrypt(
				passphraseArg,
				contentArg,
			)
			require.Nil(t, err)
			actualReturn, actualErr := encryptedContentSvc.Decrypt(
				passphraseArg,
				encryptedContentArg,
			)
			require.Nil(t, actualErr)
			assert.Equal(t, contentArg.Plaintext, actualReturn.Plaintext)
		},
	)
}
|
package cmd
import (
"fmt"
"log"
"github.com/spf13/cobra"
)
// drivercountCmd represents the drivercount command: it obtains an API
// client, fetches driver count statistics, and prints them. Any error
// obtaining the client or the counts aborts the process via log.Fatal.
var drivercountCmd = &cobra.Command{
	Use:   "drivercount",
	Short: "A brief description of your command",
	Run: func(cmd *cobra.Command, args []string) {
		c, err := getClient()
		if err != nil {
			log.Fatal(err)
		}
		ct, err := c.GetDriverCounts()
		if err != nil {
			log.Fatal(err)
		}
		// NOTE(review): LapCount is printed with %s, so it is presumably a
		// string field on the client's count struct — confirm.
		fmt.Printf("Total: %d, Mine: %d, Total Laps: %s\n", ct.Total, ct.Myracers, ct.LapCount)
	},
}
// init registers the drivercount command on the root command.
func init() {
	rootCmd.AddCommand(drivercountCmd)
}
|
package main
// main is a scratchpad of Go language notes. The only executable
// statements are three constant declarations, so running it produces
// no observable behavior.
func main() {
	// --- Variable declarations ---
	//   var a int        // declares a with the zero value 0
	//   var b = 10       // declares and initializes; type is inferred
	//   c := 20          // short declaration (function scope only)
	// Multiple variables can be declared together:
	//   var a1, b1 string = "HENG", "HA"
	//   var a2, b2 = 1, 2
	//   c, d := 2, 3
	// Every type has a default (zero) value: numeric types 0, bool
	// false, string "". rune is an alias for int32, byte for uint8.
	//
	// Values can be swapped without a temporary: m, n = n, m.
	// The blank identifier _ discards any value assigned to it; it uses
	// no name space and allocates no storage: _, b := 34, 35.
	//
	// With :=, at least one variable on the left must be new:
	//   in, err := os.Open(file)
	//   out, err := os.Create(file) // ok: err is merely re-assigned
	//   in, err := os.Create(file)  // compile error: nothing new
	// := only acts as assignment for variables declared in the same
	// lexical scope; otherwise it declares a new (shadowing) variable.

	// --- Constants: values fixed at declaration ---
	const A = 34
	const PI float32 = 3.1415
	const mark = 1 << 3

	// --- Types ---
	// Value types: integers, floats, complex numbers, bool, string,
	// arrays, structs. Reference types (an indirect reference to shared
	// state, so mutation is visible through every copy): pointers,
	// slices, maps, functions, channels, interfaces.
	//
	// Common fmt verbs: %% literal percent, %b binary, %c character,
	// %d decimal, %e/%E scientific notation, %f fixed-point float,
	// %o octal, %p address, %s string, %T type of the value.
	//
	// type name underlying  // declares a new named type
	// const/var blocks group related declarations in parentheses.
	//
	// iota auto-increments per line inside a const block and resets to
	// 0 in each new block; constants listed on the same line share the
	// same iota value:
	//   const (
	//       g       = iota             // 0
	//       h, i, j = iota, iota, iota // 1, 1, 1
	//   )
}
|
package main
import (
"runtime"
"sync"
"testing"
"time"
)
// TestSyncInit checks that a freshly created semaphore (capacity 4)
// starts with zero acquired permits.
func TestSyncInit(t *testing.T) {
	s := newSema(4)
	if s.count() != 0 {
		t.Fatal("sema count should be 0")
	}
}
// TestSyncAcquireSimpleValid acquires a capacity-2 semaphore from two
// goroutines and expects both to succeed without blocking, leaving the
// count at 2.
func TestSyncAcquireSimpleValid(t *testing.T) {
	s := newSema(2)
	var wg sync.WaitGroup
	wg.Add(2)
	for i := 0; i < 2; i++ {
		go func() {
			defer wg.Done()
			s.acquire()
		}()
	}
	wg.Wait()
	size := s.count()
	if size != 2 {
		t.Fatal("sema count should be 2 but was", size)
	}
}
// TestSyncAcquireBlocking verifies that a second acquire on a
// capacity-1 semaphore blocks until the first holder releases.
// time1 is stamped just before release, time2 just after the blocked
// acquire returns, so time1 must precede time2.
// NOTE(review): the ordering guarantee relies on the 2-second sleep
// giving the inner goroutine time to block; timing-based, so it could
// in principle be flaky on a heavily loaded machine.
func TestSyncAcquireBlocking(t *testing.T) {
	s := newSema(1)
	var wg sync.WaitGroup
	wg.Add(2)
	var time1 time.Time
	var time2 time.Time
	go func() {
		defer wg.Done()
		s.acquire()
		go func() {
			defer wg.Done()
			s.acquire() // blocks: capacity 1 is already held
			time2 = time.Now()
		}()
		time.Sleep(2 * time.Second)
		time1 = time.Now()
		s.release()
	}()
	wg.Wait()
	if !time1.Before(time2) {
		t.Fatal("time2 should have been after time1")
	}
}
// TestSyncReleaseSimpleValid checks the count after one acquire (1) and
// after the matching release (0) on a capacity-1 semaphore.
func TestSyncReleaseSimpleValid(t *testing.T) {
	s := newSema(1)
	s.acquire()
	size := s.count()
	if size != 1 {
		t.Fatal("sema count should be 1 but is", size)
	}
	s.release()
	size = s.count()
	if size != 0 {
		t.Fatal("sema count should be 0 but is", size)
	}
}
// TestSyncReleaseSimpleEmpty checks that releasing a semaphore that
// holds no permits does not block: both releasing goroutines must have
// finished within the 4-second grace period.
// NOTE(review): finished1/finished2 are written by the goroutines and
// read here without synchronization — a data race under -race; confirm
// whether atomics or channels were intended.
func TestSyncReleaseSimpleEmpty(t *testing.T) {
	s := newSema(0)
	finished1 := false
	finished2 := false
	go func(finished *bool) {
		s.release()
		*finished = true
	}(&finished1)
	go func(finished *bool) {
		s.release()
		*finished = true
	}(&finished2)
	time.Sleep(4 * time.Second)
	if !(finished1 && finished2) {
		t.Fatal("releasing an empty semaphore timed out", finished1, finished2)
	}
}
// TestSyncBlocking feeds 10 acquisitions through a capacity-5 semaphore
// whose holders only release once canRelease flips, then checks that no
// more than 5 worker goroutines were running at once (via a goroutine
// count against a baseline).
// NOTE(review): canRelease and numRoutinesMade are shared across
// goroutines without synchronization (data race under -race), and the
// final `canRelease = true` happens as the test returns, leaking the
// still-blocked goroutines into subsequent tests.
// NOTE(review): the failure condition uses && — the test only fails
// when BOTH the goroutine count exceeds the limit AND numRoutinesMade
// differs from 5; confirm whether || was intended.
func TestSyncBlocking(t *testing.T) {
	s := newSema(5)
	canRelease := false
	// +1 accounts for the feeder goroutine started below.
	baseline := runtime.NumGoroutine() + 1
	numRoutinesMade := 0
	go func(s *Sema, canRelease *bool, numRoutinesMade *int) {
		for i := 0; i < 10; i++ {
			s.acquire()
			go func(s *Sema, canRelease *bool, numRoutinesMade *int) {
				(*numRoutinesMade)++
				for !(*canRelease) {
					time.Sleep(time.Millisecond)
				}
				s.release()
			}(s, canRelease, numRoutinesMade)
		}
	}(s, &canRelease, &numRoutinesMade)
	time.Sleep(3 * time.Second)
	limit := baseline + 5
	total := runtime.NumGoroutine()
	if total > limit && numRoutinesMade != 5 {
		t.Fatal("there should be at most", limit, "go routines but there are", total)
	}
	canRelease = true
}
|
package main
import "fmt"
//Bill Kennedy teaches online intermediate classes of golang
//We create VALUES of a certain type that are stored in VARIABLES
//and those VARIABLES have identifiers

// x has static type int; x is the identifier, int the type.
var x int

// person groups a first and last name; var/type are keywords,
// person is the identifier.
type person struct {
	first string
	last  string
}

// foo is a named type whose underlying type is int.
type foo int

// y is a variable of the named type foo.
var y foo

// bar is a typed constant: identifier bar, type int, value 42.
const bar int = 42
// main constructs a person with a composite literal and prints it, then
// demonstrates named types and typed constants with %T.
func main() {
	bond := person{
		first: "James",
		last:  "bond",
	}
	fmt.Println(bond)

	// Assigning the untyped constant 42 to the package-level y: the
	// compiler determines the value from foo's base type (a constant
	// of a kind).
	y = 42
	fmt.Printf("%T\n", int(y)) // conversion applied to y before printing its type
	fmt.Printf("%T\n", bar)
	fmt.Println(bar)
}
//Is go an Object-oriented language?
//Yes and no. Although Go has types and methods and allows an object-oriented style of programming, there is no type hierarchy. The concept of “interface” in Go provides a different approach that we believe is easy to use and in some ways more general. There are also ways to embed types in other types to provide something analogous—but not identical—to subclassing. Moreover, methods in Go are more general than in C++ or Java: they can be defined for any sort of data, even built-in types such as plain, “unboxed” integers. They are not restricted to structs (classes).
//Also, the lack of a type hierarchy makes “objects” in Go feel much more lightweight than in languages such as C++ or Java.
|
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"github.com/nlopes/slack"
"io"
"log"
"os"
"os/exec"
"strconv"
"strings"
"sync/atomic"
"syscall"
"time"
"unicode"
)
// Process wraps os.Process with extra bookkeeping gathered from lsof.
type Process struct {
	*os.Process
	// Tty is declared but not populated in this file — TODO confirm its use.
	Tty string
	// Cwd is the process's current working directory (from lsof output).
	Cwd string
}
// SlackMessage holds the Slack credentials/target used for notifications.
type SlackMessage struct {
	// user is the Slack user ID to DM.
	user string
	// token is the Slack bot token used to authenticate API calls.
	token string
}
// ErrInvalidNumber is returned when the user's menu selection is not a valid index.
var ErrInvalidNumber = fmt.Errorf("please enter a valid number")

// ErrProcNotRunning is returned when a signal-0 probe of the process fails.
var ErrProcNotRunning = fmt.Errorf("error: process is not running")
// FindByPid resolves pid to a *Process and fills in its working
// directory by parsing the output of `lsof -p <pid>`.
//
// Fix: the original indexed words[3] (and words[8:]) unconditionally,
// which panics on any lsof output line with fewer fields (e.g. a blank
// line); the row is now length-checked before the columns are read.
func FindByPid(pid int) (*Process, error) {
	proc := new(Process)
	var err error
	proc.Process, err = os.FindProcess(pid)
	if err != nil {
		return nil, err
	}
	pidStr := strconv.Itoa(proc.Pid)
	lsofOutput, err := exec.Command("lsof", "-p", pidStr).Output()
	if err != nil {
		return nil, err
	}
	scanner := bufio.NewScanner(bytes.NewReader(lsofOutput))
	for scanner.Scan() {
		words := strings.FieldsFunc(scanner.Text(), unicode.IsSpace)
		// The FD column (index 3) reads "cwd" on the row describing the
		// current working directory; the path starts at column 8.
		if len(words) > 8 && words[3] == "cwd" {
			proc.Cwd = strings.TrimSpace(strings.Join(words[8:], " "))
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return proc, nil
}
// FindByName lists processes whose `ps -e` line contains name
// (case-insensitive), shows a numbered menu, reads the user's choice
// from stdin, and resolves the chosen entry to a *Process via FindByPid.
//
// Fixes: the menu is now written to the injected stdout writer (the
// original used fmt.Printf, bypassing the parameter and breaking
// testability), and the selection is bounds-checked on the upper end
// (the original only rejected negatives and would panic on an index
// >= len(names)).
func FindByName(stdout io.Writer, stdin io.Reader, name string) (*Process, error) {
	psOutput, err := exec.Command("ps", "-e").Output()
	if err != nil {
		return nil, err
	}
	lowercaseOutput := bytes.ToLower(psOutput)
	var names []string
	scanner := bufio.NewScanner(bytes.NewReader(lowercaseOutput))
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, name) {
			names = append(names, line)
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	for i, candidate := range names {
		_, err = fmt.Fprintf(stdout, "%d: %s\n", i, candidate)
		checkErr(err)
	}
	procNumber := -1
	_, err = fmt.Fprintln(stdout, "\nThe correct process number:")
	checkErr(err)
	_, err = fmt.Fscanf(stdin, "%d", &procNumber)
	checkErr(err)
	if procNumber < 0 || procNumber >= len(names) {
		return nil, ErrInvalidNumber
	}
	// The first whitespace-separated field of a ps line is the pid.
	pid, err := strconv.Atoi(strings.TrimSpace(
		strings.FieldsFunc(names[procNumber], unicode.IsSpace)[0]),
	)
	if err != nil {
		return nil, err
	}
	return FindByPid(pid)
}
// HealthCheck reports whether the process is still alive by sending it
// signal 0 (a no-op probe); it returns ErrProcNotRunning when the
// signal cannot be delivered.
func (p *Process) HealthCheck() error {
	if err := p.Signal(syscall.Signal(0)); err != nil {
		return ErrProcNotRunning
	}
	return nil
}
// main parses flags, locates the target process by pid or by name,
// verifies it is alive, then polls it every -interval milliseconds
// until it exits, at which point a Slack DM is sent to -user.
func main() {
	pid := flag.Int("pid", -1, "the pid of the process to follow")
	slackToken := flag.String("token", "", "Slack bot token")
	slackUser := flag.String("user", "", "Slack ID")
	interval := flag.Int("interval", 100, "interval for health checking the process in milliseconds")
	procName := flag.String("name", "", "the name of the process to find a pid for")
	flag.Parse()
	// One of -pid / -name is mandatory.
	if *pid == -1 && *procName == "" {
		log.Fatalf("pid or name flag not specified")
	}
	slackMessage := SlackMessage{user: *slackUser, token: *slackToken}
	var err error
	var proc *Process
	if *pid != -1 {
		proc, err = FindByPid(*pid)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		proc, err = FindByName(os.Stdout, os.Stdin, *procName)
		if err != nil {
			log.Fatalln(err)
		}
	}
	// Fail fast if the process is already gone.
	if err := proc.HealthCheck(); err != nil {
		log.Fatalln(err)
	}
	// NOTE(review): prints the raw struct; looks like leftover debug output — confirm.
	fmt.Print(proc)
	var running int64
	// NOTE(review): running is never written after its declaration, so
	// the atomic load is always 0 and the guard is always taken —
	// confirm whether something elsewhere was meant to set it.
	for {
		if atomic.LoadInt64(&running) == 0 {
			err = proc.HealthCheck()
			if err != nil {
				// Process exited: notify via Slack and stop polling.
				message := fmt.Sprintf("Process with pid %d finished", proc.Process.Pid)
				err := slackMessage.sendSlack(message)
				checkErr(err)
				fmt.Println("process finished")
				break
			}
		}
		time.Sleep(time.Millisecond * time.Duration(*interval))
	}
}
// sendSlack opens (or reuses) a direct-message channel with the
// configured user and posts msg to it.
//
// Fix: the original passed the OpenIMChannel error to checkErr (which
// only logs) and then proceeded to post to an empty channel ID; the
// error is now propagated to the caller instead.
func (slackMessage *SlackMessage) sendSlack(msg string) error {
	api := slack.New(slackMessage.token)
	userID := slackMessage.user
	_, _, channelID, err := api.OpenIMChannel(userID)
	if err != nil {
		return err
	}
	_, _, err = api.PostMessage(channelID, msg, slack.PostMessageParameters{})
	if err != nil {
		return err
	}
	return nil
}
// checkErr logs err to stdout when it is non-nil; it never aborts.
func checkErr(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
}
|
package main
import "github.com/sadasant/scripts/go/euler/euler"
// months_limit holds the day count of each month in a non-leap year,
// indexed 0 (January) through 11 (December).
var months_limit = []int{
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
}
// solution counts the Sundays that fell on the first of a month between
// min_year and max_year (inclusive), walking month-by-month from
// 1 Jan 1900, which was a Monday.
//
// Fixes two defects in the original day-by-day walk: the leap-year test
// was wrong (`year%100 == 0 && year%400 == 0 || year%4 == 0` would have
// treated 1900 as a leap year), and February 29th was unreachable
// because the month always rolled over at day 28, so leap years were
// never actually applied and the weekday drifted off after 1904.
func solution(min_year, max_year int) int {
	// Local copy of the month lengths keeps this function self-contained.
	monthDays := [12]int{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
	week_day := 2 // encoding: 1 = Sunday, 2 = Monday (1 Jan 1900)
	sundays := 0
	for year := 1900; year <= max_year; year++ {
		// Gregorian rule: divisible by 4, except centuries unless
		// divisible by 400.
		leap := year%4 == 0 && (year%100 != 0 || year%400 == 0)
		for month := 0; month < 12; month++ {
			// week_day currently holds the weekday of the 1st of this month.
			if year >= min_year && week_day == 1 {
				sundays++
			}
			days := monthDays[month]
			if month == 1 && leap {
				days = 29
			}
			// Advance to the 1st of the next month, keeping week_day in 1..7.
			week_day = (week_day+days-1)%7 + 1
		}
	}
	return sundays
}
// solution2 recomputes the count compactly: day accumulates the
// day-of-week offset of the first of each successive month, starting at
// 2 for 1 Jan of the first year; day%7 == 0 corresponds to a Sunday.
// Because day is advanced before the check, each test actually looks at
// the first of the FOLLOWING month — over 1901..2000 this still yields
// the correct total since 1 Jan 1901 was a Tuesday and 1 Jan 2001 a
// Monday (neither endpoint is a Sunday).
// NOTE(review): the y%4 leap test is only valid for ranges that exclude
// century non-leap years (1900, 2100, ...) — confirm before reusing
// outside 1901..2000.
func solution2(years ...int) int {
	day, count := 2, 0
	for _, y := range years {
		for m := 0; m < 12; m++ {
			if m == 1 && y%4 == 0 {
				day += 29
			} else {
				day += months_limit[m]
			}
			if day%7 == 0 {
				count++
			}
		}
	}
	return count
}
// solution3 estimates the answer arithmetically: of the 12*n_years
// month-firsts, on average one in seven lands on a Sunday (integer
// division; happens to be exact for the 100-year Euler range).
func solution3(n_years int) int {
	totalFirsts := 12 * n_years
	return totalFirsts / 7
}
// main runs and times the three solutions to Project Euler problem 19
// via the euler helper package.
func main() {
	euler.Init(19, "How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?")
	euler.PrintTime("Full specs | Result: %v, Nanoseconds: %d\n", solution, 1901, 2000)
	euler.PrintTime("Smaller code | Result: %v, Nanoseconds: %d\n", solution2, euler.Sequence(1901, 2000)...)
	euler.PrintTime("Math | Result: %v, Nanoseconds: %d\n", solution3, 100)
}
|
package leetcode_1486_数组异或操作
/*
给你两个整数,n 和 start 。
数组 nums 定义为:nums[i] = start + 2*i(下标从 0 开始)且 n == nums.length 。
请返回 nums 中所有元素按位异或(XOR)后得到的结果。
示例 1:
输入:n = 5, start = 0
输出:8
解释:数组 nums 为 [0, 2, 4, 6, 8],其中 (0 ^ 2 ^ 4 ^ 6 ^ 8) = 8 。
"^" 为按位异或 XOR 运算符。
示例 2:
输入:n = 4, start = 3
输出:8
解释:数组 nums 为 [3, 5, 7, 9],其中 (3 ^ 5 ^ 7 ^ 9) = 8.
示例 3:
输入:n = 1, start = 7
输出:7
示例 4:
输入:n = 10, start = 5
输出:2
提示:
1 <= n <= 1000
0 <= start <= 1000
n == nums.length
*/
/*
思考:
1. 创建一个数组,len 为 n
2. for 循环数组, 执行 nums[i] = start + 2*i
3. for 循环 nums,进行异或(重点,不能挨个异或,会多)
---
第三步可以和第二步合在一起做?
*/
// xorOperation returns the XOR of the n-element sequence
// nums[i] = start + 2*i, folding each term into an accumulator
// without materializing the array.
func xorOperation(n int, start int) int {
	acc := 0
	for i, term := 0, start; i < n; i, term = i+1, term+2 {
		acc ^= term
	}
	return acc
}
|
package cards
// Card codes for a standard 52-card deck, one byte each, counting up
// from 0. Ranks descend A, K, Q, J, 10, 9, ..., 2; within each rank the
// suit order is HEART, DIAMOND, CLUB, SPADE. Hence CARD_A_HEART == 0
// and CARD_2_SPADE == 51.
const (
	CARD_A_HEART byte = iota
	CARD_A_DIAMOND
	CARD_A_CLUB
	CARD_A_SPADE
	CARD_K_HEART
	CARD_K_DIAMOND
	CARD_K_CLUB
	CARD_K_SPADE
	CARD_Q_HEART
	CARD_Q_DIAMOND
	CARD_Q_CLUB
	CARD_Q_SPADE
	CARD_J_HEART
	CARD_J_DIAMOND
	CARD_J_CLUB
	CARD_J_SPADE
	CARD_10_HEART
	CARD_10_DIAMOND
	CARD_10_CLUB
	CARD_10_SPADE
	CARD_9_HEART
	CARD_9_DIAMOND
	CARD_9_CLUB
	CARD_9_SPADE
	CARD_8_HEART
	CARD_8_DIAMOND
	CARD_8_CLUB
	CARD_8_SPADE
	CARD_7_HEART
	CARD_7_DIAMOND
	CARD_7_CLUB
	CARD_7_SPADE
	CARD_6_HEART
	CARD_6_DIAMOND
	CARD_6_CLUB
	CARD_6_SPADE
	CARD_5_HEART
	CARD_5_DIAMOND
	CARD_5_CLUB
	CARD_5_SPADE
	CARD_4_HEART
	CARD_4_DIAMOND
	CARD_4_CLUB
	CARD_4_SPADE
	CARD_3_HEART
	CARD_3_DIAMOND
	CARD_3_CLUB
	CARD_3_SPADE
	CARD_2_HEART
	CARD_2_DIAMOND
	CARD_2_CLUB
	CARD_2_SPADE
)
|
package main
import (
"container/list"
"fmt"
)
// main builds a linked list of the integers 0..10 and prints every
// element.
//
// Fix: the original loop condition `p != link.Back()` stopped at the
// last node without visiting it, so the final element (10) was never
// printed; iterating until p is nil covers the whole list.
func main() {
	link := list.New()
	for i := 0; i <= 10; i++ {
		link.PushBack(i)
	}
	for p := link.Front(); p != nil; p = p.Next() {
		fmt.Println("Number", p.Value)
	}
}
package repository
import (
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// DingdingRobot is the MongoDB document describing a DingTalk
// (Dingding) robot webhook configuration.
type DingdingRobot struct {
	ID          primitive.ObjectID `bson:"_id,omitempty" json:"id"`
	Name        string             `bson:"name" json:"name"`
	Description string             `bson:"description" json:"description"`
	// Token and Secret authenticate calls to the robot's webhook.
	Token     string    `bson:"token" json:"token"`
	Secret    string    `bson:"secret" json:"secret"`
	CreatedAt time.Time `bson:"created_at" json:"created_at"`
	UpdatedAt time.Time `bson:"updated_at" json:"updated_at"`
}
// DingdingRobotRepo is the persistence interface for DingdingRobot
// documents.
type DingdingRobotRepo interface {
	// Add inserts a robot and returns the generated ObjectID.
	Add(robot DingdingRobot) (id primitive.ObjectID, err error)
	// Get fetches a robot by its ObjectID.
	Get(id primitive.ObjectID) (robot DingdingRobot, err error)
	// Find returns all robots matching the bson filter.
	Find(filter bson.M) (robots []DingdingRobot, err error)
	// Paginate returns a page of matches plus the next offset.
	Paginate(filter bson.M, offset, limit int64) (robots []DingdingRobot, next int64, err error)
	// DeleteID removes a single robot by ObjectID.
	DeleteID(id primitive.ObjectID) error
	// Delete removes every robot matching the filter.
	Delete(filter bson.M) error
	// Update replaces the robot stored under id.
	Update(id primitive.ObjectID, robot DingdingRobot) error
	// Count returns the number of robots matching the filter.
	Count(filter bson.M) (int64, error)
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
)
// main wires solve to the real stdin/stdout.
func main() {
	solve(os.Stdin, os.Stdout)
}
// solve reads n, then k, then n integers (one token per line) from
// stdin and prints the smallest index i with a[i] >= k, or n when no
// such index exists. Parse errors are ignored, matching the contest
// input contract.
func solve(stdin io.Reader, stdout io.Writer) {
	sc := bufio.NewScanner(stdin)
	readInt := func() int {
		sc.Scan()
		v, _ := strconv.Atoi(sc.Text())
		return v
	}
	n := readInt()
	k := readInt()
	a := make([]int, n)
	for i := range a {
		a[i] = readInt()
	}
	ans := lowerBound2(n, func(i int) bool { return a[i] >= k })
	fmt.Fprintln(stdout, ans)
}
// lowerBound returns the smallest i in [0, n) for which f(i) is true,
// or n when f is false everywhere, via a linear scan.
func lowerBound(n int, f func(int) bool) (idx int) {
	for idx = 0; idx < n; idx++ {
		if f(idx) {
			return idx
		}
	}
	return n
}
// lowerBound2 is the binary-search counterpart of lowerBound: for a
// predicate f that is monotone (false then true) over [0, n), it
// returns the smallest index where f is true, or n when f is false
// everywhere. O(log n) calls to f.
func lowerBound2(n int, f func(int) bool) (idx int) {
	lo, hi := 0, n
	for lo < hi {
		mid := lo + (hi-lo)/2
		if f(mid) {
			hi = mid // mid satisfies f: the answer is at mid or left of it
		} else {
			lo = mid + 1 // answer must lie strictly right of mid
		}
	}
	// lo == hi is the first satisfying index (or n).
	return lo
}
|
package main
import "fmt"
function main(){
names := []string{"stanely", "david", "oscar"}
vals := make([]interface{}, len(names))
for i,v := range names{
vals[i] = v
}
PrintAll(vals)
}
// PrintAll prints each value on its own line.
func PrintAll(vals []interface{}) {
	for i := range vals {
		fmt.Println(vals[i])
	}
}
|
/*
Package handlers : handle MQTT message and deploy object to kubernetes.
license: Apache license 2.0
copyright: Nobuyuki Matsui <nobuyuki.matsui@gmail.com>
*/
package handlers
import (
"k8s.io/apimachinery/pkg/runtime"
)
/*
HandlerInf : an interface to specify the method signatures that an object handler should implement.
Apply deploys the given Kubernetes object and Delete removes it; both
return a status/result string.
*/
type HandlerInf interface {
	Apply(runtime.Object) string
	Delete(runtime.Object) string
}
|
package access
import (
"fmt"
"github.com/dgrijalva/jwt-go"
"time"
)
var (
	// tokenExpiredDate is the app token's time-to-live.
	// NOTE(review): the original comment said 30 days, but the value is
	// 3600*24*7 seconds = 7 days — confirm which is intended.
	tokenExpiredDate = 3600 * 24 * 7 * time.Second
	// tokenIDKeyPrefix is the cache key prefix for token IDs.
	tokenIDKeyPrefix = "token:auth:id:"
	// tokenExpiredTopic is the message topic published when a token expires.
	tokenExpiredTopic = "com.qianxunke.shop.topic.auth.tokenExpired"
)
// Subject identifies the holder of a token.
type Subject struct {
	ID   string `json:"id"`
	Name string `json:"name,omitempty"`
}
// standardClaims is the token's claim set: the subject's ID and name
// plus the standard JWT claims.
type standardClaims struct {
	SubjectID string `json:"subjectId,omitempty"`
	Name      string `json:"name,omitempty"`
	jwt.StandardClaims
}
// MakeAccessToken generates a signed JWT for subject and saves it in
// the cache (redis); the signed token string is returned.
func (s *service) MakeAccessToken(subject *Subject) (ret string, err error) {
	m, err := s.createTokenClaims(subject)
	if err != nil {
		return "", fmt.Errorf("[MakeAccessToken] 创建token Claim 失败,err: %s", err)
	}
	// Create the token and sign it with HMAC-SHA256 using the
	// configured secret key.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, m)
	ret, err = token.SignedString([]byte(cfg.SecretKey))
	if err != nil {
		return "", fmt.Errorf("[MakeAccessToken] 创建token失败,err: %s", err)
	}
	// Persist the signed token to the cache so it can later be
	// validated/revoked.
	err = s.saveTokenToCache(subject, ret)
	if err != nil {
		return "", fmt.Errorf("[MakeAccessToken] 保存token到缓存失败,err: %s", err)
	}
	return
}
// GetCacheAccessToken fetches the subject's cached token.
func (s *service) GetCacheAccessToken(subject *Subject) (ret string, err error) {
	ret, err = s.getTokenFromCache(subject)
	if err != nil {
		return "", fmt.Errorf("[GetCachedAccessToken] 从缓存获取token失败,err: %s", err)
	}
	return
}
// DelUserAccessToken revokes a user's token: it parses the token to
// recover the subject ID, then removes that subject's cached token.
func (s *service) DelUserAccessToken(token string) (err error) {
	// Parse the token to identify its subject.
	claims, err := s.parseToken(token)
	if err != nil {
		return fmt.Errorf("[DelUserAccessToken] 错误的token,err: %s", err)
	}
	// Delete the cached token keyed by the parsed subject ID.
	err = s.delTokenFromCache(&Subject{
		ID: claims.Subject,
	})
	if err != nil {
		return fmt.Errorf("[DelUserAccessToken] 清除用户token,err: %s", err)
	}
	return
}
// AuthenticationFromToken validates tk and returns its subject.
// The token must both parse/verify AND exactly match the token cached
// for that subject; otherwise an error is returned.
func (s *service) AuthenticationFromToken(tk string) (subject *Subject, err error) {
	cliaim, err := s.parseToken(tk)
	// Invalid or unverifiable token: return the parse error (subject nil).
	if err != nil {
		return
	}
	subject = &Subject{
		ID: cliaim.Subject,
	}
	// The presented token must match the cached one (supports revocation).
	cacheToken, err := s.getTokenFromCache(subject)
	if err != nil || len(cacheToken) == 0 || cacheToken != tk {
		return nil, fmt.Errorf("[AuthenticationFromToken] 从缓存获取token失败,err: %s", err)
	}
	return
}
|
package main
import (
"strconv"
"testing"
"time"
)
// TestGenOverdueDays checks genOverdueDays against fixtures ranging from a
// same-day span up to multi-year spans.
func TestGenOverdueDays(t *testing.T) {
	day := func(y int, m time.Month, d, h int) time.Time {
		return time.Date(y, m, d, h, 0, 0, 0, time.UTC)
	}
	cases := []struct {
		from, to time.Time
		want     float64
	}{
		{day(2009, 5, 3, 0), day(2009, 5, 3, 24), 0.00},
		{day(2009, 5, 3, 0), day(2009, 5, 4, 24), 0.00},
		{day(2009, 5, 3, 0), day(2009, 5, 20, 24), 6.00},
		{day(2009, 5, 3, 0), day(2009, 8, 4, 24), 81.00},
		{day(2009, 5, 3, 0), day(2010, 5, 2, 24), 352.00},
		{day(2009, 5, 3, 0), day(2011, 8, 4, 24), 811.00},
	}
	for _, c := range cases {
		label := "test genOverdueDays(\"" + c.from.Format("20060102") + "\", \"" + c.to.Format("20060102") + "\") == " + strconv.FormatFloat(c.want, 'f', -1, 64)
		if genOverdueDays(c.from, c.to) == c.want {
			t.Log(label + " passed")
		} else {
			t.Error(label + " failed, got " + strconv.FormatFloat(genOverdueDays(c.from, c.to), 'f', -1, 64))
		}
	}
}
|
/*
* @lc app=leetcode.cn id=1 lang=golang
*
* [1] 两数之和
*
* https://leetcode-cn.com/problems/two-sum/description/
*
* algorithms
* Easy (46.84%)
* Likes: 6596
* Dislikes: 0
* Total Accepted: 621.9K
* Total Submissions: 1.3M
* Testcase Example: '[2,7,11,15]\n9'
*
* 给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。
*
* 你可以假设每种输入只会对应一个答案。但是,你不能重复利用这个数组中同样的元素。
*
* 示例:
*
* 给定 nums = [2, 7, 11, 15], target = 9
*
* 因为 nums[0] + nums[1] = 2 + 7 = 9
* 所以返回 [0, 1]
*
*
*/
// @lc code=start
// twoSum returns the indices of the two elements of nums that add up to
// target, or nil when no such pair exists. Single pass with a value->index
// map: O(n) time, O(n) space.
func twoSum(nums []int, target int) []int {
	// seen maps a previously visited value to its index.
	seen := make(map[int]int, len(nums))
	for i, v := range nums {
		// If the complement was seen earlier, that pair is the answer.
		if j, ok := seen[target-v]; ok {
			return []int{j, i}
		}
		seen[v] = i
	}
	return nil
}
// @lc code=end
|
package utils
import (
"os"
)
func GetPodName() (name string) {
return os.Getenv("POD_NAME")
}
|
package metrics_test
// TestHost calls the Host endpoint with a nil argument: the call must
// succeed, yield no stream URL, and produce a non-nil result.
func (s *metrics) TestHost() {
	res, stream, err := s.metrics.Host(nil)
	if !s.NoError(err) {
		return
	}
	s.Nil(stream)
	if !s.NotNil(res) {
		return
	}
}
|
package main
import (
"fmt"
"os"
"sort"
"strings"
)
// PathQuery decomposes a path-like query string: the fully expanded path,
// its directory part (slash-terminated when derived from an existing
// directory), and the trailing filename fragment being completed.
type PathQuery struct {
	QueryPath string
	DirectoryPath string
	Filename string
}
// NewPathQuery expands q and splits it into a PathQuery. isPath reports
// whether q expands to a path at all; when false, only QueryPath is set.
func NewPathQuery(q string) (bool, *PathQuery) {
	pq := &PathQuery{}
	var isPath bool
	isPath, pq.QueryPath = ExpandPathString(q)
	if !isPath {
		return false, pq
	}
	pq.DirectoryPath = pq.QueryPath
	// An existing directory without a trailing slash gets one appended.
	if info, err := os.Stat(pq.QueryPath); err == nil && info.IsDir() && !strings.HasSuffix(pq.DirectoryPath, "/") {
		pq.DirectoryPath += "/"
		return true, pq
	}
	// Otherwise split on the last slash into directory + filename fragment.
	if ind := strings.LastIndex(pq.QueryPath, "/"); ind >= 0 {
		pq.DirectoryPath = pq.QueryPath[:ind+1]
		pq.Filename = pq.QueryPath[ind+1:]
	}
	return true, pq
}
// MakeLaunchEntry builds a LaunchEntry for the queried path itself.
// Executables are rejected with an error.
func (pq *PathQuery) MakeLaunchEntry() (*LaunchEntry, error) {
	info, err := os.Stat(pq.QueryPath)
	if err != nil {
		return nil, err
	}
	if IsExecutable(info) {
		return nil, fmt.Errorf("`%v` is executable", pq.QueryPath)
	}
	// Display the full path in bold; tab-completion uses the directory part.
	entry, err := NewEntryForFile(pq.QueryPath, "<b>"+pq.QueryPath+"</b>", pq.DirectoryPath)
	if err != nil {
		return nil, err
	}
	return entry, nil
}
// DirFilenames lists the names of every entry in pq.DirectoryPath,
// erroring when the path cannot be opened or is not a directory.
func (pq *PathQuery) DirFilenames() ([]string, error) {
	dir, err := os.Open(pq.DirectoryPath)
	if err != nil {
		return nil, err
	}
	defer dir.Close()
	info, err := dir.Stat()
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("`%v` is not a directory", pq.DirectoryPath)
	}
	// -1 reads all remaining directory entries in one call.
	names, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	return names, nil
}
// SearchFileEntries turns a path-like query into launch entries: one entry
// for the queried path itself (unless it is already in history) plus one
// per file in the query's directory whose name case-insensitively starts
// with the queried filename fragment.
func SearchFileEntries(query string) (results LaunchEntriesList) {
	query = ExpandEnvVars(query)
	isPath, pq := NewPathQuery(query)
	if !isPath {
		return
	}
	// Entry for the queried path itself; errors are reported and skipped.
	if entry, err := pq.MakeLaunchEntry(); err != nil {
		errduring("making file entry `%v`", err, "Skipping it", pq.QueryPath)
	} else if !IsInHistory(entry.Cmdline) {
		entry.QueryIndex = -1
		results = append(results, entry)
	}
	filenames, err := pq.DirFilenames()
	if err != nil {
		errduring("retrieving dir `%v` filenames", err, "No file entries are retrieved", pq.DirectoryPath)
		return
	}
	sort.Strings(filenames)
	qflen := len(pq.Filename)
	pqLoaseFilename := strings.ToLower(pq.Filename)
	for _, name := range filenames {
		lcname := strings.ToLower(name)
		// Case-insensitive prefix match against the queried fragment.
		// NOTE(review): the bold split below slices name at qflen BYTES;
		// assumes lowercasing preserves byte length — confirm for
		// non-ASCII filenames.
		if !strings.HasPrefix(lcname, pqLoaseFilename) {
			continue
		}
		if name == pq.Filename {
			continue // exact match is already covered by the entry above
		}
		path := pq.DirectoryPath + name
		isDir := false
		if stat, err := os.Stat(path); err == nil && stat.IsDir() {
			isDir = true
		}
		// Tab-completion text: directories get a trailing slash.
		tabPath := pq.DirectoryPath + name
		if isDir {
			tabPath += "/"
		}
		// Bold the part of the name the user already typed.
		displayPath := fmt.Sprintf(".../<b>%v</b>%v", name[0:qflen], name[qflen:])
		entry, err := NewEntryForFile(path, displayPath, tabPath)
		if err != nil {
			errduring("file entry addition `%v`", err, "Skipping it", path)
			continue
		}
		// Non-directories get index 1; directories keep the zero value.
		if !isDir {
			entry.QueryIndex = 1
		}
		results = append(results, entry)
	}
	results.SortByIndex()
	return
}
|
/*
* traPCollection API
*
* traPCollectionのAPI
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// ProductKeyGen - request payload for product key generation.
type ProductKeyGen struct {
	// Number of keys to generate.
	Num int32 `json:"num"`
	// Version ID.
	Version string `json:"version"`
}
|
package azure
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-11-01/network"
"github.com/protofire/polkadot-failover-mechanism/pkg/helpers"
)
// securityRuleItem is a normalized, comparable representation of an Azure
// network security rule, used to match expected rules against actual ones.
// Slice fields are kept sorted by the code that builds these values.
type securityRuleItem struct {
	sourcePortRanges []string
	sourceAddressesPrefixes []string
	destinationPortRanges []string
	destinationAddressesPrefixes []string
	priority int
	protocol string
	direction string
}
// equals reports whether sr and other have identical field values;
// reflect.DeepEqual compares the slice fields element-wise.
func (sr securityRuleItem) equals(other securityRuleItem) bool {
	return reflect.DeepEqual(sr, other)
}
// compareRules verifies that every rule in actualRules has an equal
// counterpart in testRules, returning an error naming the first actual
// rule with no match. (O(n*m), fine for the small rule sets used in tests.)
func compareRules(testRules []securityRuleItem, actualRules []securityRuleItem) error {
actual:
	for _, actualRule := range actualRules {
		for _, testRule := range testRules {
			if testRule.equals(actualRule) {
				continue actual
			}
		}
		// Fixed misspelling in the error message ("coinside").
		return fmt.Errorf("cannot find coinciding rule for %+v", actualRule)
	}
	return nil
}
//nolint
// getNetworkSecurityGroupClient builds an authorized SecurityGroupsClient
// for the given subscription.
func getNetworkSecurityGroupClient(subscriptionID string) (network.SecurityGroupsClient, error) {
	client := network.NewSecurityGroupsClient(subscriptionID)
	authorizer, err := getAuthorizer()
	if err != nil {
		return client, fmt.Errorf("Cannot get authorizer: %w", err)
	}
	client.Authorizer = authorizer
	return client, nil
}
//nolint
// getNetworkSecurityRuleClient builds an authorized SecurityRulesClient
// for the given subscription.
func getNetworkSecurityRuleClient(subscriptionID string) (network.SecurityRulesClient, error) {
	client := network.NewSecurityRulesClient(subscriptionID)
	authorizer, err := getAuthorizer()
	if err != nil {
		return client, fmt.Errorf("Cannot get authorizer: %w", err)
	}
	client.Authorizer = authorizer
	return client, nil
}
//nolint
// getSecurityGroups lists every security group in the resource group,
// following all result pages.
func getSecurityGroups(subscriptionID, resourceGroup string) ([]network.SecurityGroup, error) {
	client, err := getNetworkSecurityGroupClient(subscriptionID)
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	result, err := client.List(ctx, resourceGroup)
	if err != nil {
		return nil, err
	}
	var groups []network.SecurityGroup
	// BUG FIX: the previous loop advanced pages only while NextWithContext
	// returned a non-nil error, so it both swallowed paging errors and
	// never collected any page after the first. Iterate with NotDone and
	// propagate paging failures instead.
	for result.NotDone() {
		groups = append(groups, result.Values()...)
		if err := result.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return groups, nil
}
// filterSecurityGroups compacts *sgs in place, keeping only the groups for
// which handler returns true; relative order of kept groups is preserved.
func filterSecurityGroups(sgs *[]network.SecurityGroup, handler func(sg network.SecurityGroup) bool) {
	groups := *sgs
	kept := 0
	for i := range groups {
		if !handler(groups[i]) {
			continue // this group is dropped
		}
		if i != kept {
			groups[kept], groups[i] = groups[i], groups[kept]
		}
		kept++
	}
	*sgs = groups[:kept]
}
// prepareTestRules builds the expected set of security rules for the
// deployment. exposePrometheus adds the 9273 telemetry ports and exposeSSH
// adds the public port-22 rule. The returned order matches rule priority
// grouping: subnet rules, wildcard inbound rules, then outbound.
func prepareTestRules(exposePrometheus, exposeSSH bool) []securityRuleItem {
	// newRule fills the fields shared by every rule in this rule set.
	newRule := func(srcPrefix string, dstPorts []string, priority int, protocol, direction string) securityRuleItem {
		return securityRuleItem{
			sourcePortRanges:             []string{"*"},
			sourceAddressesPrefixes:      []string{srcPrefix},
			destinationPortRanges:        dstPorts,
			destinationAddressesPrefixes: []string{"*"},
			priority:                     priority,
			protocol:                     protocol,
			direction:                    direction,
		}
	}
	subnetPorts := []string{"8300", "8301", "8600", "8500", "8302"}
	if exposePrometheus {
		subnetPorts = append(subnetPorts, "9273")
	}
	sort.Strings(subnetPorts)
	rules := []securityRuleItem{
		// Per-subnet inbound rules for the consul/service ports.
		newRule("10.0.0.0/24", subnetPorts, 102, "*", "Inbound"),
		newRule("10.1.0.0/24", subnetPorts, 103, "*", "Inbound"),
		newRule("10.2.0.0/24", subnetPorts, 104, "*", "Inbound"),
		// Publicly reachable p2p port.
		newRule("*", []string{"30333"}, 101, "*", "Inbound"),
	}
	if exposeSSH {
		rules = append(rules, newRule("*", []string{"22"}, 100, "Tcp", "Inbound"))
	}
	if exposePrometheus {
		rules = append(rules, newRule("*", []string{"9273"}, 105, "Tcp", "Inbound"))
	}
	// Single outbound rule.
	rules = append(rules, newRule("*", []string{"*"}, 100, "Tcp", "Outbound"))
	return rules
}
// SecurityGroupsCheck checks that all SG rules have been applied correctly
// for every security group whose name carries the deployment prefix.
func SecurityGroupsCheck(prefix, subscriptionID, resourceGroup string, exposePrometheus, exposeSSH bool) error {
	sgs, err := getSecurityGroups(subscriptionID, resourceGroup)
	if err != nil {
		return err
	}
	filterSecurityGroups(&sgs, func(sg network.SecurityGroup) bool {
		return strings.HasPrefix(*sg.Name, helpers.GetPrefix(prefix))
	})
	// normalize collapses the SDK's plural/singular field pair into one
	// sorted slice. ROBUSTNESS FIX: either pointer may be nil depending on
	// the rule shape; the previous code dereferenced both unconditionally
	// and could panic.
	normalize := func(plural *[]string, singular *string) []string {
		var values []string
		if plural != nil {
			values = append(values, *plural...)
		}
		if len(values) == 0 && singular != nil {
			values = []string{*singular}
		}
		sort.Strings(values)
		return values
	}
	var rules []securityRuleItem
	for _, sg := range sgs {
		if sg.SecurityRules == nil {
			continue
		}
		for _, sr := range *sg.SecurityRules {
			priority := 0
			if sr.Priority != nil {
				priority = int(*sr.Priority)
			}
			rules = append(rules, securityRuleItem{
				sourceAddressesPrefixes:      normalize(sr.SourceAddressPrefixes, sr.SourceAddressPrefix),
				sourcePortRanges:             normalize(sr.SourcePortRanges, sr.SourcePortRange),
				destinationAddressesPrefixes: normalize(sr.DestinationAddressPrefixes, sr.DestinationAddressPrefix),
				destinationPortRanges:        normalize(sr.DestinationPortRanges, sr.DestinationPortRange),
				priority:                     priority,
				protocol:                     string(sr.Protocol),
				direction:                    string(sr.Direction),
			})
		}
	}
	return compareRules(prepareTestRules(exposePrometheus, exposeSSH), rules)
}
|
package main
import "fmt"
// a writes the letter "a" fifty times to stdout.
func a() {
	remaining := 50
	for remaining > 0 {
		fmt.Print("a")
		remaining--
	}
}
// b writes the letter "b" fifty times to stdout.
func b() {
	remaining := 50
	for remaining > 0 {
		fmt.Print("b")
		remaining--
	}
}
// slow runs a and b sequentially on the calling goroutine, so all of their
// output appears before the marker line.
func slow() {
	a()
	b()
	fmt.Println("\nend slow()")
}
// fast launches a and b concurrently and returns immediately: the marker
// line may print before or interleaved with their output, and the
// goroutines are never waited on (intentional for this demo).
func fast() {
	go a()
	go b()
	fmt.Println("\nend fast()")
}
// main contrasts sequential and concurrent execution. Because fast does
// not wait for its goroutines, any output still pending when main returns
// is lost.
func main() {
	slow()
	fast()
	fmt.Println("\nend main()")
}
|
package repository
import (
"errors"
"gid/entity"
"gid/library/log"
"gid/library/tool"
"go.uber.org/zap"
)
// SegmentsCreate inserts a new segment row after stamping its create time;
// it refuses to insert when a row with the same biz_tag already exists.
func (r *Repository) SegmentsCreate(s *entity.Segments) error {
	exists, err := r.db.Where("biz_tag = ?", s.BizTag).Exist(&entity.Segments{})
	if err != nil {
		log.GetLogger().Error("[SegmentsCreate] Exist", zap.Any("req", s), zap.Error(err))
		return err
	}
	if exists {
		return errors.New("tag already exists")
	}
	s.CreateTime = tool.GetTimeUnix()
	if _, err := r.db.Insert(s); err != nil {
		log.GetLogger().Error("[SegmentsCreate] Create", zap.Any("req", s), zap.Error(err))
		return err
	}
	return nil
}
|
package detect
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// expected maps raw User-Agent strings to the platform name the detector
// must report. Groups cover mobile OSes, desktop OSes, and unknown/garbage
// inputs.
var expected = []struct {
	ua string
	platform string
}{
	// iPhone
	{"Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419", "iOS"},
	{"Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_3 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B511 Safari/9537.53", "iOS"},
	// iPad
	{"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 Safari/531.21.10", "iOS"},
	// iPod
	{"Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419", "iOS"},
	// Android misc.
	{"Opera/9.80 (Android 4.2.1; Linux; Opera Mobi/ADR-1212030829) Presto/2.11.355 Version/12.10", "Android"},
	{"Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19", "Android"},
	{"Mozilla/5.0 (Linux; U; Android 1.5; de-; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1", "Android"},
	{"Mozilla/5.0 (Android; Tablet; rv:26.0) Gecko/26.0 Firefox/26.0", "Android"},
	{"Mozilla/5.0 (Android; Mobile; rv:17.0) Gecko/17.0 Firefox/17.0", "Android"},
	// BlackBerry
	{"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1+", "BlackBerry"},
	// BB10 (not recognized as BlackBerry by the detector)
	{"Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+", "Unknown Platform"},
	// webOS (unrecognized)
	{"Mozilla/5.0 (webOS/1.4.0; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.1", "Unknown Platform"},
	// Symbian
	{"Mozilla/5.0 (SymbianOS/9.1; U; [en-us]) AppleWebKit/413 (KHTML, like Gecko) Safari/413", "Symbian OS"},
	{"Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525", "Symbian OS"},
	// Firefox OS (unrecognized)
	{"Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0", "Unknown Platform"},
	{"Mozilla/5.0 (Tablet; rv:26.0) Gecko/26.0 Firefox/26.0", "Unknown Platform"},
	// Windows phone
	{"Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; SGH-i917)", "Windows Phone"},
	{"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)", "Windows Phone"},
	{"HTC_Touch_3G Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 7.11)", "Unknown Windows OS"},
	// Desktop
	{"Opera/9.80 (X11; Linux x86_64) Presto/2.12.388 Version/12.10", "Linux"},
	{"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19", "Windows 7"},
	{"", "Unknown Platform"},
	{"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13", "Mac OS X"},
	{"alksjdlakdj", "Unknown Platform"},
	{"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0; Touch)", "Windows 8"},
	{"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0)", "Windows 8.1"},
	{"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; MDDRJS; rv:11.0) like Gecko", "Windows 7"},
	{"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "Windows 7"},
	{"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko", "Windows 8.1"},
	{"Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko", "Windows 7"},
	{"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko", "Windows 8.1"},
	{"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)", "Windows 7"},
	{"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)", "Windows 7"},
	{"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.2; ARM; Trident/6.0; Touch; .NET4.0E; .NET4.0C; Tablet PC 2.0)", "Windows 8"},
	{"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)", "Windows 8"},
}
// TestSetPlatform runs setPlatform against every user-agent fixture in
// expected and asserts the detected platform matches.
func TestSetPlatform(t *testing.T) {
	for _, tc := range expected {
		ua := New(tc.ua)
		ua.setPlatform()
		name := fmt.Sprintf("(UserAgent: %s) platform is %s", tc.ua, tc.platform)
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, tc.platform, ua.PlatForm)
		})
	}
}
|
package db
import (
"context"
log "github.com/sirupsen/logrus"
ethcommon "github.com/ethereum/go-ethereum/common"
"github.com/Magicking/faktur-daemon/common"
"github.com/Magicking/faktur-daemon/merkle"
"github.com/jinzhu/gorm"
)
// DbReceipt stores one merkle receipt: the proof branch linking Targethash
// to MerkleRoot, and the anchoring transaction row.
type DbReceipt struct {
	gorm.Model
	Targethash string
	Proofs string // unique
	MerkleRoot string
	TransactionId int
	Transaction *Dbtransaction
}
// Dbtransaction records the anchoring transaction for a merkle root along
// with its broadcast Status (see the state constants below).
type Dbtransaction struct {
	gorm.Model
	MerkleRoot string
	TransactionHash string
	Status int
}
// Transaction broadcast states stored in Dbtransaction.Status.
// Iota order matters: NOT_SENT (0) is the zero value for new rows.
const (
	NOT_SENT = iota
	RETRY
	WAITING_CONFIRMATION
	SENT
)
// GetTxByRoot fetches the transaction recorded for the given merkle root.
// It returns (nil, nil) when no row exists.
func GetTxByRoot(ctx context.Context, root ethcommon.Hash) (*Dbtransaction, error) {
	db := common.DBFromContext(ctx)
	tx := Dbtransaction{}
	res := db.Where(Dbtransaction{MerkleRoot: root.Hex()}).First(&tx)
	switch {
	case res.RecordNotFound():
		return nil, nil
	case res.Error != nil:
		return nil, res.Error
	}
	return &tx, nil
}
// GetReceiptsByHash returns every receipt stored for targetHash, with the
// Transaction association preloaded. Returns (nil, nil) when none exist.
func GetReceiptsByHash(ctx context.Context, targetHash ethcommon.Hash) ([]DbReceipt, error) {
	db := common.DBFromContext(ctx)
	var receipts []DbReceipt
	res := db.Preload("Transaction").Where(DbReceipt{Targethash: targetHash.Hex()}).Find(&receipts)
	switch {
	case res.RecordNotFound():
		return nil, nil
	case res.Error != nil:
		return nil, res.Error
	}
	return receipts, nil
}
// SaveReceipt persists a receipt row linking targetHash, its proof branch,
// the merkle root, and the anchoring transaction.
func SaveReceipt(ctx context.Context, proofs *merkle.Branch, targetHash merkle.Hashable, root ethcommon.Hash, tx *Dbtransaction) error {
	db := common.DBFromContext(ctx)
	receipt := DbReceipt{
		Targethash:  targetHash.Hex(),
		Proofs:      proofs.String(),
		MerkleRoot:  root.Hex(),
		Transaction: tx,
	}
	return db.Create(&receipt).Error
}
// UpdateTx upserts the transaction row keyed by merkle root: it creates a
// row when none exists, otherwise updates Status (and TransactionHash when
// txHash is non-nil).
func UpdateTx(ctx context.Context, root ethcommon.Hash, txHash *ethcommon.Hash, state int) error {
	db := common.DBFromContext(ctx)
	//TODO Factorize & optimise
	var tx Dbtransaction
	cursor := db.Where("merkle_root = ?", root.Hex()).Find(&tx)
	if cursor.RecordNotFound() {
		// Insert path: no row exists for this root yet.
		var _txHash string
		if txHash != nil {
			_txHash = txHash.Hex()
		}
		dbtx := Dbtransaction{
			MerkleRoot: root.Hex(),
			TransactionHash: _txHash,
			Status: state,
		}
		if err := db.Create(&dbtx).Error; err != nil {
			return err
		}
		return nil
	}
	// Update path.
	// NOTE(review): gorm's struct-based Updates skips zero-valued fields,
	// so state == 0 (NOT_SENT) would not be written — confirm intended.
	cursor = db.Model(&Dbtransaction{}).Where("merkle_root = ?", root.Hex())
	if cursor.Error != nil {
		return cursor.Error
	}
	if txHash != nil {
		cursor = cursor.Updates(&Dbtransaction{Status: state, TransactionHash: txHash.Hex()})
	} else {
		cursor = cursor.Updates(&Dbtransaction{Status: state})
	}
	if cursor.Error != nil {
		return cursor.Error
	}
	return nil
}
// FilterByState returns all transactions whose Status equals state.
// It returns (nil, nil) when no rows match.
func FilterByState(ctx context.Context, state int) ([]*Dbtransaction, error) {
	db := common.DBFromContext(ctx)
	dbtx := make([]*Dbtransaction, 0)
	cursor := db.Where(&Dbtransaction{Status: state}).Find(&dbtx)
	if cursor.Error != nil {
		// BUG FIX: previously returned the function's named err (always
		// nil here), silently swallowing the query error.
		return nil, cursor.Error
	}
	if cursor.RecordNotFound() {
		return nil, nil
	}
	return dbtx, nil
}
// Get Txs w/ STATUS
// Update Tx w/ NEW_STATUS
// MigrateDatabase auto-migrates every persistence model; on failure the
// database handle is closed and the process aborts.
func MigrateDatabase(ctx context.Context) {
	db := common.DBFromContext(ctx)
	for _, model := range []interface{}{&DbReceipt{}, &Dbtransaction{}} {
		if err := db.AutoMigrate(model).Error; err != nil {
			db.Close()
			log.Fatalf("Could not migrate models to database: %v", err)
		}
	}
}
|
package utils
import (
"fmt"
"net"
"os"
"os/signal"
"regexp"
"syscall"
"github.com/miekg/dns"
"github.com/ray-g/dnsproxy/logger"
)
// Classification results returned by IsIPQuery.
const (
	NotIPQuery = 0
	IPv4Query = 4
	IPv6Query = 6
)
// IsIPQuery classifies a DNS question: IPv4Query for A records, IPv6Query
// for AAAA, and NotIPQuery for anything else (including non-INET classes).
func IsIPQuery(q dns.Question) int {
	if q.Qclass != dns.ClassINET {
		return NotIPQuery
	}
	switch q.Qtype {
	case dns.TypeA:
		return IPv4Query
	case dns.TypeAAAA:
		return IPv6Query
	}
	return NotIPQuery
}
// UnFqdn strips the trailing dot from a fully-qualified domain name; other
// strings are returned unchanged.
func UnFqdn(s string) string {
	if !dns.IsFqdn(s) {
		return s
	}
	return s[:len(s)-1]
}
// domainPattern matches dotted domain names (wildcard labels allowed)
// ending in a 2-6 letter TLD. Compiled once at package init instead of on
// every IsDomain call (the previous per-call regexp.MatchString recompiled
// the pattern and discarded its error).
var domainPattern = regexp.MustCompile(`^([a-zA-Z0-9\*]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6}$`)

// IsDomain reports whether domain looks like a domain name; bare IP
// addresses are explicitly rejected.
func IsDomain(domain string) bool {
	if IsIP(domain) {
		return false
	}
	return domainPattern.MatchString(domain)
}

// IsIP reports whether ip parses as an IPv4 or IPv6 address.
func IsIP(ip string) bool {
	return net.ParseIP(ip) != nil
}
func EnsureDirectory(path string) error {
info, err := os.Stat(path)
if os.IsNotExist(err) {
if os.MkdirAll(path, os.ModePerm) != nil {
return fmt.Errorf("failed to create folders: %s", path)
}
}
if err == nil && !info.IsDir() {
return fmt.Errorf("%s exists but not a folder", path)
}
return nil
}
// WaitSysSignal blocks until the process receives an interrupt or
// termination signal, logs it, and returns.
func WaitSysSignal() {
	// Waiting for close
	osSignals := make(chan os.Signal, 1)
	// FIX: os.Kill (SIGKILL) was removed from the list — SIGKILL can never
	// be caught or handled, so registering it is a no-op (staticcheck
	// SA1016 / go vet flag this).
	signal.Notify(osSignals, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
	sig := <-osSignals
	logger.Debugf("Received signal: %v", sig)
}
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package datacoord
import (
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/metrics"
"github.com/milvus-io/milvus/internal/proto/datapb"
)
// Keys under which cluster state is persisted in the txn kv store.
const clusterPrefix = "cluster-prefix/"
const clusterBuffer = "cluster-buffer"

// dataNodeStatus tracks whether a registered data node is currently
// reachable.
type dataNodeStatus int8

const (
	online dataNodeStatus = iota
	offline
)

// dataNodeInfo pairs a node's persisted info with its liveness status.
type dataNodeInfo struct {
	info *datapb.DataNodeInfo
	status dataNodeStatus
}

// clusterNodeManager keeps the in-memory view of data nodes and persists
// it through a transactional kv store.
type clusterNodeManager struct {
	kv kv.TxnKV
	dataNodes map[string]*dataNodeInfo
	chanBuffer []*datapb.ChannelStatus //Unwatched channels buffer
}
// newClusterNodeManager creates a manager and restores its state from the
// kv store; the manager is returned together with any load error.
func newClusterNodeManager(kv kv.TxnKV) (*clusterNodeManager, error) {
	manager := &clusterNodeManager{
		kv:         kv,
		dataNodes:  make(map[string]*dataNodeInfo),
		chanBuffer: []*datapb.ChannelStatus{},
	}
	err := manager.loadFromKv()
	return manager, err
}
// loadFromKv restores persisted node infos (marked offline until they
// re-register) and the unwatched-channel buffer from the kv store.
func (c *clusterNodeManager) loadFromKv() error {
	_, values, err := c.kv.LoadWithPrefix(clusterPrefix)
	if err != nil {
		return err
	}
	for _, v := range values {
		info := &datapb.DataNodeInfo{}
		if err := proto.UnmarshalText(v, info); err != nil {
			return err
		}
		node := &dataNodeInfo{
			info: info,
			// Liveness is unknown after a restart; nodes come back online
			// when they register again.
			status: offline,
		}
		c.dataNodes[info.Address] = node
	}
	// Load error deliberately ignored; an empty value means no buffer.
	dn, _ := c.kv.Load(clusterBuffer)
	//TODO add not value error check
	if dn != "" {
		info := &datapb.DataNodeInfo{}
		if err := proto.UnmarshalText(dn, info); err != nil {
			return err
		}
		// The buffer is stored piggybacked on a DataNodeInfo's Channels
		// field (see txnSaveNodes).
		c.chanBuffer = info.Channels
	}
	return nil
}
// updateCluster reconciles the in-memory node map against the list of
// currently alive dataNodes and returns the delta: freshly seen nodes,
// nodes that went offline, and nodes whose version changed (restarts).
func (c *clusterNodeManager) updateCluster(dataNodes []*datapb.DataNodeInfo) *clusterDeltaChange {
	newNodes := make([]string, 0)
	offlines := make([]string, 0)
	restarts := make([]string, 0)
	var onCnt, offCnt float64
	currentOnline := make(map[string]struct{})
	for _, n := range dataNodes {
		currentOnline[n.Address] = struct{}{}
		onCnt++
		node, ok := c.dataNodes[n.Address]
		if ok {
			node.status = online
			// A changed version means the node restarted since last seen.
			if node.info.Version != n.Version {
				restarts = append(restarts, n.Address)
			}
			continue
		}
		newNodes = append(newNodes, n.Address)
	}
	// Mark tracked nodes that disappeared from the alive list as offline.
	for nAddr, node := range c.dataNodes {
		_, has := currentOnline[nAddr]
		if !has && node.status == online {
			node.status = offline
			offCnt++
			offlines = append(offlines, nAddr)
		}
	}
	// NOTE(review): offCnt here counts only newly offline nodes, while
	// updateMetrics counts every offline node — confirm which count the
	// "offline" gauge is supposed to report.
	metrics.DataCoordDataNodeList.WithLabelValues("online").Set(onCnt)
	metrics.DataCoordDataNodeList.WithLabelValues("offline").Set(offCnt)
	return &clusterDeltaChange{
		newNodes: newNodes,
		offlines: offlines,
		restarts: restarts,
	}
}
// updateDataNodes merges the given node infos into the cluster map and
// persists them together with the unwatched-channel buffer.
// NOTE(review): assumes every node.Address already exists in c.dataNodes;
// a missing entry would nil-panic — confirm callers guarantee this.
func (c *clusterNodeManager) updateDataNodes(dataNodes []*datapb.DataNodeInfo, buffer []*datapb.ChannelStatus) error {
	for _, node := range dataNodes {
		c.dataNodes[node.Address].info = node
	}
	return c.txnSaveNodes(dataNodes, buffer)
}
// getDataNodes returns a deep-copied snapshot of the known data nodes
// (optionally restricted to online ones) along with the buffered
// unwatched channels.
func (c *clusterNodeManager) getDataNodes(onlyOnline bool) (map[string]*datapb.DataNodeInfo, []*datapb.ChannelStatus) {
	snapshot := make(map[string]*datapb.DataNodeInfo)
	for addr, node := range c.dataNodes {
		if onlyOnline && node.status != online {
			continue
		}
		snapshot[addr] = proto.Clone(node.info).(*datapb.DataNodeInfo)
	}
	return snapshot, c.chanBuffer
}
// register marks the node at n.Address as online, creating a fresh entry
// when the address is unknown, and refreshes the node-count metrics.
func (c *clusterNodeManager) register(n *datapb.DataNodeInfo) {
	if node, ok := c.dataNodes[n.Address]; ok {
		node.status = online
		node.info.Version = n.Version
	} else {
		c.dataNodes[n.Address] = &dataNodeInfo{info: n, status: online}
	}
	c.updateMetrics()
}
// unregister removes the node with the given address and returns its info;
// it returns nil when the address is unknown.
func (c *clusterNodeManager) unregister(addr string) *datapb.DataNodeInfo {
	node, found := c.dataNodes[addr]
	if !found {
		return nil
	}
	delete(c.dataNodes, addr)
	node.status = offline
	c.updateMetrics()
	return node.info
}
// updateMetrics publishes the current online/offline node counts to the
// DataCoord gauges.
func (c *clusterNodeManager) updateMetrics() {
	var onCnt, offCnt float64
	for _, node := range c.dataNodes {
		switch node.status {
		case online:
			onCnt++
		default:
			offCnt++
		}
	}
	metrics.DataCoordDataNodeList.WithLabelValues("online").Set(onCnt)
	metrics.DataCoordDataNodeList.WithLabelValues("offline").Set(offCnt)
}
// txnSaveNodes persists node infos and the channel buffer in a single kv
// transaction. A no-op when both inputs are empty.
func (c *clusterNodeManager) txnSaveNodes(nodes []*datapb.DataNodeInfo, buffer []*datapb.ChannelStatus) error {
	if len(nodes) == 0 && len(buffer) == 0 {
		return nil
	}
	data := make(map[string]string)
	for _, n := range nodes {
		// NOTE(review): assumes n.Address already exists in c.dataNodes —
		// confirm, otherwise this nil-panics (same assumption as
		// updateDataNodes).
		c.dataNodes[n.Address].info = n
		key := clusterPrefix + n.Address
		value := proto.MarshalTextString(n)
		data[key] = value
	}
	c.chanBuffer = buffer
	// short cut, reusing datainfo to store array of channel status
	bufNode := &datapb.DataNodeInfo{
		Channels: buffer,
	}
	data[clusterBuffer] = proto.MarshalTextString(bufNode)
	return c.kv.MultiSave(data)
}
|
package functions
import (
"strings"
)
// GenPath splits domain on "." and returns the components in reverse
// order, e.g. "a.b.c" -> ["c", "b", "a"]. The reversal happens in place.
func GenPath(domain string) []string {
	parts := strings.Split(domain, ".")
	for left, right := 0, len(parts)-1; left < right; left, right = left+1, right-1 {
		parts[left], parts[right] = parts[right], parts[left]
	}
	return parts
}
// RT returns in with a single trailing dot removed, if present.
func RT(in string) string {
	if strings.HasSuffix(in, ".") {
		return in[:len(in)-1]
	}
	return in
}
// ProgressCallback receives a progress message; its bool return is
// interpreted by the caller (e.g. whether to continue).
type ProgressCallback func(string) bool

// ProgressStatusCallback receives a progress message plus an error status;
// its bool return is interpreted by the caller.
type ProgressStatusCallback func(string, error) bool
|
package main
import (
"fmt"
"html/template"
"io"
"log"
"net/http"
"os"
)
// BasePath joins filenames onto a pair of configured roots: basePath for
// templates and baseTmpPath for uploaded files.
type BasePath struct {
	basePath    string
	baseTmpPath string
}

// Path returns filename appended to the template root.
func (bp BasePath) Path(filename string) string {
	full := bp.basePath + filename
	return full
}

// TmpPath returns filename appended to the upload/tmp root.
func (bp BasePath) TmpPath(filename string) string {
	full := bp.baseTmpPath + filename
	return full
}
// TMPLATE_PATH holds the hard-coded template and upload roots for this
// demo server (note: Windows-specific absolute paths).
var TMPLATE_PATH BasePath = BasePath{
	basePath: "F:\\my\\learn_go\\src\\server\\",
	baseTmpPath: "F:\\my\\learn_go\\tmp\\",
}
// handlerFunc serves the index page rendered from index.gtpl, answering
// with a 404 when the template cannot be parsed.
var handlerFunc = func(w http.ResponseWriter, r *http.Request) {
	t, err := template.ParseFiles(TMPLATE_PATH.Path("index.gtpl"))
	if err != nil {
		log.Println(err)
		http.NotFoundHandler().ServeHTTP(w, r)
		return
	}
	// FIX: the template-execution error was silently dropped before.
	if err := t.Execute(w, nil); err != nil {
		log.Println(err)
	}
}
// loginFunC renders the login form on GET and dumps the submitted
// credentials on other methods (demo code — do not log passwords in real
// services).
func loginFunC(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method:", r.Method) // request method
	if r.Method == "GET" {
		t, err := template.ParseFiles(TMPLATE_PATH.Path("login.gtpl"))
		if err != nil {
			log.Println(err)
			http.NotFoundHandler().ServeHTTP(w, r)
			return
		}
		data := map[string]string{"username": "wskj", "password": "123456"}
		// FIX: the template-execution error was silently dropped before.
		if err := t.Execute(w, data); err != nil {
			log.Println(err)
		}
		return
	}
	// Login submission: parse and print the posted form values.
	// FIX: ParseForm's error was previously ignored.
	if err := r.ParseForm(); err != nil {
		log.Println(err)
		return
	}
	fmt.Println("username:", r.Form["username"])
	fmt.Println("password:", r.Form["password"])
}
// uploadFunc renders the upload form on GET; for other methods it stores
// the uploaded file under the tmp root, with appendName suffixed to the
// original filename.
func uploadFunc(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		t, err := template.ParseFiles(TMPLATE_PATH.Path("upload.gtpl"))
		if err != nil {
			log.Println(err)
			http.NotFoundHandler().ServeHTTP(w, r)
			return
		}
		if err := t.Execute(w, nil); err != nil {
			log.Println(err)
		}
		return
	}
	// BUG FIX: a stray argument-less http.FileServer() call was removed —
	// http.FileServer requires a root http.FileSystem, so the bare call
	// did not even compile.
	if err := r.ParseMultipartForm(10240); err != nil {
		log.Println(err)
		return
	}
	appendName := r.FormValue("appendName")
	fUpload, header, err := r.FormFile("uploadfile")
	if err != nil {
		log.Println(err)
		return
	}
	defer fUpload.Close()
	dst, err := os.OpenFile(TMPLATE_PATH.TmpPath(header.Filename+"_"+appendName), os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		return
	}
	defer dst.Close()
	// FIX: surface copy failures instead of discarding them.
	if _, err := io.Copy(dst, fUpload); err != nil {
		log.Println(err)
	}
}
// main wires the demo routes and serves on :8181, aborting on listener
// failure.
func main() {
	http.HandleFunc("/", handlerFunc)
	http.HandleFunc("/login", loginFunC)
	http.HandleFunc("/upload", uploadFunc)
	if err := http.ListenAndServe(":8181", nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package gostat
import (
"fmt"
"math"
"sort"
"strings"
)
// Stat approximates the distribution of a stream of scalars using a
// bounded set of buckets.
type Stat struct {
	bkts buckets // bucket currently filled, always ORDERED. Buckets are never empty. Their limit must NEVER touch and they should never overlap.
	nbkt int // expected number of buckets
}
// NewStat builds a Stat with the specified number of internal buckets
// (the precision of the approximation).
func NewStat(precision int) *Stat {
	return &Stat{
		nbkt: precision,
		// One extra slot so add() can append before merging back down.
		bkts: make([]bucket, 0, precision+1),
	}
}
// String implements the Stringer interface, for debugging: it dumps the
// aggregate statistics followed by one line per bucket.
func (s *Stat) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "Bucket dump (%d / %dbuckets)\n", s.bkts.Len(), s.nbkt)
	c, m, v := s.CountMeanVar()
	fmt.Fprintf(&sb, "Count\t%d\nMean\t%f \nVar\t%f \n", c, m, v)
	fmt.Fprintf(&sb, "Min\t%f\nMax\t%f \n", s.Min(), s.Max())
	fmt.Fprintf(&sb, "\t%s\n", bucket{}.Header())
	for i, b := range s.bkts {
		fmt.Fprintf(&sb, "%d\t%s\n", i, b.String())
	}
	return sb.String()
}
// Add accepts any built-in integer or float scalar, converts it to
// float64, and folds it into the statistics. It panics on any other
// dynamic type (including named types whose underlying type is numeric).
func (s *Stat) Add(data interface{}) {
	switch v := data.(type) {
	case int:
		s.add(float64(v))
	case int8:
		s.add(float64(v))
	case int16:
		s.add(float64(v))
	case int32:
		s.add(float64(v))
	case int64:
		s.add(float64(v))
	case uint:
		s.add(float64(v))
	case uint8:
		s.add(float64(v))
	case uint16:
		s.add(float64(v))
	case uint32:
		s.add(float64(v))
	case uint64:
		s.add(float64(v))
	case float32:
		s.add(float64(v))
	case float64:
		s.add(float64(v))
	default:
		panic("Invalid type added to Stat object")
	}
}
// add inserts a float64 into the bucket set: it lands in an existing
// bucket that contains it, or gets a fresh single-value bucket; when the
// bucket count then exceeds the configured precision, the two cheapest
// adjacent buckets are merged.
func (s *Stat) add(d float64) {
	// try to put data in an existing bucket
	for i, b := range s.bkts {
		// Buckets are ordered, so once a bucket's upper bound exceeds d
		// without containing it, no later bucket can hold it either.
		if b.high() > d {
			break // will never fit in any existing bucket
		}
		if b.contains(d) { // found suitable bucket
			s.bkts[i].add(d)
			return
		}
	}
	// create a dedicated bucket for this data that could not fit anywhere
	b := bucket{}
	b.c = d
	b.add(d)
	s.bkts = append(s.bkts, b)
	sort.Sort(s.bkts)
	// if bucket count is still reasonably low, we're done !
	if s.bkts.Len() <= s.nbkt {
		return
	}
	//fmt.Println(s)
	//fmt.Println("Merge required")
	// Here, we have too many buckets - we select the most attractive merge move and do it !
	bi := 0 // best move index so far
	bc := math.Inf(+1) // best cost so far
	for i := 0; i < s.bkts.Len()-1; i++ {
		// eval scores merging bucket i with its right neighbor.
		if c := s.bkts.eval(i); c < bc {
			bc = c
			bi = i
		}
	}
	// do the "best move"
	// fmt.Println("Merging buckets :", bi, " and ", bi+1)
	s.bkts = s.bkts.merge(bi)
	//fmt.Println(s)
	// Done !
}
// CountMeanVar provides exact values for count, mean, variance - more efficient by calculating all values at once.
// NOTE(review): with n == 0 this divides by zero (NaN), and the variance
// expression sum2/(n-1) - mean² mixes the unbiased divisor with a biased
// correction term — confirm the intended estimator.
func (s *Stat) CountMeanVar() (int, float64, float64) {
	var m, v float64
	var n int
	for _, b := range s.bkts {
		m += b.sum
		v += b.sum2
		n += b.n
	}
	m = m / float64(n)
	v = v/float64(n-1) - m*m
	return n, m, v
}
// Count returns the exact number of data points added so far.
func (s *Stat) Count() int {
	total := 0
	for _, b := range s.bkts {
		total += b.n
	}
	return total
}
// Min provides exact minimum value.
// Panics when no data has been added (empty bucket list).
func (s *Stat) Min() float64 {
	return s.bkts[0].low()
}
// Max provides the exact maximum value (fixed copy-pasted comment that
// said "Min"). Panics when no data has been added.
func (s *Stat) Max() float64 {
	return s.bkts[len(s.bkts)-1].high()
}
// NRepart estimates the number of data points below x (with special
// rounding for x = c), assuming a GAUSSIAN law, by summing the per-bucket
// estimates.
func (s *Stat) NRepart(x float64) float64 {
	total := 0.0
	for _, b := range s.bkts {
		total += b.NRepart(x)
	}
	return total
}
// Percentile provides the value such that p percent of the value are below, and 1-P and above.
// Currently it validates p (panicking outside [0,1]) and then panics:
// the lookup itself is not implemented yet.
func (s *Stat) Percentile(p float64) float64 {
	if p < .0 || p > 1.0 {
		panic("Invalid input")
	}
	// TODO - idea is to first locate the bucket that contains the value,
	// then solve using NPart to find the exact percentile.
	panic("Not implemented")
}
|
package core
/*
file: doop.go
Only this file contains APIs that are exported to doop-core user.
*/
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"os/user"
"strings"
"github.com/amsa/doop/adapter"
. "github.com/amsa/doop/common"
//_ "github.com/mattn/go-sqlite3"
)
const (
DOOP_DIRNAME = ".doop"
DOOP_CONF_FILE = "config"
DOOP_MAPPING_FILE = "doopm"
DOOP_DEFAULT_BRANCH = "master"
DOOP_TABLE_BRANCH = "__branch"
DOOP_MASTER = "__doop_master"
)
type Doop struct {
homeDir string
//config DoopConfig
adapter adapter.Adapter
dbInfoMap map[string]*DoopDbInfo
}
type DoopConfig struct {
Database struct {
DSN string
}
}
// GetDoop constructs a Doop instance and ensures the Doop home directory
// exists (installing it on first run). Config and adapter wiring are
// currently disabled; the adapter is set lazily via initAdapter.
func GetDoop() *Doop {
	d := &Doop{}
	// initialize Doop object
	d.initDoopDir()
	//d.initConfig()
	//d.adapter = adapter.GetAdapter(d.config.Database.DSN)
	return d
}
/* Private methods:
************************************
*/
// initDoopDir initializes the Doop home directory ($HOME/.doop) and creates
// it if it does not exist. It is a no-op when the path was already resolved.
func (doop *Doop) initDoopDir() {
	if doop.homeDir != "" {
		return
	}
	currentUser, err := user.Current()
	HandleError(err)
	doop.homeDir = strings.Join([]string{currentUser.HomeDir, DOOP_DIRNAME}, string(os.PathSeparator))
	// Any stat error (not only "does not exist") triggers installation.
	if _, err := os.Stat(doop.homeDir); err != nil {
		doop.install()
	}
}
// initAdapter initializes the Doop database adapter based on the given DSN
// string, replacing any previously configured adapter.
func (doop *Doop) initAdapter(dsn string) {
	doop.adapter = adapter.GetAdapter(dsn)
}
//initConfig loads and parses Doop configurations
/*func (doop *Doop) initConfig() {
if doop.config.Database.DSN != "" {
return
}
handleError(gcfg.ReadFileInto(&doop.config, doop.getConfigFile()))
}
func (doop *Doop) getConfigFile() string {
return strings.Join([]string{doop.homeDir, DOOP_CONF_FILE}, string(os.PathSeparator))
}*/
// getLogicalTables returns a name -> CREATE statement map of the logical
// tables registered in the doop master table for the default branch.
func (doop *Doop) getLogicalTables() map[string]string {
	// The table name cannot be bound as a parameter, so it is interpolated
	// here. The original Sprintf had a %s verb but no argument, producing a
	// broken "%!s(MISSING)" table name, and passed DOOP_MASTER as the
	// branch value instead of the default branch.
	statement := fmt.Sprintf(`
	SELECT name, sql FROM %s WHERE branch=? AND type=?
	`, DOOP_MASTER)
	rows, err := doop.adapter.Query(statement, DOOP_DEFAULT_BRANCH, "logical_table")
	HandleErrorAny(rows, err)
	ret := make(map[string]string)
	for rows.Next() {
		var name string
		var sql string
		err := rows.Scan(&name, &sql)
		HandleErrorAny(rows, err)
		ret[name] = sql
	}
	return ret
}
// getDbInfoByDbName retrieves database info for the given database (name).
// The process exits with a message when no such database is tracked.
func (doop *Doop) getDbInfoByDbName(dbName string) *DoopDbInfo {
	for _, candidate := range doop.GetDbIdMap() {
		if candidate.Name == dbName {
			return candidate
		}
	}
	fmt.Println("Could not find database: " + dbName)
	os.Exit(1)
	return nil
}
// getDbMappingFile returns the path of the Doop mapping file inside the
// Doop home directory.
func (doop *Doop) getDbMappingFile() string {
	return doop.homeDir + string(os.PathSeparator) + DOOP_MAPPING_FILE
}
// setDbId appends a "hash,name,dsn" record for the database to the mapping
// file. Returns (true, nil) on success.
func (doop *Doop) setDbId(dbName string, dbId string, dsn string) (bool, error) {
	file, err := os.OpenFile(doop.getDbMappingFile(), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	HandleError(err)
	defer file.Close()
	// Propagate write failures instead of silently reporting success
	// (the original discarded WriteString's error).
	if _, err := file.WriteString(dbId + "," + dbName + "," + dsn + "\n"); err != nil {
		return false, err
	}
	return true, nil
}
// removeDbId removes a database from the in-memory map and rewrites the
// mapping file from the remaining entries.
func (doop *Doop) removeDbId(dbId string) (bool, error) {
	delete(doop.dbInfoMap, dbId)
	var sb strings.Builder
	for _, info := range doop.dbInfoMap {
		sb.WriteString(info.Hash + "," + info.Name + "," + info.DSN + "\n")
	}
	err := ioutil.WriteFile(doop.getDbMappingFile(), []byte(sb.String()), 0644)
	return err == nil, err
}
/* Public methods:
************************************
*/
// GetDbIdMap returns the mapping of database identifiers (hashes) to their
// info, loading it lazily from the mapping file on first call.
func (doop *Doop) GetDbIdMap() map[string]*DoopDbInfo {
	if doop.dbInfoMap != nil {
		return doop.dbInfoMap
	}
	doop.dbInfoMap = make(map[string]*DoopDbInfo)
	file, err := os.OpenFile(doop.getDbMappingFile(), os.O_CREATE|os.O_RDONLY, 0644)
	HandleError(err)
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		lArray := strings.Split(line, ",")
		// Skip blank/malformed lines instead of panicking on a short split.
		if len(lArray) < 3 {
			continue
		}
		doop.dbInfoMap[lArray[0]] = &DoopDbInfo{Hash: lArray[0], Name: lArray[1], DSN: lArray[2]}
	}
	// Surface read errors that terminated the scan early (original ignored them).
	HandleError(scanner.Err())
	return doop.dbInfoMap
}
// TrackDb registers a database under Doop control: it records the DSN
// mapping, creates the doop master and branch tables, snapshots the existing
// tables as logical tables, and creates the default branch.
// Returns an error when the database is already tracked.
func (doop *Doop) TrackDb(dbName string, dsn string) (bool, error) {
	doop.initAdapter(dsn)
	dbId := GenerateDbId(dsn) // Generate the id (hash) from the given DSN
	if _, ok := doop.GetDbIdMap()[dbId]; ok {
		return false, errors.New("Database already initialized!")
	}
	// Set mapping for the new database
	if _, e := doop.setDbId(dbName, dbId, dsn); e != nil {
		return false, e
	}
	// Create doop_master table to store metadata of all tables.
	// (The original had a trailing comma after the last column, which is
	// invalid SQL.)
	statement := fmt.Sprintf(`
	CREATE TABLE %s (
		id integer NOT NULL PRIMARY KEY,
		name text,
		type text,
		branch text,
		sql text
	)
	`, DOOP_MASTER)
	HandleErrorAny(doop.adapter.Exec(statement))
	// Insert every existing table into doop_master as a logical table.
	// The original built this INSERT with invalid verbs (%i, %sql), omitted
	// the table-name argument, and never executed it; use bind parameters
	// and actually run it.
	tables, err := doop.adapter.GetTables()
	HandleErrorAny(tables, err)
	insert := fmt.Sprintf(`INSERT INTO %s (id, name, type, branch, sql) VALUES (?, ?, ?, ?, ?)`, DOOP_MASTER)
	for i, table := range tables {
		schema, err := doop.adapter.GetSchema(table)
		HandleErrorAny(schema, err)
		HandleErrorAny(doop.adapter.Exec(insert, i, table, "logical_table", DOOP_DEFAULT_BRANCH, schema))
	}
	// Create branch table to store the branches
	HandleErrorAny(doop.adapter.Exec(`CREATE TABLE ` + DOOP_TABLE_BRANCH + ` (
		id integer NOT NULL PRIMARY KEY,
		name text,
		parent text,
		metadata text
	);`))
	HandleErrorAny(doop.adapter.Exec(`CREATE UNIQUE INDEX __branch_name_idx ON ` + DOOP_TABLE_BRANCH + ` (name);`))
	// Create default branch
	if _, e := doop.CreateBranch(DOOP_DEFAULT_BRANCH, ""); e != nil {
		return false, e
	}
	return true, nil
}
// ListDbs returns the names of all tracked databases.
// The result is nil when nothing is tracked; iteration order is undefined.
func (doop *Doop) ListDbs() []string {
	var names []string
	for _, info := range doop.GetDbIdMap() {
		names = append(names, info.Name)
	}
	return names
}
// UntrackDb removes a database from Doop control: it deletes the mapping
// entry and drops the branch metadata from the database itself.
// Returns (true, nil) on success.
func (doop *Doop) UntrackDb(dbName string) (bool, error) {
	info := doop.getDbInfoByDbName(dbName)
	doop.initAdapter(info.DSN)
	// Propagate mapping-file failures instead of ignoring them.
	if _, err := doop.removeDbId(info.Hash); err != nil {
		return false, err
	}
	HandleErrorAny(doop.adapter.Exec(`DROP INDEX IF EXISTS __branch_name_idx;`))
	HandleErrorAny(doop.adapter.Exec(`DROP TABLE ` + DOOP_TABLE_BRANCH + `;`))
	// The original returned false on success, contradicting the
	// (bool, error) convention used by TrackDb.
	return true, nil
}
// CreateBranch creates a new branch of the database forking from the given
// parent branch. Only the default branch may omit a parent.
// NOTE(review): this is work in progress — the companion-table statements
// are collected in `statements` but never executed, and only the vdel
// table is generated so far (hdel/vsec/hsec are TODO).
func (doop *Doop) CreateBranch(branchName string, parentBranch string) (bool, error) {
	if branchName != DOOP_DEFAULT_BRANCH && parentBranch == "" {
		return false, errors.New("Parent branch name is not specified.")
	}
	// insert a row with branch and its parent name along with metadata (empty json object for now)
	HandleErrorAny(doop.adapter.
		Exec(`INSERT INTO `+DOOP_TABLE_BRANCH+` (name, parent, metadata) VALUES (?, ?, '{}')`, branchName, parentBranch))
	//get all tables
	tables := doop.getLogicalTables()
	//create companion tables for each logical table
	for tableName, schema := range tables {
		statements := make([]string, 0, 64)
		//need to parse the schema to create h
		//vdel
		//TODO: parse schema to get primary key type,
		//if no primary key, error thrown
		vdel := fmt.Sprintf(`
			CREATE TABLE __%s_%s_vdel (
				%s
			)
		`, branchName, tableName, schema)
		statements = append(statements, vdel)
		//hdel
		//vsec
		//hsec
	}
	return true, nil
}
// RemoveBranch deletes a branch.
// NOTE(review): not implemented yet — it always reports failure (false)
// with a nil error.
func (doop *Doop) RemoveBranch(branchName string) (bool, error) {
	return false, nil
}
// MergeBranch merges two branches into the first one (from).
// NOTE(review): not implemented yet — it always reports failure (false)
// with a nil error.
func (doop *Doop) MergeBranch(from string, to string) (bool, error) {
	return false, nil
}
// ListBranches returns the names of all branches of the given database.
func (doop *Doop) ListBranches(dbName string) []string {
	doop.initAdapter(doop.getDbInfoByDbName(dbName).DSN)
	// The original used make([]string, 1), which prepended a bogus empty
	// string to every result.
	rt := make([]string, 0)
	rows, err := doop.adapter.Query(`SELECT name FROM ` + DOOP_TABLE_BRANCH + `;`)
	HandleError(err)
	for rows.Next() {
		var name string
		// Surface scan failures (the original discarded them).
		HandleError(rows.Scan(&name))
		rt = append(rt, name)
	}
	return rt
}
|
//Color is a simple package for printing in color to a windows or ansi console.
//Internally the package github.com/daviddengcn/go-colortext is used.
package color
import (
"fmt"
ct "github.com/daviddengcn/go-colortext"
)
//Println prints the given values to the terminal, honoring the color
//settings of any ColorMsg arguments; other values are rendered with %v.
//The color is reset between plain values and once more at the end of the
//line, followed by a newline.
func Println(msgs ...interface{}) {
	for _, raw := range msgs {
		var (
			color   = ct.None
			bgcolor = ct.None
			bright  = false
			message string
		)
		switch m := raw.(type) {
		case ColorMsg:
			color = m.Color
			bright = m.Bright
			bgcolor = m.BgColor
			message = m.Message
		default:
			message = fmt.Sprintf("%v", m)
		}
		if color == ct.None && bgcolor == ct.None {
			ct.ResetColor()
		} else {
			ct.ChangeColor(color, bright, bgcolor, false)
		}
		fmt.Print(message)
	}
	ct.ResetColor()
	fmt.Println()
}
//None wraps a Sprintf-formatted message with no color change.
func None(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.None, false, format, v...)
}

//Black wraps a Sprintf-formatted message printed in black.
func Black(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Black, false, format, v...)
}

//Red wraps a Sprintf-formatted message printed in red.
func Red(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Red, false, format, v...)
}

//Green wraps a Sprintf-formatted message printed in green.
func Green(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Green, false, format, v...)
}

//Yellow wraps a Sprintf-formatted message printed in yellow.
func Yellow(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Yellow, false, format, v...)
}

//Blue wraps a Sprintf-formatted message printed in blue.
func Blue(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Blue, false, format, v...)
}

//Magenta wraps a Sprintf-formatted message printed in magenta.
func Magenta(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Magenta, false, format, v...)
}

//Cyan wraps a Sprintf-formatted message printed in cyan.
func Cyan(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Cyan, false, format, v...)
}

//White wraps a Sprintf-formatted message printed in white.
func White(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.White, false, format, v...)
}

//BrRed wraps a Sprintf-formatted message printed in bright red.
func BrRed(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Red, true, format, v...)
}

//BrGreen wraps a Sprintf-formatted message printed in bright green.
func BrGreen(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Green, true, format, v...)
}

//BrYellow wraps a Sprintf-formatted message printed in bright yellow.
func BrYellow(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Yellow, true, format, v...)
}

//BrBlue wraps a Sprintf-formatted message printed in bright blue.
func BrBlue(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Blue, true, format, v...)
}

//BrMagenta wraps a Sprintf-formatted message printed in bright magenta.
func BrMagenta(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Magenta, true, format, v...)
}

//BrCyan wraps a Sprintf-formatted message printed in bright cyan.
func BrCyan(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.Cyan, true, format, v...)
}

//BrWhite wraps a Sprintf-formatted message printed in bright white.
func BrWhite(format string, v ...interface{}) (msg ColorMsg) {
	return newColorMsg(ct.White, true, format, v...)
}
//ColorMsg contains the message text together with the foreground color,
//background color and brightness to use when Println renders it.
type ColorMsg struct {
	Color   ct.Color // foreground color
	BgColor ct.Color // background color; ct.None leaves it unchanged
	Message string   // already-formatted text
	Bright  bool     // bright variant of the foreground color
}
//newColorMsg builds a ColorMsg with the given foreground color and
//brightness; the message is Sprintf-formatted and the background defaults
//to ct.None.
func newColorMsg(color ct.Color, bright bool, format string, v ...interface{}) (msg ColorMsg) {
	msg = ColorMsg{
		Color:   color,
		BgColor: ct.None,
		Message: fmt.Sprintf(format, v...),
		Bright:  bright,
	}
	return
}
//String implements fmt.Stringer by returning the plain message text
//without any color information.
func (colorMsg ColorMsg) String() string {
	return colorMsg.Message
}
//BgNone sets no background color; it is not necessary since that is the
//default. All Bg* methods operate on a copy and return it, so they can be
//chained: Red("x").BgWhite().
func (colorMsg ColorMsg) BgNone() ColorMsg {
	colorMsg.BgColor = ct.None
	return colorMsg
}

//BgBlack returns a copy of the message with a black background.
func (colorMsg ColorMsg) BgBlack() ColorMsg {
	colorMsg.BgColor = ct.Black
	return colorMsg
}

//BgRed returns a copy of the message with a red background.
func (colorMsg ColorMsg) BgRed() ColorMsg {
	colorMsg.BgColor = ct.Red
	return colorMsg
}

//BgGreen returns a copy of the message with a green background.
func (colorMsg ColorMsg) BgGreen() ColorMsg {
	colorMsg.BgColor = ct.Green
	return colorMsg
}

//BgYellow returns a copy of the message with a yellow background.
func (colorMsg ColorMsg) BgYellow() ColorMsg {
	colorMsg.BgColor = ct.Yellow
	return colorMsg
}

//BgBlue returns a copy of the message with a blue background.
func (colorMsg ColorMsg) BgBlue() ColorMsg {
	colorMsg.BgColor = ct.Blue
	return colorMsg
}

//BgMagenta returns a copy of the message with a magenta background.
func (colorMsg ColorMsg) BgMagenta() ColorMsg {
	colorMsg.BgColor = ct.Magenta
	return colorMsg
}

//BgCyan returns a copy of the message with a cyan background.
func (colorMsg ColorMsg) BgCyan() ColorMsg {
	colorMsg.BgColor = ct.Cyan
	return colorMsg
}

//BgWhite returns a copy of the message with a white background.
func (colorMsg ColorMsg) BgWhite() ColorMsg {
	colorMsg.BgColor = ct.White
	return colorMsg
}
|
package main
import "fmt"
// S is the configuration target of this functional-options demo.
type S struct {
	opt1 string // set via Opt1
	opt2 int    // set via Opt2
}
// String implements fmt.Stringer, rendering both option values.
func (s *S) String() string {
	return fmt.Sprintf("S{opt1: %q, opt2: %d}", s.opt1, s.opt2)
}
// Option mutates an S during construction and may reject invalid settings.
type Option func(*S) error // HL
// Opt1 returns an Option that sets the string option to v.
func Opt1(v string) Option {
	return func(target *S) error {
		target.opt1 = v
		return nil
	}
}
// Opt2 returns an Option that sets the int option to v.
func Opt2(v int) Option {
	return func(target *S) error {
		target.opt2 = v
		return nil
	}
}
// New builds an S by applying each Option in order; the first failing
// option aborts construction and its error is returned.
func New(opts ...Option) (*S, error) {
	result := &S{}
	for _, apply := range opts {
		if err := apply(result); err != nil {
			return nil, err
		}
	}
	return result, nil
}
// main demonstrates constructing S with functional options and printing it
// both with Go syntax (%#v) and via its Stringer implementation.
func main() {
	s, err := New(Opt1("hello"), Opt2(42))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", s)
	// Output: &main.S{opt1: "hello", opt2: 42}
	fmt.Println(s)
	// Output: S{opt1: "hello", opt2: 42}
	_ = s
}
|
package main
import (
"flag"
"log"
"net/http"
"os"
"os/exec"
"strings"
"github.com/datacratic/goship/ship"
)
// main starts a ship file server.
// Flags:
//
//	-address   listen address (default ":8080")
//	-directory server root; defaults to the current working directory
//	-hostname  URL clients use to reach the server; defaults to
//	           "http://<hostname -f><address>"
func main() {
	address := flag.String("address", ":8080", "address of the web server")
	directory := flag.String("directory", "", "directory location")
	hostname := flag.String("hostname", "", "URL used by clients to reach the server")
	flag.Parse()
	s := &ship.Server{
		Root: *directory,
		Host: *hostname,
	}
	if s.Root == "" {
		wd, err := os.Getwd()
		if err != nil {
			log.Fatal(err)
		}
		s.Root = wd
	}
	if s.Host == "" {
		// Derive the external URL from the machine's fully-qualified hostname.
		result, err := exec.Command("hostname", "-f").Output()
		if err != nil {
			log.Fatal(err)
		}
		s.Host = "http://" + strings.TrimSpace(string(result)) + *address
	}
	if err := s.Start(); err != nil {
		log.Fatal(err)
	}
	log.Println("installing server at", s.Host)
	err := http.ListenAndServe(*address, nil)
	if err != nil {
		log.Fatal(err)
	}
}
|
package ravendb
import (
"net/http"
)
// Compile-time check that ExplainQueryCommand implements RavenCommand.
var (
	_ RavenCommand = &ExplainQueryCommand{}
)
// ExplainQueryResult is one entry of the server's explain-query reply:
// which index was considered and the server's reasoning.
type ExplainQueryResult struct {
	Index  string `json:"Index"`
	Reason string `json:"Reason"`
}
// ExplainQueryCommand asks the server to explain how the given index query
// would be executed. Result is populated by SetResponse.
type ExplainQueryCommand struct {
	RavenCommandBase

	_conventions *DocumentConventions
	_indexQuery  *IndexQuery

	Result []*ExplainQueryResult
}
// NewExplainQueryCommand creates a read-only command that asks the server
// which index would serve the given query and why.
// Panics when conventions or indexQuery is nil.
func NewExplainQueryCommand(conventions *DocumentConventions, indexQuery *IndexQuery) *ExplainQueryCommand {
	panicIf(conventions == nil, "Conventions cannot be null")
	panicIf(indexQuery == nil, "IndexQuery cannot be null")
	res := &ExplainQueryCommand{
		RavenCommandBase: NewRavenCommandBase(),
		_conventions:     conventions,
		_indexQuery:      indexQuery,
	}
	res.IsReadRequest = true
	return res
}
// CreateRequest builds the POST request for the explain-query debug
// endpoint of the node's database, with the serialized query as body.
func (c *ExplainQueryCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	uri := node.URL + "/databases/" + node.Database + "/queries?debug=explain"
	payload := jsonExtensionsWriteIndexQuery(c._conventions, c._indexQuery)
	body, err := jsonMarshal(payload)
	panicIf(err != nil, "jsonMarshal() failed with %s", err)
	return NewHttpPost(uri, body)
}
// SetResponse parses the server reply into c.Result.
// An absent Results field is treated as an invalid response.
func (c *ExplainQueryCommand) SetResponse(response []byte, fromCache bool) error {
	var parsed struct {
		Results []*ExplainQueryResult
	}
	if err := jsonUnmarshal(response, &parsed); err != nil {
		return err
	}
	if parsed.Results == nil {
		return throwInvalidResponse()
	}
	c.Result = parsed.Results
	return nil
}
|
package middleware
import (
"errors"
"fmt"
"github.com/danilopolani/gocialite/structs"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/leachim2k/go-shorten/pkg/cli/shorten/options"
"golang.org/x/oauth2"
"net/http"
"os"
"strings"
"time"
)
// AuthCustomClaims extends the standard JWT claims with the user's display
// name, email, and the OAuth provider that authenticated them.
type AuthCustomClaims struct {
	Name     string `json:"name"`
	Email    string `json:"email"`
	Provider string `json:"p"`
	jwt.StandardClaims
}
// issuer is the expected "iss" claim value for tokens minted by this service.
const (
	issuer = "GoShorten"
)
// GetSecretKey returns the HMAC signing secret from the JWT_SECRET
// environment variable, falling back to the literal "secret" when unset.
// NOTE(review): the hard-coded fallback is insecure for production use.
func GetSecretKey() string {
	if secret := os.Getenv("JWT_SECRET"); secret != "" {
		return secret
	}
	return "secret"
}
// JWTAuthenticator is a gin middleware that validates the Authorization
// header and stores the parsed claims under the "JWT_CLAIMS" context key.
// Requests with a missing or invalid token are aborted with 403 Forbidden.
func JWTAuthenticator(c *gin.Context) {
	claims, err := GetClaimFromAuthHeader(c.Request.Header.Get("Authorization"))
	if err != nil {
		c.AbortWithError(http.StatusForbidden, err)
		return
	}
	c.Set("JWT_CLAIMS", claims)
}
// GetClaimFromAuthHeader extracts and validates the JWT carried in a
// "Bearer <token>" Authorization header and returns its claims.
func GetClaimFromAuthHeader(authHeader string) (*AuthCustomClaims, error) {
	// An empty header cannot have the prefix, so one check covers both.
	if !strings.HasPrefix(authHeader, "Bearer ") {
		return nil, fmt.Errorf("invalid Token Type (Bearer only)")
	}
	parts := strings.Split(authHeader, " ")
	if len(parts) != 2 {
		return nil, fmt.Errorf("detected bearer token, but in invalid format")
	}
	// Validate the JWT and hand back its claims.
	return ValidateJWT(parts[1])
}
// ValidateJWT parses tokenString, verifying its HMAC signature against the
// shared secret, and returns the embedded claims. It rejects tokens signed
// with a non-HMAC method, tokens from a foreign issuer, and expired tokens.
// On failure it returns a non-nil empty claims struct together with the error.
func ValidateJWT(tokenString string) (*AuthCustomClaims, error) {
	claimsStruct := AuthCustomClaims{}
	token, err := jwt.ParseWithClaims(
		tokenString,
		&claimsStruct,
		func(token *jwt.Token) (interface{}, error) {
			// Only accept HMAC; refusing other methods blocks the classic
			// "alg substitution" attack (e.g. none/RSA public key abuse).
			if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("invalid signing method")
			}
			return []byte(GetSecretKey()), nil
			/*
				pem, err := getGooglePublicKey(fmt.Sprintf("%s", token.Header["kid"]))
				if err != nil {
					return nil, err
				}
				key, err := jwt.ParseRSAPublicKeyFromPEM([]byte(pem))
				if err != nil {
					return nil, err
				}
				return key, nil
			*/
		},
	)
	if err != nil {
		return &AuthCustomClaims{}, err
	}
	claims, ok := token.Claims.(*AuthCustomClaims)
	if !ok {
		return &AuthCustomClaims{}, errors.New("Invalid Google JWT")
	}
	if claims.Issuer != issuer {
		return &AuthCustomClaims{}, errors.New("iss is invalid")
	}
	if claims.ExpiresAt < time.Now().UTC().Unix() {
		return &AuthCustomClaims{}, errors.New("JWT is expired")
	}
	return claims, nil
}
// BuildJWTToken mints an HS256-signed JWT for the given user and wraps it
// in an oauth2.Token. The subject is prefixed with the matching auth
// service's prefix (looked up by provider name), and a provider token with
// no expiry is given a default lifetime of one month.
func BuildJWTToken(user *structs.User, token *oauth2.Token, provider string) (*oauth2.Token, error) {
	// Find the configured prefix for this provider, if any.
	prefix := ""
	for _, service := range options.Current.AuthServices {
		if service.Name == provider {
			prefix = service.Prefix + "_"
		}
	}
	expiry := token.Expiry
	if expiry.Unix() == 0 || expiry.IsZero() {
		// Provider did not supply an expiry; default to one month from now.
		expiry = time.Now().AddDate(0, 1, 0)
	}
	claims := &AuthCustomClaims{
		Name:     user.FullName,
		Email:    user.Email,
		Provider: provider,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: expiry.Unix(),
			Id:        user.ID,
			Subject:   prefix + user.ID,
			IssuedAt:  time.Now().Unix(),
			Issuer:    issuer,
		},
	}
	tkn := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	t, err := tkn.SignedString([]byte(GetSecretKey()))
	if err != nil {
		return nil, err
	}
	// NOTE(review): the returned Expiry is the provider token's original
	// expiry, not the (possibly defaulted) JWT expiry — confirm intended.
	t2 := oauth2.Token{
		AccessToken: t,
		TokenType:   "Bearer",
		Expiry:      token.Expiry,
	}
	return &t2, nil
}
|
package account
import "sync"
// Account is a bank account whose balance and open/closed state are guarded
// by a mutex so concurrent operations stay consistent. Once closed, all
// operations fail. Amounts are in the smallest currency unit (int64).
type Account struct {
	balance int64
	closed  bool
	mux     sync.Mutex
}

// Open opens a new account holding initialDeposit.
// A negative initial deposit is rejected by returning nil.
func Open(initialDeposit int64) *Account {
	if initialDeposit < 0 {
		return nil
	}
	// Keyed literal: the original positional literal explicitly copied a
	// sync.Mutex value and silently depended on field order.
	return &Account{balance: initialDeposit}
}

// Close closes the current account and returns all balance in the account.
// ok is false when the account was already closed.
func (t *Account) Close() (payout int64, ok bool) {
	t.mux.Lock()
	defer t.mux.Unlock()
	if t.closed {
		return 0, false
	}
	t.closed = true
	return t.balance, true
}

// Balance returns the current balance of the account.
// ok is false when the account is closed.
func (t *Account) Balance() (balance int64, ok bool) {
	t.mux.Lock()
	defer t.mux.Unlock()
	if t.closed {
		return 0, false
	}
	return t.balance, true
}

// Deposit adds amount (negative values withdraw) and returns the resulting
// balance. ok is false when the account is closed or the withdrawal would
// overdraw the balance. (Result name fixes the original's "newBlance" typo.)
func (t *Account) Deposit(amount int64) (newBalance int64, ok bool) {
	t.mux.Lock()
	defer t.mux.Unlock()
	if t.closed || t.balance+amount < 0 {
		return 0, false
	}
	t.balance += amount
	return t.balance, true
}
|
package wait
import (
"github.com/thingsplex/tpflow/node/base"
"time"
)
import (
"github.com/futurehomeno/fimpgo"
"github.com/thingsplex/tpflow/model"
)
// WaitNode is a flow node that pauses execution for a configured delay
// (milliseconds) before transitioning to the success node.
type WaitNode struct {
	base.BaseNode
	delay     int // pause duration in milliseconds, from node meta config
	ctx       *model.Context
	transport *fimpgo.MqttTransport
}
// NewWaitNode constructs a WaitNode bound to the given flow context and
// node metadata.
func NewWaitNode(flowOpCtx *model.FlowOperationalContext, meta model.MetaNode, ctx *model.Context) model.Node {
	n := &WaitNode{ctx: ctx}
	n.SetMeta(meta)
	n.SetFlowOpCtx(flowOpCtx)
	return n
}
// LoadNodeConfig reads the delay (milliseconds) from the node meta config.
// A non-numeric config is logged and leaves the delay at its zero value;
// the error is never propagated.
func (node *WaitNode) LoadNodeConfig() error {
	if delay, ok := node.Meta().Config.(float64); ok {
		node.delay = int(delay)
	} else {
		node.GetLog().Error(" Can't cast Wait node delay value")
	}
	return nil
}
// WaitForEvent is part of the Node interface; WaitNode does not consume
// reactor events, so this is intentionally a no-op.
func (node *WaitNode) WaitForEvent(nodeEventStream chan model.ReactorEvent) {
}
// OnInput pauses the flow for the configured delay (milliseconds) and then
// returns the success transition. Waiting can be interrupted by a control
// signal: SIGNAL_TERMINATE_WAITING aborts with no transition, while any
// other signal falls through to the success transition immediately.
func (node *WaitNode) OnInput(msg *model.Message) ([]model.NodeID, error) {
	node.GetLog().Info(" Waiting for = ", node.delay)
	timer := time.NewTimer(time.Millisecond * time.Duration(node.delay))
	select {
	case <-timer.C:
		return []model.NodeID{node.Meta().SuccessTransition}, nil
	case signal := <-node.FlowOpCtx().NodeControlSignalChannel:
		// Stop the timer so it does not fire after we leave the select.
		timer.Stop()
		node.GetLog().Debug("Control signal SIGNAL_TERMINATE_WAITING")
		if signal == model.SIGNAL_TERMINATE_WAITING {
			return nil, nil
		}
	}
	return []model.NodeID{node.Meta().SuccessTransition}, nil
}
|
package godis
import (
"log"
"os"
"sync"
"github.com/callduckk/YSGo/godis/cron"
)
// ServerType selects whether a Godis server periodically persists backups.
type ServerType int

const (
	// WithBackup enables the periodic backup cron job.
	WithBackup ServerType = iota + 1
	// WithoutBackup disables backup persistence.
	WithoutBackup
)
// buildGodisServer assembles a GodisServer. With WithBackup, a cron job
// periodically writes the store to disk; the interval comes from the
// GodisBackupInterval env variable (default "30m"). When loadBackup is
// true, the latest backup file is restored into the dictionary.
func buildGodisServer(serverType ServerType, loadBackup bool) *GodisServer {
	server := &GodisServer{dictionary: &sync.Map{}}
	if serverType == WithBackup {
		interval := os.Getenv("GodisBackupInterval")
		if interval == "" {
			log.Println("'GodisBackupInterval' env variable was not set. Falling back to 30m")
			interval = "30m"
		}
		duration := parseString(interval)
		log.Printf("Backup Interval: %s\n", interval)
		log.Printf("Backup path: %s\n", getLatestBackupFileName())
		cron.CreateCron(duration, func() {
			log.Println("Backing up to file.")
			server.backupToFile()
		})
		log.Println("Backup cron has been created successfully.")
	}
	if loadBackup {
		log.Println("Restoring latest backup.")
		server.loadFromFile()
	}
	return server
}
|
package task
import (
"fmt"
"strings"
"sync"
)
// controllerInstance and controllerOnce implement the controller singleton.
var controllerInstance *controller
var controllerOnce sync.Once

// controller is the task controller: it maps task types to factories and
// tracks running tasks by id; taskMap access is guarded by taskMapRWLock.
type controller struct {
	taskFactoryMap map[int]Factory
	taskMap        map[string]Task
	taskMapRWLock  sync.RWMutex
}
// InitTaskController initializes the controller singleton with the given
// task-type -> factory registry. Subsequent calls are no-ops.
func InitTaskController(factoryMap map[int]Factory) {
	controllerOnce.Do(func() {
		controllerInstance = &controller{
			taskFactoryMap: factoryMap,
			taskMap:        make(map[string]Task),
		}
	})
}
//GetControllerInstance returns the task controller singleton.
//It is nil until InitTaskController has been called.
func GetControllerInstance() *controller {
	return controllerInstance
}
//Create instantiates a task of taskType with the given config, registers it
//under taskID, and starts its Run loop in a goroutine.
//It fails when the id is empty, already registered, or the type unknown.
func (c *controller) Create(taskID string, taskType int, taskConfigInfo interface{}) error {
	if strings.EqualFold(taskID, "") {
		return fmt.Errorf("task id is nil")
	}
	c.taskMapRWLock.Lock()
	defer c.taskMapRWLock.Unlock()
	if c.taskMap[taskID] != nil {
		// The original message claimed the opposite ("task not exit").
		return fmt.Errorf("task id = %s already exists", taskID)
	}
	// Use the receiver consistently; the original mixed c with the
	// package-level singleton, which breaks on any non-singleton instance.
	taskFactory, ok := c.taskFactoryMap[taskType]
	if !ok {
		return fmt.Errorf("task type not registered in task factory")
	}
	task, err := taskFactory.CreateTask(taskID, taskConfigInfo)
	if err != nil {
		return err
	}
	c.taskMap[taskID] = task
	go task.Run()
	return nil
}
//Start resumes the task registered under taskID.
func (c *controller) Start(taskID string) error {
	// A read lock suffices for a map lookup; the original took the write
	// lock. The task method itself runs outside the lock, as before.
	c.taskMapRWLock.RLock()
	task := c.taskMap[taskID]
	c.taskMapRWLock.RUnlock()
	if task == nil {
		return fmt.Errorf("task id = %s not exist", taskID)
	}
	return task.Start()
}
//Stop pauses the task registered under taskID.
func (c *controller) Stop(taskID string) error {
	// Read lock for the lookup (original used the write lock);
	// Stop itself runs outside the lock, as before.
	c.taskMapRWLock.RLock()
	task := c.taskMap[taskID]
	c.taskMapRWLock.RUnlock()
	if task == nil {
		return fmt.Errorf("task id = %s not exist", taskID)
	}
	return task.Stop()
}
//Cancel aborts the task registered under taskID.
func (c *controller) Cancel(taskID string) error {
	// Read lock for the lookup (original used the write lock);
	// Cancel itself runs outside the lock, as before.
	c.taskMapRWLock.RLock()
	task := c.taskMap[taskID]
	c.taskMapRWLock.RUnlock()
	if task == nil {
		return fmt.Errorf("task id = %s not exist", taskID)
	}
	return task.Cancel()
}
//GetStatus returns the info of the task registered under taskID, or an
//error value when no such task exists (the return type is interface{}, so
//callers must type-check the result).
func (c *controller) GetStatus(taskID string) interface{} {
	// Read lock for the lookup (original used the write lock).
	c.taskMapRWLock.RLock()
	task := c.taskMap[taskID]
	c.taskMapRWLock.RUnlock()
	if task == nil {
		return fmt.Errorf("task id = %s not exist", taskID)
	}
	return task.GetInfo()
}
|
package iterators
// SingleValue creates an iterator that yields exactly one element: Next
// reports true only on its first call (and never after Close), after which
// Value returns v.
func SingleValue[T any](v T) Iterator[T] {
	return &singleValueIter[T]{V: v}
}
// singleValueIter is the one-shot iterator behind SingleValue.
type singleValueIter[T any] struct {
	V      T
	index  int  // 0 until the single element has been consumed
	closed bool // set by Close; forces Next to report false
}

// Close marks the iterator exhausted; it never fails.
func (i *singleValueIter[T]) Close() error {
	i.closed = true
	return nil
}

// Next reports whether the single element is still available, consuming it
// on the first successful call.
func (i *singleValueIter[T]) Next() bool {
	if i.closed || i.index > 0 {
		return false
	}
	i.index++
	return true
}

// Err always reports success; this iterator cannot fail.
func (i *singleValueIter[T]) Err() error {
	return nil
}

// Value returns the wrapped element.
func (i *singleValueIter[T]) Value() T {
	return i.V
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
//"strconv"
"strings"
mf "github.com/mixamarciv/gofncstd3000"
//"github.com/gorilla/sessions"
)
//http_auth_vk handles the VK OAuth callback: it exchanges the "code" query
//parameter for an access_token, fetches the user's profile, persists the
//user in the DB, stores the user data in the session, and renders the
//login page (with either a success message or the collected errors).
func http_auth_vk(w http.ResponseWriter, r *http.Request) {
	d := map[string]interface{}{}
	get_vars, _ := url.ParseQuery(r.URL.RawQuery)
	d["url_rawquery"] = r.URL.RawQuery
	d["get_vars"] = get_vars
	code := get_vars.Get("code")
	if len(code) > 0 {
		vkapi := gcfg_app["vkapi"].(map[string]interface{})
		//send the API request to obtain an access_token
		urlstr := "https://oauth.vk.com/access_token?"
		urlstr += "client_id=" + vkapi["id"].(string) + "&client_secret=" + vkapi["secret"].(string)
		urlstr += "&code=" + code + "&redirect_uri=http://anykey.vrashke.net/auth_vk"
		d2 := http_auth_vk_send_http_request(urlstr)
		_, err1 := d2["err"]
		_, err2 := d2["error"]
		if err1 || err2 {
			RenderTemplate(w, r, d2, "maintemplate.html", "login.html")
			return
		}
		//request the user's profile data
		urlstr = "https://api.vk.com/method/users.get?uid=" + floatToStr(d2["user_id"])
		urlstr += "&access_token=" + d2["access_token"].(string)
		urlstr += "&v=5.8"
		d3 := http_auth_vk_send_http_request(urlstr)
		_, err1 = d3["err"]
		_, err2 = d3["error"]
		if err1 || err2 {
			RenderTemplate(w, r, d3, "maintemplate.html", "login.html")
			return
		}
		//check the results: "response" must be a non-empty array of objects
		resp, ok := d3["response"]
		if !ok {
			d3["error"] = fmt.Errorf("http_auth_vk ERROR001: no response")
			RenderTemplate(w, r, d3, "maintemplate.html", "login.html")
			return
		}
		tresp := fmt.Sprintf("%T", resp)
		if tresp != "[]interface {}" || fmt.Sprintf("%T", resp.([]interface{})[0]) != "map[string]interface {}" {
			d3["error"] = fmt.Errorf("http_auth_vk ERROR002: bad response")
			RenderTemplate(w, r, d3, "maintemplate.html", "login.html")
			return
		}
		//merge everything into a single map
		for k, v := range resp.([]interface{})[0].(map[string]interface{}) {
			d2[k] = v
		}
		//persist the current user's authorization in the DB and load their DB access rights
		http_auth_vk_load_user_data(d2)
		user_data := d2
		//if loading and/or authorization failed, render the errors and cancel the authorization
		_, err1 = user_data["error"]
		if err1 {
			RenderTemplate(w, r, user_data, "maintemplate.html", "login.html")
			return
		}
		//everything went fine:
		//store the user data in the current session
		//sess := GetSess(w, r)
		//sess.Values["user"] = mf.ToJsonStr(user_data)
		//sess.Save(r, w)
		SetSessUserData(w, r, mf.ToJsonStr(user_data))
		//{
		//	sess, _ := sess_store.Get(r, gcfg_secret_cookie_name)
		//	sess.Values["user"] = mf.ToJsonStr(user_data)
		//	sess.Save(r, w)
		//	sessions.Save(r, w)
		//}
		//**********************************************************
		d["success"] = "авторизация пользователя " + user_data["name"].(string) + " успешно пройдена "
		RenderTemplate(w, r, d, "maintemplate.html", "login.html")
		return
	}
	RenderTemplate(w, r, d, "maintemplate.html", "login.html")
	return
}
//example payload: map[uid:1.42080324e+08 first_name:Михаил last_name:Марцив access_token:6b...9a9 expires_in:86400 user_id:1.42080324e+08]
//http_auth_vk_load_user_data loads the user's data from the DB, or applies
//default values and registers a new user. It also records this login in
//TUSER_AUTH_VK and strips transient fields from d at the end. On failure
//it stores the error under d["error"] and returns early.
//NOTE(review): queries are built by string concatenation; the id comes
//from floatToStr (digits only) and names are escaped by doubling single
//quotes, but parameterized queries would be safer — confirm.
func http_auth_vk_load_user_data(d map[string]interface{}) {
	_, err1 := d["err"]
	_, err2 := d["error"]
	if err1 || err2 {
		return
	}
	d["id"] = floatToStr(d["user_id"])
	name := d["first_name"].(string) + " " + d["last_name"].(string)
	default_fdata := `{"accessdb":{"a":"1","p":"1","w":"0"},"first_name":"` + d["first_name"].(string) + `","last_name":"` + d["last_name"].(string) + `"}`
	//default access rights: 0 - no access,
	//1 - read only, 2 - write, 3 - moderation of others' records (approving and publishing them)
	db := dbmap["users"].DB
	prev_query := ""
	//fetch the user's current data from the DB
	query := "SELECT uuid,name,fdata FROM tuser WHERE id='" + d["id"].(string) + "' AND type='vk'"
	rows, err := db.Query(query)
	if err != nil {
		d["error"] = fmtError("http_auth_vk_load_user_data ERROR001 db.Query(query): query:\n"+query+"\n\nprev_query:\n"+prev_query+"\n\n", err)
		return
	}
	prev_query = query
	d["uuid_user"] = ""
	for rows.Next() {
		var uuid_user, name, fdata NullString
		if err := rows.Scan(&uuid_user, &name, &fdata); err != nil {
			d["error"] = fmtError("http_auth_vk_load_user_data ERROR002 rows.Scan: query:\n"+query+"\n\nprev_query:\n"+prev_query+"\n\n", err)
			return
		}
		d["uuid_user"] = uuid_user.get("")
		d["name"] = name.get("")
		//d["fdata"] = mf.FromJsonStr([]byte(fdata.get(default_fdata)))
		d["fdata"] = fdata.get(default_fdata)
	}
	rows.Close()
	//if the user is not in the system yet, register them in the DB
	if d["uuid_user"].(string) == "" {
		d["uuid_user"] = mf.StrUuid()
		d["name"] = name
		d["fdata"] = default_fdata
		name := strings.Replace(d["name"].(string), "'", "''", -1)
		fdata := strings.Replace(d["fdata"].(string), "'", "''", -1)
		query := "INSERT INTO tuser(uuid,id,name,fdata,type) VALUES('" + d["uuid_user"].(string) + "','" + d["id"].(string) + "'" +
			",'" + name + "','" + fdata + "','vk')"
		_, err := db.Exec(query)
		if err != nil {
			d["error"] = fmtError("http_auth_vk_load_user_data ERROR003 db.Exec(query): query:\n"+query+"\n\nprev_query:\n"+prev_query+"\n\n", err)
			return
		}
		prev_query = query
	}
	//if the user's name has changed, update the stored name and fdata
	if d["name"].(string) != name {
		t := mf.FromJsonStr([]byte(d["fdata"].(string)))
		t["last_name"] = d["last_name"].(string)
		t["first_name"] = d["first_name"].(string)
		d["fdata"] = mf.ToJsonStr(t)
		name := strings.Replace(name, "'", "''", -1)
		fdata := strings.Replace(d["fdata"].(string), "'", "''", -1)
		query := "UPDATE tuser SET fdata='" + fdata + "',name='" + name + "' WHERE uuid='" + d["uuid_user"].(string) + "' AND type='vk'"
		_, err := db.Exec(query)
		if err != nil {
			d["error"] = fmtError("http_auth_vk_load_user_data ERROR004 db.Exec(query): query:\n"+query+"\n\nprev_query:\n"+prev_query+"\n\n", err)
			return
		}
		prev_query = query
	}
	{ //in any case, record this login with the access rights granted at registration
		d["uuid_auth"] = mf.StrUuid()
		fdata := strings.Replace(d["fdata"].(string), "'", "''", -1)
		query := "INSERT INTO TUSER_AUTH_VK(uuid,uuid_tuser,fdata,access_token) VALUES('" + d["uuid_auth"].(string) + "'" +
			",'" + d["uuid_user"].(string) + "','" + fdata + "','" + d["access_token"].(string) + "')"
		_, err := db.Exec(query)
		if err != nil {
			d["error"] = fmtError("http_auth_vk_load_user_data ERROR005 db.Exec(query): query:\n"+query+"\n\nprev_query:\n"+prev_query+"\n\n", err)
			return
		}
		prev_query = query
	}
	d["fdata"] = mf.FromJsonStr([]byte(d["fdata"].(string)))
	d["type"] = "vk"
	//drop transient fields: everything needed is already persisted in the DB;
	//later changes are reconciled by comparing session data against the DB
	delete(d, "access_token")
	delete(d, "first_name")
	delete(d, "last_name")
	delete(d, "expires_in")
	delete(d, "user_id")
	delete(d, "uid")
	return
}
//http_auth_vk_send_http_request sends a GET request to urlStr and decodes
//the JSON response body into a map. Transport/read/decode failures are
//reported under the "err" key of the returned map (never nil).
func http_auth_vk_send_http_request(urlStr string) map[string]interface{} {
	LogPrint("http_auth_vk_send_http_request urlStr: " + urlStr + "\n")
	ret := make(map[string]interface{})
	client := &http.Client{}
	r, err := http.NewRequest("GET", urlStr, nil)
	LogPrintErrAndExit("http_auth_vk_send_http_request error001: \n urlStr: "+urlStr+"\n\n", err)
	r.Header.Add("method", "GET")
	r.Header.Add("path", "/")
	r.Header.Add("scheme", "https")
	r.Header.Add("accept", "text/json")
	resp, err := client.Do(r)
	if err != nil {
		ret["err"] = fmt.Sprintf("http_auth_vk_send_http_request ERROR002 client.Do: %#v", err)
		return ret
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		ret["err"] = fmt.Sprintf("http_auth_vk_send_http_request ERROR003 ioutil.ReadAll(resp.Body): %#v", err)
		return ret
	}
	// Decode into a separate variable: the original reassigned ret to the
	// (possibly nil) map returned by FromJson and then wrote the error into
	// it, which panics on a nil-map write when decoding fails.
	parsed, err := mf.FromJson(body)
	if err != nil {
		ret["err"] = fmt.Sprintf("http_auth_vk_send_http_request ERROR004 Unmarshal json error: %#v", err)
		return ret
	}
	return parsed
}
|
package adasync
import (
"github.com/adamcolton/err"
"os"
"path/filepath"
"sort"
)
// SelfUpdate reconciles the instance with the current state of its
// directory tree by computing a self-diff and resolving it.
func (ins *Instance) SelfUpdate() {
	err.Debug("Self Update: ", ins.pathStr)
	diff := ins.SelfDiff()
	// directories need to be resolved first, otherwise if a directory was
	// renamed, every file will think it was moved.
	diff.resolveDirectories()
	diff.resolveFiles()
	diff.resolveDeleted()
}
// SelfDiff
// A note on hashes: if there were two copies of a file and not there is only
// one and it has moved, we can't tell which one it was, the way we treat hashes
// will pick one
// SelfDiff creates an empty diff container bound to this instance; the
// resolve* methods populate and consume it.
func (ins *Instance) SelfDiff() *deltaSelf {
	d := &deltaSelf{ins: ins}
	d.removed = make(map[string]*Resource)
	d.removedByHash = make(map[string]*Resource)
	d.deleted = make(map[string]*Resource)
	return d
}
// deltaSelf accumulates differences between the instance's recorded state and
// the filesystem. The maps are used as sets/indexes, not as plain maps.
type deltaSelf struct {
	added         []string
	removed       map[string]*Resource // path -> resource
	removedByHash map[string]*Resource // hash -> resource
	ins           *Instance
	deleted       map[string]*Resource // id -> resource
}
// addDirs is used to walk the directory tree; it records every directory it
// sees via checkFile.
func (d *deltaSelf) addDirs(pathStr string, fi os.FileInfo, walkErr error) error {
	// The original ignored the walk error; when stat fails, fi can be nil and
	// fi.IsDir() would panic. Propagate the error instead.
	if walkErr != nil {
		return walkErr
	}
	if !fi.IsDir() {
		return nil
	}
	// sanitize pathStr
	pathStr = PathFromString(endingSlash(pathStr), d.ins.pathStr).String()
	d.checkFile(pathStr)
	return nil
}
// addFiles adds a file path to the deltaSelf. If the path is in "removed"
// then it's a known resource and is removed from removed; if not, it's a new
// resource and is appended to added.
func (d *deltaSelf) addFiles(pathStr string, fi os.FileInfo, walkErr error) error {
	// The original ignored the walk error; when stat fails, fi can be nil and
	// fi.IsDir() would panic. Propagate the error instead.
	if walkErr != nil {
		return walkErr
	}
	if fi.IsDir() {
		return nil
	}
	path := PathFromString(pathStr, d.ins.pathStr)
	// skip any files ending in .collection
	if endsWith(path.name, ".collection") {
		return nil
	}
	pathStr = path.String()
	d.checkFile(pathStr)
	return nil
}
// checkFile classifies one walked path: a path already tracked in removed is
// a survivor and is pruned from both indexes; anything else is new.
func (d *deltaSelf) checkFile(pathStr string) {
	res, known := d.removed[pathStr]
	if known && d.ins.pathEqualsResource(res, pathStr) {
		delete(d.removed, pathStr)
		delete(d.removedByHash, res.Hash.String())
		return
	}
	d.added = append(d.added, pathStr)
}
// resolveDeleted marks every resource still listed in removed as deleted and
// flags the instance as dirty.
func (d *deltaSelf) resolveDeleted() {
	for _, gone := range d.removed {
		d.ins.dirty = true
		gone.PathNodes.Add(d.ins.PathNodeFromHash(nil, ".deleted"))
	}
}
// resolveDirectories reconciles the on-disk directory tree with the known
// directories: survivors are pruned from removed, and each new path is either
// matched to a moved directory or registered as brand new.
func (d *deltaSelf) resolveDirectories() {
	// Put everything in removed and remove from removed
	// as we find each. What's left is what was actually
	// removed
	for _, dir := range d.ins.directories {
		if pn := dir.PathNodes.Last(); pn.ParentID != nil || pn.Name != ".deleted" {
			d.removed[pn.FullPath()] = dir.Resource
			// NOTE(review): for directories, removedByHash is keyed by ID
			// rather than content hash, yet the lookup below uses the hash
			// from newPath.Stat() — confirm directory IDs and stat hashes
			// coincide.
			d.removedByHash[dir.ID.String()] = dir.Resource
		} else if pn.Name == ".deleted" {
			d.deleted[dir.ID.String()] = dir.Resource
		}
	}
	d.added = make([]string, 0)
	filepath.Walk(d.ins.pathStr, d.addDirs)
	// We sort so that files will be added in an order such that a child can
	// always add itself to it's parent. But there may be cases involving moving
	// where that won't work
	sort.Sort(ByLength(d.added))
	for h, res := range d.removedByHash {
		err.Debug(h, res.RelativePath())
	}
	for _, newPathStr := range d.added {
		d.ins.dirty = true
		newPath := PathFromString(newPathStr, d.ins.pathStr) //*Path
		pathNode := d.ins.PathToNode(newPath)                //*PathNode
		hash, _, _ := newPath.Stat()
		err.Debug(hash, newPathStr)
		if res, ok := d.removedByHash[hash.String()]; ok {
			// resource was moved
			err.Debug("Moved: ", res.FullPath())
			err.Debug("To: ", newPathStr)
			delete(d.removed, res.FullPath())
			delete(d.removedByHash, hash.String())
			// NOTE(review): parent is captured before the move, but the
			// re-insertion key below is the post-move relative name — this
			// writes the new name into the old parent's map; confirm intended.
			parent := res.PathNodes.Last().Parent()
			delete(parent.directories, res.RelativePath().name)
			res.PathNodes.Add(pathNode)
			parent.directories[res.RelativePath().name] = d.ins.directories[res.ID.String()]
		} else {
			// resource is new
			err.Debug("Added: ", newPathStr)
			dir := d.ins.AddDirectoryWithPath(hash, pathNode)
			dir.PathNodes.Last().Parent().directories[dir.RelativePath().name] = dir
		}
	}
}
// resolveFiles performs the same reconciliation as resolveDirectories, but
// for file resources: survivors leave removed, new paths are matched by
// content hash to moved files or added as new resources.
func (d *deltaSelf) resolveFiles() {
	// Put everything in removed and remove from removed
	// as we find each. What's left is what was actually
	// removed
	for _, res := range d.ins.resources {
		if pn := res.PathNodes.Last(); pn.ParentID != nil || pn.Name != ".deleted" {
			d.removed[pn.FullPath()] = res
			d.removedByHash[res.Hash.String()] = res
		} else if pn.Name == ".deleted" {
			d.deleted[res.ID.String()] = res
		}
	}
	d.added = make([]string, 0)
	filepath.Walk(d.ins.pathStr, d.addFiles)
	for _, newPathStr := range d.added {
		d.ins.dirty = true
		newPath := PathFromString(newPathStr, d.ins.pathStr) //*Path
		pathNode := d.ins.PathToNode(newPath)                //*PathNode
		hash, _, size := newPath.Stat()
		if res, ok := d.removedByHash[hash.String()]; ok {
			// resource was moved: same content hash seen at a new path.
			// NOTE(review): with duplicate files, which copy "moved" is
			// arbitrary (see SelfDiff's note on hashes).
			err.Debug("Moved: ", res.FullPath())
			err.Debug("To: ", newPathStr)
			delete(d.removed, res.FullPath())
			delete(d.removedByHash, hash.String())
			res.PathNodes.Add(pathNode)
		} else {
			// resource is new
			err.Debug("Added: ", newPathStr)
			r := d.ins.AddResourceWithPath(hash, size, pathNode)
			err.Debug(r.Size, size)
		}
	}
}
// BadInstanceScan this is a debugging tool
// despite my best efforts, unit testing has not caught all the errors, this
// can help find additional errors under real conditions
func (ins *Instance) BadInstanceScan() {
	for _, d := range ins.directories {
		pathNode := d.PathNodes.Last()
		name := pathNode.Name
		// Guard len(name) == 0 before indexing the last byte; the original
		// panicked on an empty name instead of reporting it as bad.
		if len(name) == 0 || name[len(name)-1] != '/' {
			err.Debug("--- Bad Directory Name", d.FullPath())
		}
		if name != ".deleted" {
			if root, ok := pathNode.getRoot(); !ok || root != ins.root.PathNodes.Last() {
				err.Debug("--- Bad root", d.FullPath())
			}
		}
		if pathNode.ParentID != nil {
			if _, ok := ins.directories[pathNode.ParentID.String()]; !ok {
				err.Debug("--- Did not find parent", d.FullPath())
			}
		}
	}
	for _, d := range ins.resources {
		pathNode := d.PathNodes.Last()
		name := pathNode.Name
		if name != ".deleted" {
			if root, ok := pathNode.getRoot(); !ok || root != ins.root.PathNodes.Last() {
				err.Debug("--- Bad root", d.FullPath())
			}
		}
		if pathNode.ParentID != nil {
			if _, ok := ins.directories[pathNode.ParentID.String()]; !ok {
				err.Debug("--- Did not find parent", d.FullPath(), pathNode.ParentID)
			}
		}
	}
}
|
package main
import (
"testing"
"strconv"
"github.com/stretchr/testify/assert"
)
// TestCases exercises totalCost with table-driven cases plus one large input.
func TestCases(t *testing.T) {
	tcs := []struct {
		books []int
		cost  int64
	}{
		{[]int{3, 2, 3, 2}, 8},
		{[]int{6, 4, 5, 5, 5, 5}, 21},
		{[]int{}, 0},      // empty cart costs nothing
		{[]int{100}, 100}, // single book, no discount
		// The original listed this case twice; once is enough.
		{[]int{100000, 100000, 100000, 100000}, 300000},
	}
	for idx, tc := range tcs {
		t.Run(strconv.Itoa(idx), func(inner *testing.T) {
			assert.Equal(inner, tc.cost, totalCost(tc.books))
		})
	}
	// Large case: its batches of bulk equally-priced books. The expected
	// total assumes one free book per group of three — TODO confirm against
	// totalCost's documented discount rule.
	its := 1432
	bulk := 15
	cost := 100000
	books := make([]int, 0, its*bulk) // preallocate to avoid repeated growth
	for i := 0; i < its; i++ {
		books = append(books, []int{
			cost, cost, cost,
			cost, cost, cost,
			cost, cost, cost,
			cost, cost, cost,
			cost, cost, cost,
		}...)
	}
	ttl := int64(cost*bulk*its) - int64(cost*(bulk/3)*its)
	assert.Equal(t, ttl, totalCost(books))
}
|
package dao
import (
"database/sql"
"errors"
_ "github.com/go-sql-driver/mysql"
xerrors "github.com/pkg/errors"
)
var (
	// ErrorNotRows is a sentinel for queries that match no rows.
	// NOTE(review): Go convention would name this ErrNoRows with a lowercase,
	// unpunctuated message; kept as-is for compatibility. It is not returned
	// by any function visible in this file — confirm callers before renaming.
	ErrorNotRows = errors.New("There is no data.")
)
// User represents a row of the user table.
type User struct {
	ID   int
	Name string
}
// GetUserByID returns the name of the user with the given id.
//
// NOTE(review): opening a new connection pool on every call is expensive;
// consider a package-level *sql.DB initialized once.
func GetUserByID(id int) (string, error) {
	db, err := sql.Open("mysql", "root:123456@tcp(127.0.0.1:3306)/mysql?charset=utf8")
	if err != nil {
		return "", xerrors.Wrap(err, "db connect failed")
	}
	// Close the pool so connections are not leaked (the original never did).
	defer db.Close()
	var name string
	// Select only the column being scanned: the original selected id AND name
	// but scanned into a single destination, so Scan always returned
	// "expected 2 destination arguments" and the function could never succeed.
	err = db.QueryRow("Select name from user where id=?", id).Scan(&name)
	if err != nil {
		return "", xerrors.Wrap(err, "search failed")
	}
	return name, nil
}
|
//database insert operations
package sorm
import (
"errors"
"fmt"
"log"
"reflect"
"strings"
)
//insert into user (age,first_name,last_name) values (20,'Tom','One')
// Insert builds and executes an INSERT statement for in, which may be a
// struct, a map, a slice of structs, or a pointer to any of those, e.g.
// insert into user (age,first_name,last_name) values (20,'Tom','One').
// It returns the last insert id.
//
// WARNING(review): column names and values are spliced directly into the SQL
// text; if any of them can come from untrusted input this is an SQL injection
// risk — placeholders ("?") passed to Exec would be safer.
func (q *Query) Insert(in interface{}) (int64, error) {
	var keys, values []string
	v := reflect.ValueOf(in)
	// unwrap a pointer argument
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	// dispatch on the concrete kind of in
	switch v.Kind() {
	case reflect.Struct:
		keys, values = sKV(v)
	case reflect.Map:
		keys, values = mKV(v)
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			sv := v.Index(i)
			// unwrap interface and pointer elements,
			// e.g. &{"Tom","One"} {"Tom","Second"}
			for sv.Kind() == reflect.Interface || sv.Kind() == reflect.Ptr {
				sv = sv.Elem()
			}
			if sv.Kind() != reflect.Struct {
				return 0, errors.New("method Insert error: in slices is not struct!")
			}
			if len(keys) == 0 {
				keys, values = sKV(sv)
				continue
			}
			// the column list only needs to be captured once
			_, val := sKV(sv)
			values = append(values, val...)
		}
	default:
		return 0, errors.New("method Insert error: type error!")
	}
	keysLen := len(keys)
	valuesLen := len(values)
	if keysLen == 0 || valuesLen == 0 {
		return 0, errors.New("method Insert error:no data!")
	}
	var insertValue string
	// multiple rows: emit one "(...)" group per row, joined with ","
	if keysLen < valuesLen {
		var tmpValues []string
		for keysLen <= valuesLen {
			if keysLen%(len(keys)) == 0 {
				tmpValues = append(tmpValues, fmt.Sprintf("(%s)", strings.Join(values[keysLen-len(keys):keysLen], ",")))
			}
			keysLen++
		}
		insertValue = strings.Join(tmpValues, ",")
	} else {
		insertValue = fmt.Sprintf("(%s)", strings.Join(values, ","))
	}
	query := fmt.Sprintf("insert into %s (%s) values %s", q.table, strings.Join(keys, ","), insertValue)
	log.Printf("insert sql:%s\n", query)
	state, err := q.db.Prepare(query)
	if err != nil {
		return 0, err
	}
	// Close the prepared statement so server-side handles are not leaked
	// (the original never closed it).
	defer state.Close()
	result, err := state.Exec()
	if err != nil {
		return 0, err
	}
	return result.LastInsertId()
}
|
package handlers
import (
"bytes"
"reflect"
"testing"
)
// TestYamlFormatter_Run formats an input file and compares its output with a
// golden file rendered through the same formatter.
func TestYamlFormatter_Run(t *testing.T) {
	type fields struct {
		filepath string
	}
	tests := []struct {
		name   string
		fields fields
		output string
	}{
		{
			"test1",
			fields{"./test1_actual.yaml"},
			"./test1_expected.yaml",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			y1 := &YamlFormatter{
				filepath: tt.fields.filepath,
			}
			buf1 := &bytes.Buffer{}
			y1.Run(buf1)
			y2 := &YamlFormatter{
				filepath: tt.output,
			}
			buf2 := &bytes.Buffer{}
			y2.Run(buf2)
			// Compare the rendered bytes rather than the *bytes.Buffer
			// values: DeepEqual on the structs also compares unexported
			// bookkeeping fields (read offset, lastRead op), not content.
			if !reflect.DeepEqual(buf1.Bytes(), buf2.Bytes()) {
				t.Error("test error\n", "\n=== actual:\n"+buf1.String(), "\n=== expected:\n"+buf2.String())
			}
		})
	}
}
|
package lumps
/**
Lump 0: Entdata
*/
// EntData holds the raw entity-data lump contents as a string.
type EntData struct {
	LumpGeneric
	data string // raw lump bytes, stored as a string
}
// FromBytes loads the lump from its raw on-disk bytes and records the length.
func (lump *EntData) FromBytes(raw []byte, length int32) {
	lump.LumpInfo.SetLength(length)
	lump.data = string(raw)
}
// GetData returns the lump contents as a string.
func (lump *EntData) GetData() string {
	return lump.data
}
// ToBytes serializes the lump back to its raw byte form.
func (lump *EntData) ToBytes() []byte {
	return []byte(lump.data)
}
|
package state_system
// InitArgs bundles the dependencies handed to states on initialization.
type InitArgs struct {
	StateTree *StateTree
	GameState *GameState
}
|
// Copyright © 2017 Yehor Nazarkin <nimnull@gmail.com>
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"log"
"net/http"
"strconv"
"time"
"github.com/parnurzeal/gorequest"
"github.com/spf13/viper"
)
// StartReactor reads the agent configuration, logs it, and then runs the
// state-update executor on a fixed interval until quit is signalled.
func StartReactor() {
	var (
		sleepSecs = viper.GetInt("rtime")
		apiURL    = viper.GetString("api")
		nodeName  = viper.GetString("nodename")
		watchPort = viper.GetInt("port")
		debugFlag = viper.GetBool("debug")
	)
	log.Printf("Debug:\t%t\n", debugFlag)
	log.Printf("API:\t%s\n", apiURL)
	log.Printf("Node name:\t%s\n", nodeName)
	log.Printf("Port to watch:\t%d\n", watchPort)
	tick := time.NewTicker(time.Duration(sleepSecs) * time.Second)
	quit := make(chan struct{})
	for {
		select {
		case <-tick.C:
			protect(stateUpdateExecutor)
		case <-quit:
			tick.Stop()
			return
		}
	}
}
// protect invokes f, recovering from any panic it raises so the caller's
// loop keeps running; the panic value is logged.
func protect(f func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("Runtime panic %v\n", r)
		}
	}()
	f()
}
// stateUpdateExecutor counts distinct remote IPs holding ESTABLISHED
// connections to the watched port and POSTs the tally to the configured API.
func stateUpdateExecutor() {
	apiURL := viper.GetString("api")
	nodeName := viper.GetString("nodename")
	watchPort := viper.GetInt("port")
	debugFlag := viper.GetBool("debug")
	// unique acts as a set of foreign IPs: only len(unique) is reported, so
	// the per-IP counters (which start at 0) are never read.
	unique := make(map[string]int)
	// channel would be closed as soon Tcp() will finish collecting info
	proc_ex := make(chan Process, 10)
	go Tcp(proc_ex)
	for proc := range proc_ex {
		if proc.State == ESTABLISHED && proc.Port == int64(watchPort) {
			if _, ok := unique[proc.ForeignIp]; ok {
				unique[proc.ForeignIp] += 1
			} else {
				unique[proc.ForeignIp] = 0
			}
		}
	}
	// Report the connection count; retry on gateway errors.
	request := gorequest.New()
	resp, body, errs := request.
		SetDebug(debugFlag).
		Timeout(time.Second*3).
		Set("Accept", "application/json").
		Set("Accept-Language", "en-us").
		Set("User-Agent", "node_agent_v1.0").
		Post(apiURL).
		Type("form").
		Send(map[string]string{
			"node":        nodeName,
			"connections": strconv.FormatInt(int64(len(unique)), 10),
			"port":        strconv.FormatInt(int64(watchPort), 10),
		}).
		Retry(3, 5*time.Second, http.StatusBadGateway, http.StatusGatewayTimeout).
		End()
	if len(errs) > 0 {
		log.Printf("Request to %s failed: %s\n", apiURL, resp.Status)
		for _, err := range errs {
			log.Printf("%s\n", err)
		}
		log.Println(body)
	}
}
|
package mr
import "fmt"
import "log"
import "net/rpc"
import "hash/fnv"
import "os"
import "io/ioutil"
import "strconv"
import "encoding/json"
import "sort"
// ByKey implements sort.Interface for []KeyValue, ordering by Key.
type ByKey []KeyValue
// Len, Swap and Less satisfy sort.Interface.
func (a ByKey) Len() int { return len(a) }
func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByKey) Less(i, j int) bool { return a[i].Key < a[j].Key }
//
// Map functions return a slice of KeyValue.
//
type KeyValue struct {
	Key string
	Value string
}
// ihash maps a key to a non-negative int. Use ihash(key) % NReduce to choose
// the reduce task number for each KeyValue emitted by Map.
func ihash(key string) int {
	hasher := fnv.New32a()
	hasher.Write([]byte(key))
	masked := hasher.Sum32() & 0x7fffffff // clear the sign bit
	return int(masked)
}
//
// main/mrworker.go calls this function.
//
// Worker loops forever, asking the master for a task and dispatching to the
// map or reduce handler based on the reply's TaskType.
func Worker(mapf func(string, string) []KeyValue,
	reducef func(string, []string) string) {
	// Your worker implementation here.
	// uncomment to send the Example RPC to the master.
	// CallExample()
	for {
		reply := CallForTask(AskForTask, "")
		// NOTE(review): an empty TaskType (returned when the RPC fails)
		// matches neither case, so this loop spins without backoff.
		switch(reply.TaskType){
		case "map":
			Mapf(&reply, mapf)
		case "reduce":
			Reducef(&reply, reducef)
		}
	}
}
// CallForTask sends a task-related message to the master and returns its
// reply; on RPC failure the reply carries an empty TaskType.
func CallForTask(msgType int, msgCnt string) MyReply {
	args := MyArgs{MessageType: msgType, MessageCnt: msgCnt}
	var reply MyReply
	if !call("Master.MyCall", &args, &reply) {
		return MyReply{TaskType: ""}
	}
	fmt.Printf("reply.type %v\n", reply.TaskType)
	return reply
}
// SendInterFiles reports an intermediate file's location (and its reduce
// bucket) to the master, returning the master's reply.
func SendInterFiles(msgType int, msgCnt string, nReduceType int) MyReply {
	args := MyIntermediateFile{
		MessageType: msgType,
		MessageCnt:  msgCnt,
		NReduceType: nReduceType,
	}
	var reply MyReply
	ok := call("Master.MyInnerFileCall", &args, &reply)
	if !ok {
		fmt.Println("error sending intermediate files' location")
	}
	return reply
}
// Mapf runs one map task: it reads the input file, applies mapf, partitions
// the output into NReduce JSON files, reports each file to the master, and
// finally signals task completion.
func Mapf(reply *MyReply, mapf func(string, string) []KeyValue) {
	file, err := os.Open(reply.Filename)
	if err != nil {
		log.Fatalf("cannot open %v", reply.Filename)
	}
	// Defer only after the open succeeded; the original deferred Close on a
	// possibly-nil *os.File before checking err.
	defer file.Close()
	content, err := ioutil.ReadAll(file)
	if err != nil {
		log.Fatalf("cannot read %v", reply.Filename)
	}
	kva := mapf(reply.Filename, string(content))
	kvas := Partition(kva, reply.NReduce)
	for i := 0; i < reply.NReduce; i++ {
		filename := WriteToJSONFile(kvas[i], reply.MapNumAllocated, i)
		_ = SendInterFiles(MsgForInterFileLoc, filename, i)
	}
	_ = CallForTask(MapFinished, reply.Filename)
}
// Partition splits kva into nReduce buckets, assigning each pair to bucket
// ihash(Key) % nReduce.
func Partition(kva []KeyValue, nReduce int) [][]KeyValue {
	buckets := make([][]KeyValue, nReduce)
	for _, kv := range kva {
		b := ihash(kv.Key) % nReduce
		buckets[b] = append(buckets[b], kv)
	}
	return buckets
}
// WriteToJSONFile writes intermediate pairs to "mr-<map>-<reduce>" as a
// stream of JSON objects and returns the filename.
func WriteToJSONFile(intermediate []KeyValue, mapTaskNum, reduceTaskNUm int) string {
	filename := "mr-" + strconv.Itoa(mapTaskNum) + "-" + strconv.Itoa(reduceTaskNUm)
	jfile, err := os.Create(filename)
	if err != nil {
		// The original ignored this error and would have panicked in Encode.
		log.Fatal("error: ", err)
	}
	// Close so the data is flushed before reducers read it; the original
	// leaked the file handle.
	defer jfile.Close()
	enc := json.NewEncoder(jfile)
	for _, kv := range intermediate {
		if err := enc.Encode(&kv); err != nil {
			log.Fatal("error: ", err)
		}
	}
	return filename
}
// Reducef runs one reduce task: it loads every intermediate file listed in
// the reply, sorts the pairs by key, applies reducef per key group, writes
// "key value" lines to mr-out-<n>, and signals completion to the master.
func Reducef(reply *MyReply, reducef func(string, []string) string) {
	intermediate := []KeyValue{}
	for _, v := range reply.ReduceFileList {
		file, err := os.Open(v)
		if err != nil {
			log.Fatalf("cannot open %v", v)
		}
		dec := json.NewDecoder(file)
		for {
			var kv KeyValue
			if err := dec.Decode(&kv); err != nil {
				break
			}
			intermediate = append(intermediate, kv)
		}
		// Close immediately: the original used defer inside the loop, which
		// held every file open until the whole function returned (and ran
		// Close on a nil file if Open had failed).
		file.Close()
	}
	sort.Sort(ByKey(intermediate))
	oname := "mr-out-" + strconv.Itoa(reply.ReduceNumAllocated)
	ofile, err := os.Create(oname)
	if err != nil {
		// The original discarded this error and never closed ofile.
		log.Fatalf("cannot create %v", oname)
	}
	defer ofile.Close()
	i := 0
	for i < len(intermediate) {
		// Find the run [i, j) of entries sharing the same key.
		j := i + 1
		for j < len(intermediate) && intermediate[j].Key == intermediate[i].Key {
			j++
		}
		values := []string{}
		for k := i; k < j; k++ {
			values = append(values, intermediate[k].Value)
		}
		output := reducef(intermediate[i].Key, values)
		fmt.Fprintf(ofile, "%v %v\n", intermediate[i].Key, output)
		i = j
	}
	_ = CallForTask(ReduceFinished, strconv.Itoa(reply.ReduceNumAllocated))
}
// WriteToReduceOutput appends a "key value" line to mr-out-<nReduce>,
// creating the file if it does not yet exist.
func WriteToReduceOutput(key, values string, nReduce int) {
	filename := "mr-out-" + strconv.Itoa(nReduce)
	// The original used os.Open, which opens read-only: Fprintf on such a
	// handle can never write. Open for create/append/write instead, and
	// close the handle (the original leaked it).
	ofile, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		fmt.Println("cannot open", filename)
		return
	}
	defer ofile.Close()
	fmt.Fprintf(ofile, "%v %v\n", key, values)
}
//
// call sends an RPC request to the master, wait for the response.
// usually returns true.
// returns false if something goes wrong.
//
func call(rpcname string, args interface{}, reply interface{}) bool {
	// c, err := rpc.DialHTTP("tcp", "127.0.0.1"+":1234")
	sockname := masterSock()
	c, err := rpc.DialHTTP("unix", sockname)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	defer c.Close()
	if callErr := c.Call(rpcname, args, reply); callErr != nil {
		fmt.Println(callErr)
		return false
	}
	return true
}
|
package control
import (
"fmt"
"github.com/et-zone/embi/chart"
"github.com/et-zone/embi/dao"
"github.com/et-zone/embi/model"
"github.com/gin-gonic/gin"
// "github.com/go-echarts/go-echarts/v2/charts"
"github.com/go-echarts/go-echarts/v2/components"
)
// HInsert binds an EHttp record from the request body and stores it.
func HInsert(c *gin.Context) {
	h := &model.EHttp{}
	if err := c.Bind(h); err != nil {
		fmt.Println(err.Error())
		// A bind failure is a client error; the original answered 200 "ok".
		c.JSON(400, gin.H{
			"message": "bad request",
		})
		return
	}
	if err := dao.InsertEhttp(h); err != nil {
		fmt.Println(err.Error())
		c.JSON(501, gin.H{
			"message": "false",
		})
		// Without this return the handler fell through and also wrote the
		// 200 body, producing two responses for one request.
		return
	}
	c.JSON(200, gin.H{
		"message": "ok",
	})
}
// HGet queries id/duration/code rows from e_http and renders them as a
// multi-series line chart directly into the response.
func HGet(c *gin.Context) {
	val, fields := dao.Query("select id,duration,code from e_http")
	// Build an empty chart table; baseBIAddmsg fills Value/DimValue from the
	// query rows and baseBIChangeName maps field names to display names.
	table := &chart.Table{
		Title: "",
		Subtitle: "",
		Field: fields, //
		Value: map[string][]interface{}{},
		// DimValue: ,
	}
	baseBIAddmsg(table, val, fields)
	baseBIChangeName(table)
	line := chart.LineMulti(table)
	line.Render(c.Writer)
}
// Htable queries id/succ/code rows from e_http and renders them as an HTML
// table into the response.
func Htable(c *gin.Context) {
	// val, fields := dao.Query("select * from e_http")
	val, fields := dao.Query("select id,succ,code from e_http")
	// Empty table shell; baseTableAddmsg fills it from the query rows and
	// baseTableChangeName maps field names to display names.
	table := &chart.Table{
		Title: "",
		Subtitle: "",
		Field: fields, //
		Value: map[string][]interface{}{},
		// DimValue: ,
	}
	baseTableAddmsg(table, val, fields)
	baseTableChangeName(table)
	line := chart.FTable(table)
	line.Render(c.Writer)
}
// HGetHtml renders an empty go-echarts page (no charts attached).
func HGetHtml(c *gin.Context) {
	page := components.NewPage()
	page.AddCharts()
	page.Render(c.Writer)
	// c.HTML(200, "index.tmpl", gin.H{"title": "posts/index"})
}
// NowMsg renders a page with the current-status charts: summary table,
// per-pid view, recent line charts, and a detail table.
func NowMsg(c *gin.Context) {
	page := components.NewPage()
	page.AddCharts(
		getTableNow(),
		getPidNow(),
		getLineLastTime(),
		getLineNow(),
		getTableNowDetail(),
	)
	page.Render(c.Writer)
}
// HistoryMsg renders a page with the historical charts covering roughly the
// last 30 days.
func HistoryMsg(c *gin.Context) {
	// page assembly --- last 30 days
	page := components.NewPage()
	page.AddCharts(
		getTableMonth(),
		getLineMonth(),
		getLineMonthCmp(),
	)
	page.Render(c.Writer)
}
|
package main
import (
"context"
"google.golang.org/grpc"
"grpc-gateway/healthcheck_client/healthcheck"
"log"
"time"
)
const (
	// address of the gRPC health service to check.
	address = "localhost:50051"
)
// main dials the gRPC server, performs a single health check with a
// one-second deadline, and logs the response (or exits on failure).
func main() {
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := healthcheck.NewHealthClient(conn)
	// Bound the RPC so a hung server cannot block the client forever.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	resp, err := c.Check(ctx, &healthcheck.HealthCheckRequest{})
	if err != nil {
		log.Fatalf("Could not check service: %v", err)
	}
	log.Printf("resp: %s", resp.String())
}
|
package main
import (
"context"
"encoding/json"
"errors"
"log"
"os"
"github.com/joeshaw/envdecode"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/grpc/metadata"
servicespb "github.com/dictav/go-genproto-googleads/pb/v13/services"
googleads "github.com/dictav/go-genproto-googleads/v13"
)
const (
	// gadsAddr is the Google Ads API gRPC endpoint.
	gadsAddr = "googleads.googleapis.com:443"
	// scope is the OAuth2 scope required by the Ads API.
	scope = "https://www.googleapis.com/auth/adwords"
)
// env holds OAuth and Google Ads credentials decoded from the process
// environment; every field is required.
var env struct {
	ClientID string `env:"CLIENT_ID,required"`
	ClientSecret string `env:"CLIENT_SECRET,required"`
	RefreshToken string `env:"REFRESH_TOKEN,required"`
	DeveloperToken string `env:"GADS_DEVELOPER_TOKEN,required"`
	LoginCustomerID string `env:"LOGIN_CUSTOMER_ID,required"`
}
// main authenticates against the Google Ads API with a refresh token, runs
// the package-level GAQL query, and streams each result row to stdout as
// JSON.
func main() {
	err := envdecode.Decode(&env)
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()
	// A token with only the refresh token set forces an immediate refresh.
	t := &oauth2.Token{RefreshToken: env.RefreshToken, TokenType: "Bearer"}
	conf := &oauth2.Config{
		ClientID: env.ClientID,
		ClientSecret: env.ClientSecret,
		Scopes: []string{scope},
		Endpoint: google.Endpoint,
		RedirectURL: "http://localhost:8080",
	}
	opts := []option.ClientOption{
		option.WithTokenSource(conf.TokenSource(ctx, t)),
		option.WithEndpoint(gadsAddr),
	}
	// The Ads API requires these identifiers as gRPC metadata on every call.
	ctx = metadata.AppendToOutgoingContext(ctx, "developer-token", env.DeveloperToken)
	ctx = metadata.AppendToOutgoingContext(ctx, "login-customer-id", env.LoginCustomerID)
	client, err := googleads.NewClient(ctx, opts...)
	if err != nil {
		log.Fatalln(err)
	}
	req := &servicespb.SearchGoogleAdsRequest{
		CustomerId: env.LoginCustomerID,
		Query: query,
	}
	it := client.Search(ctx, req)
	enc := json.NewEncoder(os.Stdout)
	// Drain the iterator until iterator.Done, emitting one JSON object per row.
	for {
		row, err := it.Next()
		if errors.Is(err, iterator.Done) {
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		if err := enc.Encode(row); err != nil {
			log.Fatalln(err)
		}
	}
}
const query = `
SELECT
customer_client.id,
customer_client.descriptive_name,
customer_client.manager
FROM
customer_client
WHERE
customer_client.status = ENABLED
`
|
package regclient
import (
"encoding/json"
"fmt"
"strings"
"github.com/sirupsen/logrus"
)
// TLSConf specifies whether TLS is enabled for a host. The zero value is
// TLSUndefined, which downstream code treats as enabled.
type TLSConf int
const (
	// TLSUndefined indicates TLS is not passed, defaults to Enabled
	TLSUndefined TLSConf = iota
	// TLSEnabled uses TLS (https) for the connection
	TLSEnabled
	// TLSInsecure uses TLS but does not verify CA
	TLSInsecure
	// TLSDisabled does not use TLS (http)
	TLSDisabled
)
// MarshalJSON converts to a json string using MarshalText.
func (t TLSConf) MarshalJSON() ([]byte, error) {
	s, err := t.MarshalText()
	if err != nil {
		// Return nil with the error rather than a non-nil empty slice; a
		// (value, error) result should not carry a partial value.
		return nil, err
	}
	return json.Marshal(string(s))
}
// MarshalText converts TLSConf to its string form; unknown values map to the
// empty string.
func (t TLSConf) MarshalText() ([]byte, error) {
	var s string
	switch t {
	case TLSEnabled:
		s = "enabled"
	case TLSInsecure:
		s = "insecure"
	case TLSDisabled:
		s = "disabled"
	default:
		s = ""
	}
	return []byte(s), nil
}
// UnmarshalJSON converts TLSConf from a json string.
func (t *TLSConf) UnmarshalJSON(b []byte) error {
	var raw string
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	return t.UnmarshalText([]byte(raw))
}
// UnmarshalText converts TLSConf from a string; the empty string maps to
// TLSUndefined and unknown values are rejected.
func (t *TLSConf) UnmarshalText(b []byte) error {
	switch strings.ToLower(string(b)) {
	default:
		// Error strings are lowercase by Go convention; %q quotes safely.
		return fmt.Errorf("unknown TLS value %q", b)
	case "":
		*t = TLSUndefined
	case "enabled":
		*t = TLSEnabled
	case "insecure":
		*t = TLSInsecure
	case "disabled":
		*t = TLSDisabled
	}
	return nil
}
// ConfigHost struct contains host specific settings: identity, TLS and
// certificate material, credentials, mirror routing, and blob upload tuning.
type ConfigHost struct {
	Name string `json:"-"`
	Scheme string `json:"scheme,omitempty"` // TODO: deprecate, delete
	TLS TLSConf `json:"tls,omitempty"`
	RegCert string `json:"regcert,omitempty"`
	ClientCert string `json:"clientcert,omitempty"`
	ClientKey string `json:"clientkey,omitempty"`
	DNS []string `json:"dns,omitempty"` // TODO: remove slice, single string, or remove entirely?
	Hostname string `json:"hostname,omitempty"` // replaces DNS array with single string
	User string `json:"user,omitempty"`
	Pass string `json:"pass,omitempty"`
	Token string `json:"token,omitempty"`
	PathPrefix string `json:"pathPrefix,omitempty"` // used for mirrors defined within a repository namespace
	Mirrors []string `json:"mirrors,omitempty"` // list of other ConfigHost Names to use as mirrors
	Priority uint `json:"priority,omitempty"` // priority when sorting mirrors, higher priority attempted first
	API string `json:"api,omitempty"` // registry API to use
	BlobChunk int64 `json:"blobChunk,omitempty"` // size of each blob chunk
	BlobMax int64 `json:"blobMax,omitempty"` // threshold to switch to chunked upload, -1 to disable, 0 for regclient.blobMaxPut
}
// ConfigHostNew creates a default ConfigHost entry with TLS enabled.
func ConfigHostNew() *ConfigHost {
	return &ConfigHost{
		TLS: TLSEnabled,
	}
}
// ConfigHostNewName creates a default ConfigHost with a hostname; Docker Hub
// aliases collapse to the canonical name and registry DNS.
func ConfigHostNewName(host string) *ConfigHost {
	h := &ConfigHost{
		Name:     host,
		TLS:      TLSEnabled,
		Hostname: host,
	}
	switch host {
	case DockerRegistry, DockerRegistryDNS, DockerRegistryAuth:
		h.Name = DockerRegistry
		h.Hostname = DockerRegistryDNS
	}
	return h
}
// mergeConfigHost overlays newHost onto curHost field by field: a non-zero
// value in newHost replaces the current value, and when warn is set any
// change to an already-set field is logged. The merged result is returned.
func (rc *regClient) mergeConfigHost(curHost, newHost ConfigHost, warn bool) ConfigHost {
	name := newHost.Name
	// merge the existing and new config host
	if newHost.User != "" {
		if warn && curHost.User != "" && curHost.User != newHost.User {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.User,
				"new": newHost.User,
				"host": name,
			}).Warn("Changing login user for registry")
		}
		curHost.User = newHost.User
	}
	if newHost.Pass != "" {
		// Secret values (password, token, client key) are deliberately not
		// included in the warning fields below.
		if warn && curHost.Pass != "" && curHost.Pass != newHost.Pass {
			rc.log.WithFields(logrus.Fields{
				"host": name,
			}).Warn("Changing login password for registry")
		}
		curHost.Pass = newHost.Pass
	}
	if newHost.Token != "" {
		if warn && curHost.Token != "" && curHost.Token != newHost.Token {
			rc.log.WithFields(logrus.Fields{
				"host": name,
			}).Warn("Changing login token for registry")
		}
		curHost.Token = newHost.Token
	}
	if newHost.TLS != TLSUndefined {
		if warn && curHost.TLS != TLSUndefined && curHost.TLS != newHost.TLS {
			tlsOrig, _ := curHost.TLS.MarshalText()
			tlsNew, _ := newHost.TLS.MarshalText()
			rc.log.WithFields(logrus.Fields{
				"orig": string(tlsOrig),
				"new": string(tlsNew),
				"host": name,
			}).Warn("Changing TLS settings for registry")
		}
		curHost.TLS = newHost.TLS
	}
	if newHost.RegCert != "" {
		if warn && curHost.RegCert != "" && curHost.RegCert != newHost.RegCert {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.RegCert,
				"new": newHost.RegCert,
				"host": name,
			}).Warn("Changing certificate settings for registry")
		}
		curHost.RegCert = newHost.RegCert
	}
	if newHost.ClientCert != "" {
		if warn && curHost.ClientCert != "" && curHost.ClientCert != newHost.ClientCert {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.ClientCert,
				"new": newHost.ClientCert,
				"host": name,
			}).Warn("Changing client certificate settings for registry")
		}
		curHost.ClientCert = newHost.ClientCert
	}
	if newHost.ClientKey != "" {
		if warn && curHost.ClientKey != "" && curHost.ClientKey != newHost.ClientKey {
			rc.log.WithFields(logrus.Fields{
				"host": name,
			}).Warn("Changing client certificate key settings for registry")
		}
		curHost.ClientKey = newHost.ClientKey
	}
	if newHost.Hostname != "" {
		if warn && curHost.Hostname != "" && curHost.Hostname != newHost.Hostname {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.Hostname,
				"new": newHost.Hostname,
				"host": name,
			}).Warn("Changing hostname settings for registry")
		}
		curHost.Hostname = newHost.Hostname
	}
	if newHost.PathPrefix != "" {
		newHost.PathPrefix = strings.Trim(newHost.PathPrefix, "/") // leading and trailing / are not needed
		if warn && curHost.PathPrefix != "" && curHost.PathPrefix != newHost.PathPrefix {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.PathPrefix,
				"new": newHost.PathPrefix,
				"host": name,
			}).Warn("Changing path prefix settings for registry")
		}
		curHost.PathPrefix = newHost.PathPrefix
	}
	if len(newHost.Mirrors) > 0 {
		if warn && len(curHost.Mirrors) > 0 && !stringSliceEq(curHost.Mirrors, newHost.Mirrors) {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.Mirrors,
				"new": newHost.Mirrors,
				"host": name,
			}).Warn("Changing mirror settings for registry")
		}
		curHost.Mirrors = newHost.Mirrors
	}
	if newHost.Priority != 0 {
		if warn && curHost.Priority != 0 && curHost.Priority != newHost.Priority {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.Priority,
				"new": newHost.Priority,
				"host": name,
			}).Warn("Changing priority settings for registry")
		}
		curHost.Priority = newHost.Priority
	}
	if newHost.API != "" {
		if warn && curHost.API != "" && curHost.API != newHost.API {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.API,
				"new": newHost.API,
				"host": name,
			}).Warn("Changing API settings for registry")
		}
		curHost.API = newHost.API
	}
	// NOTE(review): BlobChunk requires > 0 while BlobMax accepts any non-zero
	// value because -1 is a meaningful BlobMax setting (disable chunking).
	if newHost.BlobChunk > 0 {
		if warn && curHost.BlobChunk != 0 && curHost.BlobChunk != newHost.BlobChunk {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.BlobChunk,
				"new": newHost.BlobChunk,
				"host": name,
			}).Warn("Changing blobChunk settings for registry")
		}
		curHost.BlobChunk = newHost.BlobChunk
	}
	if newHost.BlobMax != 0 {
		if warn && curHost.BlobMax != 0 && curHost.BlobMax != newHost.BlobMax {
			rc.log.WithFields(logrus.Fields{
				"orig": curHost.BlobMax,
				"new": newHost.BlobMax,
				"host": name,
			}).Warn("Changing blobMax settings for registry")
		}
		curHost.BlobMax = newHost.BlobMax
	}
	return curHost
}
// stringSliceEq reports whether a and b contain the same elements in the
// same order.
func stringSliceEq(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
|
package main
import (
"fmt"
)
// dfs reports whether the player to move can force a win. s is a bitmask of
// the numbers still available (bit i set => number i+1 is usable), n is the
// remaining total needed, mx is maxChoosableInteger. dp memoizes the result
// per mask and vis marks which masks have been computed.
// NOTE(review): n is not part of the memo key; this relies on the remaining
// total being fully determined by the set of used numbers — confirm.
func dfs(s, n, mx int, dp, vis []bool) bool {
	if n <= 0 {
		// The previous pick already reached the total: the side to move loses.
		return false
	}
	// fmt.Println(s, n)
	if vis[s] {
		return dp[s]
	}
	vis[s] = true
	for i := 0; i < mx; i++ {
		if s&(1<<uint(i)) != 0 {
			// Take number i+1; if the opponent then cannot win, this state wins.
			if !dfs(s^(1<<uint(i)), n-i-1, mx, dp, vis) {
				dp[s] = true
				break
			}
		}
	}
	// fmt.Println(s, n, dp[s][n])
	return dp[s]
}
// canIWin reports whether the first player can force reaching desiredTotal
// when both players alternately pick distinct numbers from 1..maxChoosableInteger.
func canIWin(maxChoosableInteger int, desiredTotal int) bool {
	if desiredTotal == 0 {
		return true
	}
	// If even using every number cannot reach the total, nobody wins.
	sum := maxChoosableInteger * (maxChoosableInteger + 1) / 2
	if sum < desiredTotal {
		return false
	}
	size := 1 << uint(maxChoosableInteger)
	dp := make([]bool, size)
	vis := make([]bool, size)
	return dfs(size-1, desiredTotal, maxChoosableInteger, dp, vis)
}
func main() {
	// With numbers 1..10 and target 11, whichever number x the first player
	// picks, 11-x remains available to the opponent, so this prints false.
	fmt.Println(canIWin(10, 11))
	// fmt.Println(canIWin(10, 11))
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// smithtest is a tool to execute sqlsmith tests on cockroach demo
// instances. Failures are tracked, de-duplicated, reduced. Issues are
// prefilled for GitHub.
package main
import (
"bufio"
"bytes"
"context"
gosql "database/sql"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net/url"
"os"
"os/exec"
"regexp"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/internal/sqlsmith"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
"github.com/google/go-github/github"
"github.com/jackc/pgx"
"github.com/lib/pq"
"github.com/pkg/browser"
)
// Command-line configuration for the smith workers.
var (
	flags = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
	cockroach = flags.String("cockroach", "./cockroach", "path to cockroach binary")
	reduce = flags.String("reduce", "./bin/reduce", "path to reduce binary")
	num = flags.Int("num", 1, "number of parallel testers")
)
// usage prints flag help to the flag set's output and exits non-zero.
func usage() {
	fmt.Fprintf(flags.Output(), "Usage of %s:\n", os.Args[0])
	flags.PrintDefaults()
	os.Exit(1)
}
// main parses flags, primes the set of already-filed GitHub issues, and
// launches the requested number of parallel smith workers, exiting on the
// first worker error.
func main() {
	if err := flags.Parse(os.Args[1:]); err != nil {
		usage()
	}
	ctx := context.Background()
	setup := WorkerSetup{
		cockroach: *cockroach,
		reduce: *reduce,
		github: github.NewClient(nil),
	}
	rand.Seed(timeutil.Now().UnixNano())
	setup.populateGitHubIssues(ctx)
	fmt.Println("running...")
	g := ctxgroup.WithContext(ctx)
	for i := 0; i < *num; i++ {
		g.GoCtx(setup.work)
	}
	if err := g.Wait(); err != nil {
		log.Fatalf("%+v", err)
	}
}
// WorkerSetup contains initialization and configuration for running smithers.
type WorkerSetup struct {
	// cockroach and reduce are filesystem paths to the respective binaries.
	cockroach, reduce string
	// github is used to search existing open sqlsmith issues for
	// de-duplication.
	github *github.Client
}
// populateGitHubIssues populates seen with issues already in GitHub.
// It pages through the search results until GitHub reports the result
// set is complete; any API error is fatal to the process.
func (s WorkerSetup) populateGitHubIssues(ctx context.Context) {
	var opts github.SearchOptions
	for {
		results, _, err := s.github.Search.Issues(ctx, "repo:cockroachdb/cockroach type:issue state:open label:C-bug label:O-sqlsmith", &opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, issue := range results.Issues {
			// Normalize titles the same way new failures are normalized
			// (filterIssueTitle) so later de-duplication keys match.
			title := filterIssueTitle(issue.GetTitle())
			seenIssues[title] = true
			fmt.Println("pre populate", title)
		}
		if results.GetIncompleteResults() {
			opts.Page++
			continue
		}
		return
	}
}
// work repeatedly executes s.run with a per-worker RNG until run
// reports an unexpected error, which is returned to the caller.
func (s WorkerSetup) work(ctx context.Context) error {
	rnd := rand.New(rand.NewSource(rand.Int63()))
	for {
		err := s.run(ctx, rnd)
		if err == nil {
			continue
		}
		return err
	}
}
var (
	// lock is used to both protect the seen map from concurrent access
	// and prevent overuse of system resources. When the reducer needs to
	// run it gets the exclusive write lock. When normal queries are being
	// smithed, they use the communal read lock. Thus, the reducer being
	// executed will pause the other testing queries and prevent 2 reducers
	// from running at the same time. This should greatly speed up the time
	// it takes for a single reduction run.
	lock syncutil.RWMutex
	// seenIssues tracks the seen github issues.
	seenIssues = map[string]bool{}
	// connRE extracts the "sql:" postgres connection URL from cockroach's
	// startup log output.
	connRE = regexp.MustCompile(`(?m)^sql:\s*(postgresql://.*)$`)
	// panicRE matches the first panic line; stackRE captures the goroutine
	// stack that follows a panic.
	panicRE = regexp.MustCompile(`(?m)^(panic: .*?)( \[recovered\])?$`)
	stackRE = regexp.MustCompile(`panic: .*\n\ngoroutine \d+ \[running\]:\n(?s:(.*))$`)
	// fatalRE and runtimeStackRE are the analogous patterns for runtime
	// "fatal error:" crashes.
	fatalRE        = regexp.MustCompile(`(?m)^(fatal error: .*?)$`)
	runtimeStackRE = regexp.MustCompile(`goroutine \d+ \[running\]:\n(?s:(.*?))\n\n`)
)
// run is a single sqlsmith worker. It starts a new sqlsmither and in-memory
// single-node cluster. If an error is found it reduces and submits the
// issue. If an issue is successfully found, this function returns, causing
// the started cockroach instance to shut down. An error is only returned if
// something unexpected happened. That is, panics and internal errors will
// return nil, since they are expected. Something unexpected would be like the
// initialization SQL was unable to run.
func (s WorkerSetup) run(ctx context.Context, rnd *rand.Rand) error {
	// Stop running after a while to get new setup and settings.
	done := timeutil.Now().Add(time.Minute)
	ctx, cancel := context.WithCancel(ctx)
	// Cancelling ctx (via this defer) also terminates the cockroach
	// process started with CommandContext below.
	defer cancel()
	cmd := exec.CommandContext(ctx, s.cockroach,
		"start-single-node",
		"--port", "0",
		"--http-port", "0",
		"--insecure",
		"--store=type=mem,size=1GB",
		"--logtostderr",
	)
	// Look for the connection string.
	var pgdb *pgx.Conn
	var db *gosql.DB
	var output bytes.Buffer
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return errors.Wrap(err, "start")
	}
	// Tee stderr into output so the full log can be dumped if startup
	// fails before a connection string appears.
	scanner := bufio.NewScanner(io.TeeReader(stderr, &output))
	for scanner.Scan() {
		line := scanner.Text()
		if match := connRE.FindStringSubmatch(line); match != nil {
			config, err := pgx.ParseURI(match[1])
			if err != nil {
				return errors.Wrap(err, "parse uri")
			}
			pgdb, err = pgx.Connect(config)
			if err != nil {
				return errors.Wrap(err, "connect")
			}
			connector, err := pq.NewConnector(match[1])
			if err != nil {
				return errors.Wrap(err, "connector error")
			}
			db = gosql.OpenDB(connector)
			fmt.Println("connected to", match[1])
			break
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Println(output.String())
		return errors.Wrap(err, "scanner error")
	}
	if db == nil {
		fmt.Println(output.String())
		return errors.New("no DB address found")
	}
	fmt.Println("worker started")
	// Pick a random schema setup and smither configuration for this run.
	initSQL := sqlsmith.Setups[sqlsmith.RandSetup(rnd)](rnd)
	if _, err := pgdb.ExecEx(ctx, initSQL, nil); err != nil {
		return errors.Wrap(err, "init")
	}
	setting := sqlsmith.Settings[sqlsmith.RandSetting(rnd)](rnd)
	opts := append([]sqlsmith.SmitherOption{
		sqlsmith.DisableMutations(),
	}, setting.Options...)
	smither, err := sqlsmith.NewSmither(db, rnd, opts...)
	if err != nil {
		return errors.Wrap(err, "new smither")
	}
	for {
		// NOTE(review): the time deadline named "done" is checked here
		// before the channel of the same name (declared below) shadows it.
		if timeutil.Now().After(done) {
			return nil
		}
		// If lock is locked for writing (due to a found bug in another
		// go routine), block here until it has finished reducing.
		lock.RLock()
		stmt := smither.Generate()
		done := make(chan struct{}, 1)
		// err is captured by this closure; it is only read after the
		// receive on done below, which orders the write before the read.
		go func() {
			_, err = pgdb.ExecEx(ctx, stmt, nil)
			done <- struct{}{}
		}()
		// Timeout slow statements by returning, which will cancel the
		// command's context by the above defer.
		select {
		case <-time.After(10 * time.Second):
			fmt.Printf("TIMEOUT:\n%s\n", stmt)
			lock.RUnlock()
			return nil
		case <-done:
		}
		lock.RUnlock()
		if err != nil {
			if strings.Contains(err.Error(), "internal error") {
				// Return from this function on internal
				// errors. This causes the current cockroach
				// instance to shut down and we start a new
				// one. This is not strictly necessary, since
				// internal errors don't mess up the rest of
				// cockroach, but it's just easier to have a
				// single logic flow in case of a found error,
				// which is to shut down and start over (just
				// like the panic case below).
				return s.failure(ctx, initSQL, stmt, err)
			}
		}
		// If we can't ping, check if the statement caused a panic.
		if err := db.PingContext(ctx); err != nil {
			// Replay init + statement under `cockroach demo` to capture
			// the crash output.
			input := fmt.Sprintf("%s; %s;", initSQL, stmt)
			out, _ := exec.CommandContext(ctx, s.cockroach, "demo", "--no-example-database", "-e", input).CombinedOutput()
			var pqerr pq.Error
			if match := stackRE.FindStringSubmatch(string(out)); match != nil {
				pqerr.Detail = strings.TrimSpace(match[1])
			}
			if match := panicRE.FindStringSubmatch(string(out)); match != nil {
				// We found a panic as expected.
				pqerr.Message = match[1]
				return s.failure(ctx, initSQL, stmt, &pqerr)
			}
			// Not a panic. Maybe a fatal?
			if match := runtimeStackRE.FindStringSubmatch(string(out)); match != nil {
				pqerr.Detail = strings.TrimSpace(match[1])
			}
			if match := fatalRE.FindStringSubmatch(string(out)); match != nil {
				// A real bad non-panic error.
				pqerr.Message = match[1]
				return s.failure(ctx, initSQL, stmt, &pqerr)
			}
			// A panic was not found. Shut everything down by returning an error so it can be investigated.
			fmt.Printf("output:\n%s\n", out)
			fmt.Printf("Ping stmt:\n%s;\n", stmt)
			return err
		}
	}
}
// failure de-duplicates, reduces, and files errors. It generally returns nil
// indicating that this was successfully filed and we should continue looking
// for errors.
func (s WorkerSetup) failure(ctx context.Context, initSQL, stmt string, err error) error {
	var message, stack string
	var pqerr pgx.PgError
	if errors.As(err, &pqerr) {
		stack = pqerr.Detail
		message = pqerr.Message
	} else {
		message = err.Error()
	}
	// QuoteMeta first so the message is a literal match; filterIssueTitle
	// then re-introduces regex wildcards for the variable fragments.
	filteredMessage := filterIssueTitle(regexp.QuoteMeta(message))
	message = fmt.Sprintf("sql: %s", message)
	lock.Lock()
	// Keep this locked for the remainder of the function so that smither
	// tests won't run during the reducer, and only one reducer can run
	// at once.
	defer lock.Unlock()
	sqlFilteredMessage := fmt.Sprintf("sql: %s", filteredMessage)
	alreadySeen := seenIssues[sqlFilteredMessage]
	if !alreadySeen {
		seenIssues[sqlFilteredMessage] = true
	}
	if alreadySeen {
		fmt.Println("already found", message)
		return nil
	}
	fmt.Println("found", message)
	input := fmt.Sprintf("%s\n\n%s;", initSQL, stmt)
	fmt.Printf("SQL:\n%s\n\n", input)
	// Run reducer.
	cmd := exec.CommandContext(ctx, s.reduce, "-v", "-contains", filteredMessage)
	cmd.Stdin = strings.NewReader(input)
	cmd.Stderr = os.Stderr
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		fmt.Println(input)
		return err
	}
	// Generate the pre-filled github issue.
	makeBody := func() string {
		return fmt.Sprintf("```\n%s\n```\n\n```\n%s\n```", strings.TrimSpace(out.String()), strings.TrimSpace(stack))
	}
	query := url.Values{
		"title":  []string{message},
		"labels": []string{"C-bug,O-sqlsmith"},
		"body":   []string{makeBody()},
	}
	// NOTE(review): this local shadows the imported net/url package for
	// the rest of the function.
	url := url.URL{
		Scheme:   "https",
		Host:     "github.com",
		Path:     "/cockroachdb/cockroach/issues/new",
		RawQuery: query.Encode(),
	}
	const max = 8000
	// Remove lines from the stack trace to shorten up the request so it's
	// under the github limit.
	for len(url.String()) > max {
		last := strings.LastIndex(stack, "\n")
		if last < 0 {
			break
		}
		stack = stack[:last]
		query["body"][0] = makeBody()
		url.RawQuery = query.Encode()
	}
	if len(url.String()) > max {
		fmt.Println(stmt)
		return errors.New("request could not be shortened to max length")
	}
	// Open the pre-filled issue form in the operator's browser.
	if err := browser.OpenURL(url.String()); err != nil {
		return err
	}
	return nil
}
// filterIssueTitle normalizes an issue title whose wording varies
// between otherwise-identical failures (byte counts, IDs, datum type
// names, ...). Each variable fragment is replaced by its own regex
// pattern text, so equivalent titles collapse to one de-duplication key.
func filterIssueTitle(s string) string {
	patterns := []string{
		`given: .*, expected .*`,
		`Datum is .*, not .*`,
		`expected .*, found .*`,
		`\d+`,
		`\*tree\.D\w+`,
	}
	for _, pattern := range patterns {
		s = regexp.MustCompile(pattern).ReplaceAllString(s, pattern)
	}
	return s
}
|
package main
import (
"errors"
"github.com/willf/bloom"
"sync"
"sync/atomic"
)
// Sentinel errors returned by poll construction and vote recording.
var (
	ErrInvalidAnswer   = errors.New("Invalid answer")
	ErrDuplicateAnswer = errors.New("Duplicate answer")
	ErrNoAnswers       = errors.New("No answers defined")
	ErrTooManyAnswers  = errors.New("Too many answers defined")
	ErrTooShort        = errors.New("Input too short")
	ErrTooLong         = errors.New("Input too long")
	ErrAlreadyVoted    = errors.New("You have already voted")
)

// Limits on the number of answers and on question/answer text length
// (lengths are measured in bytes, see checkLength).
const (
	MaxAnswers = 9
	MinLength  = 1
	MaxLength  = 127
)

// Bloom filter definition; about p=0.0001, n=1000
const (
	FilterM = 19171
	FilterK = 13
)
// Poll holds a question, its candidate answers, and per-answer vote
// counters (updated atomically). The optional bloom filter tracks voter
// origins for best-effort duplicate detection. Because the struct
// embeds a sync.Mutex, a Poll must not be copied after first use.
type Poll struct {
	MultipleChoice bool     `json:"multipleChoice"`
	Question       string   `json:"question"`
	Answers        []string `json:"answers"`
	Counts         []uint32 `json:"counts"`
	// stopped is toggled with sync/atomic; non-zero means voting ended.
	stopped int32
	// filter is nil when duplicate checking is disabled (see NewPoll).
	filter      *bloom.BloomFilter
	filterMutex sync.Mutex
}
// checkLength validates that s is between MinLength and MaxLength
// bytes, returning ErrTooShort or ErrTooLong otherwise.
func checkLength(s string) error {
	switch n := len(s); {
	case n < MinLength:
		return ErrTooShort
	case n > MaxLength:
		return ErrTooLong
	default:
		return nil
	}
}
// NewPoll builds a poll from a question and its answer texts. The
// answer count and every text length are validated first; when
// checkDuplicates is set, a bloom filter is attached so RecordOrigin
// can reject repeat voters.
func NewPoll(checkDuplicates, multipleChoice bool, question string, answers ...string) (*Poll, error) {
	if len(answers) == 0 {
		return nil, ErrNoAnswers
	}
	if len(answers) > MaxAnswers {
		return nil, ErrTooManyAnswers
	}
	if err := checkLength(question); err != nil {
		return nil, err
	}
	for _, answer := range answers {
		if err := checkLength(answer); err != nil {
			return nil, err
		}
	}
	p := &Poll{
		MultipleChoice: multipleChoice,
		Question:       question,
		Answers:        append([]string(nil), answers...),
		Counts:         make([]uint32, len(answers)),
	}
	if checkDuplicates {
		p.filter = bloom.New(FilterM, FilterK)
	}
	return p, nil
}
// RecordOrigin records key (a voter identity) in the duplicate filter.
// It reports true when the key was not seen before, or always true when
// duplicate checking is disabled. False positives are possible per the
// bloom filter's configured error rate.
func (p *Poll) RecordOrigin(key []byte) bool {
	if p.filter == nil {
		return true
	}
	p.filterMutex.Lock()
	defer p.filterMutex.Unlock()
	fresh := !p.filter.Test(key)
	p.filter.Add(key)
	return fresh
}
// RecordAnswers validates the chosen answer indices (range, duplicates,
// single-choice restriction) and atomically increments each selected
// counter. Nothing is counted unless all indices validate.
func (p *Poll) RecordAnswers(indices ...uint32) error {
	if len(indices) == 0 {
		return ErrNoAnswers
	}
	if !p.MultipleChoice && len(indices) != 1 {
		return ErrTooManyAnswers
	}
	limit := uint32(len(p.Answers))
	for i, choice := range indices {
		if choice >= limit {
			return ErrInvalidAnswer
		}
		// Reject a choice that already appeared earlier in the list.
		for _, earlier := range indices[:i] {
			if earlier == choice {
				return ErrDuplicateAnswer
			}
		}
	}
	for _, choice := range indices {
		atomic.AddUint32(&p.Counts[choice], 1)
	}
	return nil
}
// Stop atomically marks the poll as stopped.
func (p *Poll) Stop() {
	atomic.StoreInt32(&p.stopped, 1)
}
// Stopped reports whether Stop has been called.
//
// The flag is written by Stop via atomic.StoreInt32, so it must also be
// read atomically; the previous plain read of p.stopped was a data race
// under the race detector.
func (p *Poll) Stopped() bool {
	return atomic.LoadInt32(&p.stopped) == 1
}
|
package iaas
import (
"encoding/json"
"fmt"
"os"
"strings"
. "github.com/afritzler/garden-examiner/cmd/gex/cleanup"
"github.com/afritzler/garden-examiner/cmd/gex/util"
"github.com/afritzler/garden-examiner/pkg"
"github.com/jmoiron/jsonq"
"github.com/mandelsoft/filepath/pkg/filepath"
)
// init registers this handler for shoots hosted on GCP.
func init() {
	RegisterIaasHandler(&gcp{}, "gcp")
}
// gcp implements the IaaS handler operations (Execute, Export,
// Describe) for Google Cloud Platform shoots. It is stateless.
type gcp struct {
}
// Execute runs an arbitrary gcloud command in the context of the
// shoot's service account and project. The currently active gcloud
// account is remembered and restored afterwards; the service-account
// key is written to a temp file that is cleaned up on return.
//
// The shoot parameter is unused here; config must carry the
// "serviceaccount.json" key.
func (this *gcp) Execute(shoot gube.Shoot, config map[string]string, args ...string) error {
	data := map[string]interface{}{}
	tmpAccount := util.ExecCmdReturnOutput("bash", "-c", "gcloud config list account --format json")
	// The Decode results were previously ignored; fail early on
	// malformed gcloud output instead of querying an empty map.
	if err := json.NewDecoder(strings.NewReader(tmpAccount)).Decode(&data); err != nil {
		return fmt.Errorf("cannot decode gcloud account listing: %s", err)
	}
	jq := jsonq.NewQuery(data)
	tmpAccount, err := jq.String("core", "account")
	if err != nil {
		return fmt.Errorf("cannot list gcloud accounts: %s", err)
	}
	serviceaccount := []byte(config["serviceaccount.json"])
	if err := json.NewDecoder(strings.NewReader(string(serviceaccount))).Decode(&data); err != nil {
		return fmt.Errorf("cannot decode serviceaccount.json: %s", err)
	}
	jq = jsonq.NewQuery(data)
	account, err := jq.String("client_email")
	if err != nil {
		return fmt.Errorf("cannot list gcloud client emails: %s", err)
	}
	project, err := jq.String("project_id")
	if err != nil {
		return fmt.Errorf("cannot find project id in account list: %s", err)
	}
	// Write the key to a temp file for --key-file.
	sa, err := util.NewTempFileInput(serviceaccount)
	if err != nil {
		return fmt.Errorf("cannot get temporary key file name: %s", err)
	}
	defer sa.CleanupFunction()()
	// Restore the previously active account when we are done.
	defer Cleanup(func() {
		util.ExecCmd("gcloud config set account "+tmpAccount, nil)
	})()
	files, keyfile := sa.InheritedFiles(nil)
	err = util.ExecCmd("gcloud auth activate-service-account --key-file="+keyfile, nil)
	if err != nil {
		return fmt.Errorf("cannot activate service account: %s", err)
	}
	err = util.ExecCmd("gcloud "+strings.Join(args, " ")+" "+"--account="+account+" --project="+project, files)
	if err != nil {
		return fmt.Errorf("cannot execute 'gcloud': %s", err)
	}
	return nil
}
// Export writes the shoot's GCP service-account key below cachedir and
// activates it as the current gcloud service account.
func (this *gcp) Export(shoot gube.Shoot, config map[string]string, cachedir string) error {
	serviceaccount := []byte(config["serviceaccount.json"])
	err := os.MkdirAll(cachedir, 0700)
	if err != nil {
		return fmt.Errorf("cannot create cache dir '%s' for key file: %s", cachedir, err)
	}
	keyfile := filepath.Join(cachedir, "gcp.serviceaccount")
	file, err := os.OpenFile(keyfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
	if err != nil {
		return fmt.Errorf("cannot create key file '%s' for key file: %s", keyfile, err)
	}
	if _, err := file.Write(serviceaccount); err != nil {
		// Previously the descriptor leaked on this path; close it
		// (best effort — the write error takes precedence).
		file.Close()
		return fmt.Errorf("cannot write key file '%s' for key file: %s", keyfile, err)
	}
	if err := file.Close(); err != nil {
		return fmt.Errorf("cannot close key file '%s' for key file: %s", keyfile, err)
	}
	fmt.Printf("activating gcloud service account for %s\n", shoot.GetName())
	err = util.ExecCmd("gcloud auth activate-service-account --key-file="+keyfile, nil)
	if err != nil {
		return fmt.Errorf("cannot activate service account: %s", err)
	}
	return nil
}
// Describe adds GCP-specific attributes of the shoot to attrs. Shoots
// whose IaaS info is unavailable — or not GCP info — are skipped
// silently (preserving the original best-effort behavior) so the rest
// of the description still renders.
func (this *gcp) Describe(shoot gube.Shoot, attrs *util.AttributeSet) error {
	info, err := shoot.GetIaaSInfo()
	if err != nil {
		return nil
	}
	// Checked assertion: an unexpected info type must not panic here.
	iaas, ok := info.(*gube.GCPInfo)
	if !ok {
		return nil
	}
	attrs.Attribute("GCP Information", "")
	attrs.Attribute("Region", iaas.GetRegion())
	attrs.Attribute("VPC Name", iaas.GetVpcName())
	// Typo fix in the displayed label ("Accout" -> "Account").
	attrs.Attribute("Service Account EMail", iaas.GetServiceAccountEMail())
	return nil
}
|
package lib
import (
"database/sql"
"testing"
"time"
"github.com/dhaifley/dlib"
"github.com/dhaifley/dlib/dauth"
)
// MockTokenResult is a stub sql.Result whose insert ID and affected
// row count are both always 1.
type MockTokenResult struct{}

// LastInsertId always reports 1.
func (fr *MockTokenResult) LastInsertId() (int64, error) {
	return 1, nil
}

// RowsAffected always reports 1.
func (fr *MockTokenResult) RowsAffected() (int64, error) {
	return 1, nil
}
// MockTokenRows is a stub row set that yields exactly one row of fixed
// test data.
type MockTokenRows struct {
	// row counts how many times Next has been called.
	row int
}

// Close is a no-op.
func (frs *MockTokenRows) Close() error {
	return nil
}
// Next advances the cursor; only the first call reports a row.
func (frs *MockTokenRows) Next() bool {
	frs.row++
	return frs.row <= 1
}
// Scan fills dest with fixed test data by positional convention:
// dest[0] an ID (*int64 or *int), dest[1] a string ("test"), dest[2]
// an int64 (1), dest[3] and dest[4] NullTime timestamps (1983-02-02).
// Any destination of an unexpected type yields a 500 error.
func (frs *MockTokenRows) Scan(dest ...interface{}) error {
	switch v := dest[0].(type) {
	case *int64:
		*v = int64(1)
	case *int:
		*v = 1
	default:
		return dlib.NewError(500, "Invalid type")
	}
	if len(dest) > 1 {
		switch v := dest[1].(type) {
		case *string:
			*v = "test"
		default:
			return dlib.NewError(500, "Invalid type")
		}
	}
	if len(dest) > 2 {
		switch v := dest[2].(type) {
		case *int64:
			*v = 1
		default:
			return dlib.NewError(500, "Invalid type")
		}
	}
	if len(dest) > 3 {
		switch v := dest[3].(type) {
		case *dlib.NullTime:
			dt := time.Date(1983, 2, 2, 0, 0, 0, 0, time.Local)
			*v = dlib.NullTime{Time: dt, Valid: true}
		default:
			return dlib.NewError(500, "Invalid type")
		}
	}
	if len(dest) > 4 {
		switch v := dest[4].(type) {
		case *dlib.NullTime:
			dt := time.Date(1983, 2, 2, 0, 0, 0, 0, time.Local)
			*v = dlib.NullTime{Time: dt, Valid: true}
		default:
			return dlib.NewError(500, "Invalid type")
		}
	}
	return nil
}
// MockTokenDBSession is a stub database session backed by the mock
// result and row types above; every operation succeeds.
type MockTokenDBSession struct{}

// Close is a no-op.
func (m *MockTokenDBSession) Close() error {
	return nil
}

// Exec returns a MockTokenResult regardless of query or args.
func (m *MockTokenDBSession) Exec(query string, args ...interface{}) (sql.Result, error) {
	fr := MockTokenResult{}
	return &fr, nil
}

// Query returns a fresh MockTokenRows regardless of query or args.
func (m *MockTokenDBSession) Query(query string, args ...interface{}) (dlib.SQLRows, error) {
	fr := MockTokenRows{}
	return &fr, nil
}

// Ping always succeeds.
func (m *MockTokenDBSession) Ping() error {
	return nil
}

// Stats reports zero open connections.
func (m *MockTokenDBSession) Stats() sql.DBStats {
	return sql.DBStats{OpenConnections: 0}
}
// TestNewTokenAccessor verifies the constructor returns a value
// implementing TokenAccessor.
func TestNewTokenAccessor(t *testing.T) {
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	_, ok := ma.(TokenAccessor)
	if !ok {
		t.Errorf("Type expected: TokenAccessor, got: %T", ma)
	}
}
// TestTokenAccessGetTokens verifies GetTokens streams tokens over its
// result channel and that the mock's single row comes back with ID 1.
func TestTokenAccessGetTokens(t *testing.T) {
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	var as []dauth.Token
	id := int64(1)
	c := ma.GetTokens(&dauth.TokenFind{ID: &id})
	for r := range c {
		if r.Err != nil {
			t.Error(r.Err)
		}
		switch v := r.Val.(type) {
		case dauth.Token:
			as = append(as, v)
		default:
			t.Errorf("Invalid data type returned")
		}
	}
	// NOTE(review): as[0] panics if the channel produced no tokens;
	// consider a length check with t.Fatal before indexing.
	expected := int64(1)
	if as[0].ID != expected {
		t.Errorf("ID expected: %v, got: %v", expected, as[0].ID)
	}
}
// TestTokenAccessGetTokenByID verifies a single token lookup by ID
// returns the mock row with ID 1.
func TestTokenAccessGetTokenByID(t *testing.T) {
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	var a dauth.Token
	c := ma.GetTokenByID(1)
	for r := range c {
		if r.Err != nil {
			t.Error(r.Err)
		}
		switch v := r.Val.(type) {
		case dauth.Token:
			a = v
		default:
			t.Errorf("Invalid data type returned")
		}
	}
	expected := int64(1)
	if a.ID != expected {
		t.Errorf("ID expected: %v, got: %v", expected, a.ID)
	}
}
// TestTokenAccessDeleteTokenByID verifies a delete-by-ID reports one
// affected row (the mock result always affects 1).
func TestTokenAccessDeleteTokenByID(t *testing.T) {
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	c := ma.DeleteTokenByID(1)
	var n int
	for r := range c {
		if r.Err != nil {
			t.Error(r.Err)
		}
		n = r.Num
	}
	expected := 1
	if n != expected {
		t.Errorf("Delete count expected: %v, got: %v", expected, n)
	}
}

// TestTokenAccessDeleteTokens verifies a filtered delete reports one
// affected row.
func TestTokenAccessDeleteTokens(t *testing.T) {
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	id := int64(1)
	c := ma.DeleteTokens(&dauth.TokenFind{ID: &id})
	var n int
	for r := range c {
		if r.Err != nil {
			t.Error(r.Err)
		}
		n = r.Num
	}
	expected := 1
	if n != expected {
		t.Errorf("Delete count expected: %v, got: %v", expected, n)
	}
}
// TestTokenAccessSaveToken verifies saving a single token echoes it
// back over the result channel with ID 1.
func TestTokenAccessSaveToken(t *testing.T) {
	a := dauth.Token{ID: 1}
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	c := ma.SaveToken(&a)
	for r := range c {
		if r.Err != nil {
			t.Error(r.Err)
		}
		switch v := r.Val.(type) {
		case dauth.Token:
			a = v
		default:
			t.Errorf("Invalid data type returned")
		}
	}
	expected := int64(1)
	if a.ID != expected {
		t.Errorf("ID expected: %v, got: %v", expected, a.ID)
	}
}

// TestTokenAccessSaveTokens verifies a batch save drains the result
// channel without error and leaves the input slice intact.
func TestTokenAccessSaveTokens(t *testing.T) {
	// NOTE(review): gofmt -s would simplify to []dauth.Token{{ID: 1}}.
	a := []dauth.Token{dauth.Token{ID: 1}}
	mdbs := MockTokenDBSession{}
	ma := NewTokenAccessor(&mdbs)
	c := ma.SaveTokens(a)
	for r := range c {
		if r.Err != nil {
			t.Error(r.Err)
		}
	}
	expected := int64(1)
	if a[0].ID != expected {
		t.Errorf("ID expected: %v, got: %v", expected, a[0].ID)
	}
}
|
package beat
import (
"testing"
"time"
beat "github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/common"
"github.com/stretchr/testify/assert"
)
// TestDefaultConfig loads the shipped redisbeat.yml and checks that
// every Redisbeat setting comes out as its documented default.
func TestDefaultConfig(t *testing.T) {
	conf, err := common.LoadFile("../redisbeat.yml")
	if err != nil {
		// Fatalf (not Errorf): continuing with a nil conf would make
		// New fail confusingly; this also matches TestModifiedConfig.
		t.Fatalf("Load file failed %v", err)
	}
	b := &beat.Beat{}
	rrb, err := New(b, conf)
	rb, _ := rrb.(*Redisbeat)
	assert.Nil(t, err)
	assert.Equal(t, DEFAULT_PERIOD, rb.period, "Default time period should be %v", DEFAULT_PERIOD)
	assert.Equal(t, DEFAULT_HOST, rb.host, "Default host should be %v", DEFAULT_HOST)
	assert.Equal(t, DEFAULT_PORT, rb.port, "Default port should be %v", DEFAULT_PORT)
	assert.Equal(t, DEFAULT_NETWORK, rb.network, "Default network should be %v", DEFAULT_NETWORK)
	assert.Equal(t, DEFAULT_MAX_CONN, rb.maxConn, "Default max connections should be %v", DEFAULT_MAX_CONN)
	assert.Equal(t, DEFAULT_AUTH_REQUIRED, rb.auth, "Default auth required should be %v", DEFAULT_AUTH_REQUIRED)
	assert.Equal(t, DEFAULT_AUTH_REQUIRED_PASS, rb.pass, "Default auth required pass should be %v", DEFAULT_AUTH_REQUIRED_PASS)
	assert.Equal(t, DEFAULT_STATS_SERVER, rb.serverStats, "Default server stats should be %v", DEFAULT_STATS_SERVER)
	assert.Equal(t, DEFAULT_STATS_CLIENT, rb.clientsStats, "Default client stats should be %v", DEFAULT_STATS_CLIENT)
	assert.Equal(t, DEFAULT_STATS_MEMORY, rb.memoryStats, "Default memory stats should be %v", DEFAULT_STATS_MEMORY)
	assert.Equal(t, DEFAULT_STATS_PERSISTENCE, rb.persistenceStats, "Default persistence stats should be %v", DEFAULT_STATS_PERSISTENCE)
	assert.Equal(t, DEFAULT_STATS_STATS, rb.statsStats, "Default stats stats should be %v", DEFAULT_STATS_STATS)
	assert.Equal(t, DEFAULT_STATS_REPLICATION, rb.replicationStats, "Default replication stats should be %v", DEFAULT_STATS_REPLICATION)
	assert.Equal(t, DEFAULT_STATS_CPU, rb.cpuStats, "Default cpu stats should be %v", DEFAULT_STATS_CPU)
	assert.Equal(t, DEFAULT_STATS_COMMAND, rb.commandStats, "Default command stats should be %v", DEFAULT_STATS_COMMAND)
	assert.Equal(t, DEFAULT_STATS_CLUSTER, rb.clusterStats, "Default cluster stats should be %v", DEFAULT_STATS_CLUSTER)
	assert.Equal(t, DEFAULT_STATS_KEYSPACE, rb.keyspaceStats, "Default keyspace stats should be %v", DEFAULT_STATS_KEYSPACE)
}
// TestModifiedConfig loads the test fixture config and checks every
// overridden Redisbeat setting is picked up.
func TestModifiedConfig(t *testing.T) {
	conf, err := common.LoadFile("../tests/redisbeat.yml")
	if err != nil {
		t.Fatalf("Load file failed %v", err)
	}
	b := &beat.Beat{}
	rrb, err := New(b, conf)
	rb, _ := rrb.(*Redisbeat)
	assert.Nil(t, err)
	expectedTime := 5 * time.Second
	assert.Equal(t, expectedTime, rb.period, "Configured time period should be %v", expectedTime)
	assert.Equal(t, "redis.testing.fake", rb.host, "Configured host should be %v", "redis.testing.fake")
	assert.Equal(t, 9736, rb.port, "Configured port should be %v", 9736)
	assert.Equal(t, "udp", rb.network, "Configured network should be %v", "udp")
	assert.Equal(t, 5, rb.maxConn, "Configured max connections should be %v", 5)
	assert.Equal(t, true, rb.auth, "Configured auth required should be %v", true)
	assert.Equal(t, "p@ssw0rd", rb.pass, "Configured auth required pass should be %v", "p@ssw0rd")
	assert.Equal(t, true, rb.serverStats, "Configured server stats should be %v", true)
	assert.Equal(t, false, rb.clientsStats, "Configured client stats should be %v", false)
	assert.Equal(t, false, rb.memoryStats, "Configured memory stats should be %v", false)
	assert.Equal(t, false, rb.persistenceStats, "Configured persistence stats should be %v", false)
	assert.Equal(t, false, rb.statsStats, "Configured stats stats should be %v", false)
	assert.Equal(t, false, rb.replicationStats, "Configured replication stats should be %v", false)
	assert.Equal(t, false, rb.cpuStats, "Configured cpu stats should be %v", false)
	assert.Equal(t, false, rb.commandStats, "Configured command stats should be %v", false)
	assert.Equal(t, false, rb.clusterStats, "Configured cluster stats should be %v", false)
	assert.Equal(t, false, rb.keyspaceStats, "Configured keyspace stats should be %v", false)
}
// TestConvertReplyToMap verifies an INFO-style reply (CRLF-separated
// "key:value" lines with "#" section headers) is parsed into a map.
func TestConvertReplyToMap(t *testing.T) {
	testReplyString := "# Server\r\nredis_version:3.0.0\r\nredis_mode:standalone\r\nmultiplexing_api:epoll\r\n"
	replyMap, err := convertReplyToMap(testReplyString)
	assert.Nil(t, err, "Valid string reply should not throw an error")
	assert.Equal(t, "3.0.0", replyMap["redis_version"], "Redis version should be 3.0.0")
	assert.Equal(t, "standalone", replyMap["redis_mode"], "Redis mode should be standalone")
	assert.Equal(t, "epoll", replyMap["multiplexing_api"], "Redis multiplexing api should be epoll")
}
|
package cpu
// LR35902 simulates a Game Boy CPU through the usage of registers, program counter,
// stack pointer, and more. More or less this will be the full "logic" of a real CPU.
type LR35902 struct {
	registers *registers
	pc        uint16 // program counter
	sp        uint16 // stack pointer
}
|
package web
import (
	"fmt"
	"log"
	"runtime/debug"

	"asyncMessageSystem/app/config"
	"asyncMessageSystem/app/controller/producer"
	log2 "asyncMessageSystem/app/middleware/log"

	"github.com/kataras/iris"
)
func PanicHandler(ctx iris.Context) {
defer func() {
msg := recover()
if msg != nil {
err := debug.Stack()
log2.MainLogger.Error(msg.(string) + "["+string(err)+"]")
if config.Conf.Web.Debug {
log.Println(msg, "["+string(err)+"]")
strmsg := msg.(string)+"\r\n"
bytemsg := []byte(strmsg)
ctx.Write(append(bytemsg,err...))
return
}else{
ctx.JSON(producer.ReturnJson{Code: 10001, Msg: "System is busy now!", Data: map[string]interface{}{}})
return
}
}
}()
ctx.Next() //继续执行下一个handler,在本例中是mainHandler。
} |
package shape
import (
"fmt"
"io"
"github.com/gregoryv/draw/xy"
"github.com/gregoryv/nexus"
)
// NewState returns a State with the given title, using the package
// default font and text padding and the "state" CSS class.
func NewState(title string) *State {
	s := &State{
		Title: title,
		Font:  DefaultFont,
		Pad:   DefaultTextPad,
	}
	s.class = "state"
	return s
}
// State is a rectangular UML state shape positioned at X, Y, rendered
// as an SVG rect with a title label inside.
type State struct {
	X, Y  int
	Title string
	Font  Font
	Pad   Padding
	// class is the CSS class written into the SVG rect element.
	class string
}
// String returns a short, quoted description for debugging.
func (r *State) String() string {
	return fmt.Sprintf("R %q", r.Title)
}
// Position returns the upper-left corner of the shape.
func (r *State) Position() (int, int) { return r.X, r.Y }

// SetX sets the horizontal position.
func (r *State) SetX(x int) { r.X = x }

// SetY sets the vertical position.
func (r *State) SetY(y int) { r.Y = y }

// Direction reports the layout direction of this shape.
func (r *State) Direction() Direction { return DirectionRight }

// SetClass sets the CSS class used when rendering to SVG.
func (r *State) SetClass(c string) { r.class = c }
// WriteSVG renders the state rectangle and its title label to out.
// nexus.NewPrinter returns a *error that accumulates the first write
// failure across all Printf calls, hence the final *err dereference.
func (r *State) WriteSVG(out io.Writer) error {
	w, err := nexus.NewPrinter(out)
	w.Printf(
		`<rect class="%s" x="%v" y="%v" width="%v" height="%v"/>`,
		r.class, r.X, r.Y, r.Width(), r.Height())
	w.Printf("\n")
	r.title().WriteSVG(w)
	return *err
}
// title builds the label placed inside the rectangle, offset from the
// corner by the configured padding.
func (r *State) title() *Label {
	return &Label{
		x:     r.X + r.Pad.Left,
		y:     r.Y + r.Pad.Top/2,
		Font:  r.Font,
		Text:  r.Title,
		class: "state-title",
	}
}
// SetFont sets the font used for the title label.
func (r *State) SetFont(f Font) { r.Font = f }

// SetTextPad sets the padding around the title text.
func (r *State) SetTextPad(pad Padding) { r.Pad = pad }

// Height returns the box height needed for a single line of text.
func (r *State) Height() int {
	return boxHeight(r.Font, r.Pad, 1)
}

// Width returns the box width needed to fit the title.
func (r *State) Width() int {
	return boxWidth(r.Font, r.Pad, r.Title)
}
// Edge returns intersecting position of a line starting at start and
// pointing to the rect center.
func (r *State) Edge(start xy.Point) xy.Point {
	return boxEdge(start, r)
}
|
package main
//637. 二叉树的层平均值
//给定一个非空二叉树, 返回一个由每层节点平均值组成的数组。
//示例 1:
//输入:
//3
/// \
//9 20
/// \
//15 7
//输出:[3, 14.5, 11]
//解释:
//第 0 层的平均值是 3 , 第1层是 14.5 , 第2层是 11 。因此返回 [3, 14.5, 11] 。
//提示:节点值的范围在32位有符号整数范围内。
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// averageOfLevels returns the average of the node values at each depth
// of the tree, index 0 being the root level. The previous body was
// empty, which does not even compile (missing return); this implements
// the intended breadth-first traversal.
//
// A nil root yields an empty (non-nil) slice.
func averageOfLevels(root *TreeNode) []float64 {
	averages := []float64{}
	if root == nil {
		return averages
	}
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		levelSize := len(queue)
		sum := 0.0
		// Consume exactly one level, queueing its children for the next.
		for _, node := range queue[:levelSize] {
			sum += float64(node.Val)
			if node.Left != nil {
				queue = append(queue, node.Left)
			}
			if node.Right != nil {
				queue = append(queue, node.Right)
			}
		}
		averages = append(averages, sum/float64(levelSize))
		queue = queue[levelSize:]
	}
	return averages
}
|
package ex7_2
import (
"io"
)
type CountWriter struct {
Writer io.Writer
Count int64
}
func (c *CountWriter) Write(in []byte) (n int, err error) {
n, err = c.Writer.Write(in)
c.Count += int64(n)
return
}
func CountingWriter(w io.Writer) (io.Writer, *int64) {
cw := &CountWriter{
Writer: w,
Count: 0,
}
return cw, &cw.Count
}
|
package main
import (
"fmt"
"os"
"strconv"
"github.com/iotaledger/hive.go/codegen/variadic"
)
// main is the entry point of the variadic code generator.
func main() {
if len(os.Args) < 4 {
printUsage("not enough parameters")
}
minParamsCount, err := strconv.Atoi(os.Args[1])
if err != nil {
printUsage("minParamsCount (1st parameter) must be an integer")
}
maxParamsCount, err := strconv.Atoi(os.Args[2])
if err != nil {
printUsage("maxParamsCount (2nd parameter) must be an integer")
}
template := variadic.New()
panicOnErr(template.Parse(os.Getenv("GOFILE")))
panicOnErr(template.Generate(os.Args[3], minParamsCount, maxParamsCount))
}
// printUsage reports errorMsg on stderr together with the expected
// command line, then terminates the process with exit code 2.
func printUsage(errorMsg string) {
	out := os.Stderr
	fmt.Fprintf(out, "Error:\t%s\n\n", errorMsg)
	fmt.Fprintf(out, "Usage of variadic:\n")
	fmt.Fprintf(out, "\tvariadic [minParamsCount] [maxParamsCount] [outputFile]\n")
	os.Exit(2)
}
// panicOnErr panics if the given error is not nil.
func panicOnErr(err error) {
if err != nil {
panic(err)
}
}
|
package main
import (
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"

	"github.com/astaxie/beego/logs"
	"github.com/wsq1220/chatroomServer/proto"
)
// Client represents one connected chat user: its TCP connection, the
// authenticated user id, and a scratch buffer for framing.
type Client struct {
	conn   net.Conn
	userId int
	// buf is reused by readPackage/writePackage; 8 KiB is therefore the
	// maximum supported packet size.
	buf [8192]byte
}
// receive
func (p *Client) readPackage() (msg proto.Message, err error) {
n, err := p.conn.Read(p.buf[0:4])
if n != 4 || err != nil {
logs.Error("client read head data failed, err: %v", err)
return
}
logs.Info("read head data: %v", p.buf[0:4])
packLen := binary.BigEndian.Uint32(p.buf[0:4])
logs.Debug("receive len: %v", packLen)
n, err = p.conn.Read(p.buf[0:packLen])
// if n != int(packLen) {
// errMsg := fmt.Sprintf("read body data failed, expect: %v, actual: %v", int(packLen), n)
// err = errors.New(errMsg)
// logs.Error(errMsg)
// return
// }
if err != nil {
logs.Error("read body data failed, err: %v", err)
return
}
logs.Info("received body data: %v", string(p.buf[0:packLen]))
if err = json.Unmarshal(p.buf[0:n], &msg); err != nil {
logs.Error("json unmarshal failed, err: %v", err)
return
}
return
}
// send
func (p *Client) writePackage(data []byte) (err error) {
if data == nil {
return
}
packLen := uint32(len(data))
binary.BigEndian.PutUint32(p.buf[0:4], packLen)
if _, err = p.conn.Write(p.buf[0:4]); err != nil {
logs.Error("write head data failed, err: %v", err)
return
}
logs.Info("write head data [%v] succ!", string(p.buf[0:4]))
n, err := p.conn.Write([]byte(data))
if err != nil {
logs.Error("write data failed, err: %v", err)
return
}
if n != int(packLen) {
errMsg := fmt.Sprintf("write data not finished, now: %v/%v", n, int(packLen))
err = errors.New(errMsg)
logs.Error(errMsg)
return
}
return
}
// Process reads and dispatches messages from the client until a read
// error ends the loop; per-message processing errors are only logged
// and the loop continues.
func (p *Client) Process() (err error) {
	for {
		var msg proto.Message
		msg, err = p.readPackage()
		logs.Info("received msg: %v", msg)
		if err != nil {
			logs.Error("read package failed when processing: %v", err)
			// TODO
			// clientMgr.DelClient(p.UserId)
			return
		}
		err = p.ProcessMsg(msg)
		if err != nil {
			logs.Error("process msg failed, err: %v, will continue", err)
			continue
		}
	}
}
// 处理请求
func (p *Client) ProcessMsg(msg proto.Message) (err error) {
switch msg.Cmd {
case proto.UserLoginCmd:
err = p.login(msg)
case proto.UserRegisterCmd:
err = p.register(msg)
case proto.UserSendMessageCmd:
err = p.processUserSendMsg(msg)
default:
errMsg := fmt.Sprintf("the [%v] is not supported messgae!", msg.Cmd)
err = errors.New(errMsg)
logs.Error(errMsg)
return
}
return
}
// login authenticates the user from msg, registers the client in the
// online map, and notifies the other users. The response is always
// sent by the deferred loginResp call, which observes the final err.
func (p *Client) login(msg proto.Message) (err error) {
	defer func() {
		p.loginResp(err)
	}()
	logs.Debug("enter login, got msg: %v", msg)
	var loginData proto.Login
	err = json.Unmarshal([]byte(msg.Data), &loginData)
	if err != nil {
		logs.Error("json unmarshal failed, err: %v", err)
		return
	}
	_, err = mgr.Login(loginData.Id, loginData.Password)
	if err != nil {
		logs.Error("login failed, err: %v", err)
		return
	}
	logs.Info("user[%v] login succ!", loginData.Id)
	// Add this client to the online-user map.
	clientMgr.AddClient(loginData.Id, p)
	p.userId = loginData.Id
	// Tell every other online user that this user just came online.
	p.notifyOtherUserOnline(loginData.Id)
	return
}
// notifyOtherUserOnline broadcasts userId's online status to every
// connected client except userId itself.
func (p *Client) notifyOtherUserOnline(userId int) {
	for id, c := range clientMgr.onlineUsers {
		if id != userId {
			c.NotifyUserOnline(userId)
		}
	}
}
// NotifyUserOnline sends a status-notify message to this client
// announcing that userId is now online. Marshal/write failures are
// logged and otherwise dropped.
func (p *Client) NotifyUserOnline(userId int) {
	var respMsg proto.Message
	respMsg.Cmd = proto.UserStatusNotifyCmd
	var notify proto.UserStatusNotify
	notify.UserId = userId
	notify.Status = proto.UserStatusOnline
	notifyData, err := json.Marshal(notify)
	if err != nil {
		logs.Error("json marshal failed, err: %v", err)
		return
	}
	// The notify payload is nested as a JSON string inside the message.
	respMsg.Data = string(notifyData)
	data, err := json.Marshal(respMsg)
	if err != nil {
		logs.Error("json marshal failed, err: %v", err)
		return
	}
	err = p.writePackage(data)
	if err != nil {
		logs.Error("notify other user you online failed, err: %v", err)
		return
	}
}
// loginResp sends the login response to this client. On success
// (err == nil) it carries status 200 plus the ids of all online users;
// otherwise status 500 and the error text.
func (p *Client) loginResp(err error) {
	var respMsg proto.Message
	respMsg.Cmd = proto.UserLoginResCmd
	var loginResp proto.LoginResp
	loginResp.StatusCode = 200
	userMap := clientMgr.GetAllUsers()
	logs.Debug("all user: %v", userMap)
	// Collect the ids of everyone currently online.
	// (gofmt -s / go vet: the redundant ", _" in the range was dropped.)
	for userId := range userMap {
		loginResp.User = append(loginResp.User, userId)
	}
	if err != nil {
		loginResp.StatusCode = 500
		loginResp.Error = fmt.Sprintf("%v", err)
	}
	data, err := json.Marshal(loginResp)
	if err != nil {
		logs.Error("json marshal failed, err: %v", err)
		return
	}
	// The response payload is nested as a JSON string inside the message.
	respMsg.Data = string(data)
	respData, err := json.Marshal(respMsg)
	if err != nil {
		logs.Error("json marshal failed, err: %v", err)
		return
	}
	err = p.writePackage(respData)
	if err != nil {
		logs.Error("write login resp data failed, err: %v", err)
		return
	}
}
func (p *Client) register(msg proto.Message) (err error) {
var register proto.Register
if err = json.Unmarshal([]byte(msg.Data), ®ister); err != nil {
return
}
err = mgr.Register(®ister.User)
if err != nil {
logs.Error("register failed, err: %v", err)
return
}
logs.Info("register user [%v] succ!", register.User.UserId)
// if err = p.writePackage([]byte(resp)); err != nil {
// logs.Warn("register resp send failed, err: %v", err)
// }
return
}
// processUserSendMsg fans a chat message out to every online client other
// than the sender identified in the request.
func (p *Client) processUserSendMsg(msg proto.Message) (err error) {
	var req proto.SendMsgReq
	if err = json.Unmarshal([]byte(msg.Data), &req); err != nil {
		logs.Error("json unmarshal failed, err: %v", err)
		return
	}
	for userId, client := range clientMgr.GetAllUsers() {
		if userId != req.UserId {
			client.SendMsgToUser(req.UserId, req.Data)
		}
	}
	return
}
// SendMsgToUser delivers a chat message (text, attributed to userId) over
// this client's connection.
func (p *Client) SendMsgToUser(userId int, text string) {
	recvMsg := proto.UserRecvMsgReq{
		UserId: userId,
		Data:   text,
	}
	recvMsgData, err := json.Marshal(recvMsg)
	if err != nil {
		logs.Error("json marshal failed, err: %v", err)
		return
	}
	respMsg := proto.Message{
		Cmd:  proto.UserRecvMessageCmd,
		Data: string(recvMsgData),
	}
	data, err := json.Marshal(respMsg)
	if err != nil {
		logs.Error("json marshal failed, err: %v", err)
		return
	}
	if err := p.writePackage(data); err != nil {
		logs.Error("send message failed, err: %v", err)
	}
}
|
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package sync2_test
import (
"context"
"sync"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/sync2"
)
// TestNewSuccessThreshold table-tests constructor validation: per the cases
// below, tasks must be greater than 1 and successThreshold must be a
// positive fraction no greater than 1.
func TestNewSuccessThreshold(t *testing.T) {
	t.Parallel()
	var testCases = []struct {
		desc             string
		tasks            int
		successThreshold float64
		isError          bool
	}{
		{
			desc:             "OK",
			tasks:            10,
			successThreshold: 0.75,
			isError:          false,
		},
		{
			desc:             "OK",
			tasks:            134,
			successThreshold: 1,
			isError:          false,
		},
		{
			desc:             "Error: invalid tasks (0)",
			tasks:            0,
			successThreshold: 0.75,
			isError:          true,
		},
		{
			desc:             "Error: invalid tasks (1)",
			tasks:            1,
			successThreshold: 0.75,
			isError:          true,
		},
		{
			desc:             "Error: invalid tasks (negative)",
			tasks:            -23,
			successThreshold: 0.75,
			isError:          true,
		},
		{
			desc:             "Error: invalid successThreshold (0)",
			tasks:            134,
			successThreshold: 0,
			isError:          true,
		},
		{
			desc:             "Error: invalid successThreshold (negative)",
			tasks:            134,
			successThreshold: -1.5,
			isError:          true,
		},
		{
			desc:             "Error: invalid successThreshold (greater than 1)",
			tasks:            134,
			successThreshold: 1.00001,
			isError:          true,
		},
	}
	for _, tc := range testCases {
		tc := tc // capture the range variable for the parallel subtest
		t.Run(tc.desc, func(t *testing.T) {
			t.Parallel()
			_, err := sync2.NewSuccessThreshold(tc.tasks, tc.successThreshold)
			if tc.isError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestSuccessThreshold_AllSuccess marks every task successful and checks
// that Wait returns and the counters reflect all successes / no failures.
func TestSuccessThreshold_AllSuccess(t *testing.T) {
	t.Parallel()
	const (
		tasks           = 10
		threshold       = 0.7
		successfulTasks = 7
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	for i := 0; i < tasks; i++ {
		go func() {
			successThreshold.Success()
			wg.Done()
		}()
	}
	successThreshold.Wait(context.Background())
	// All goroutines must have finished before counters are inspected.
	wg.Wait()
	require.Equal(t, tasks, successThreshold.SuccessCount())
	require.Equal(t, 0, successThreshold.FailureCount())
}
// TestSuccessThreshold_AllFailures marks every task failed and checks that
// Wait still returns and the counters reflect all failures / no successes.
func TestSuccessThreshold_AllFailures(t *testing.T) {
	t.Parallel()
	const (
		tasks     = 10
		threshold = 0.7
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	for i := 0; i < tasks; i++ {
		go func() {
			successThreshold.Failure()
			wg.Done()
		}()
	}
	successThreshold.Wait(context.Background())
	// All goroutines must have finished before counters are inspected.
	wg.Wait()
	require.Equal(t, 0, successThreshold.SuccessCount())
	require.Equal(t, tasks, successThreshold.FailureCount())
}
// TestSuccessThreshold_FailuresWithReachedSuccessThreshold mixes successes
// and failures and checks that Wait unblocks as soon as the success
// threshold (4 of 10 at 0.4) is reached, before all tasks complete.
func TestSuccessThreshold_FailuresWithReachedSuccessThreshold(t *testing.T) {
	t.Parallel()
	const (
		tasks           = 10
		threshold       = 0.4
		successfulTasks = 4
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	// Capacity equals the threshold: once Wait returns, exactly cap sends
	// can have completed, which is what the Len assertion below verifies.
	successfulTasksDone := make(chan struct{}, successfulTasks)
	for i := 0; i < tasks; i++ {
		go func(i int) {
			// Alternate tasks with success & failure
			if i%2 == 0 {
				successfulTasksDone <- struct{}{}
				successThreshold.Success()
			} else {
				successThreshold.Failure()
			}
			wg.Done()
		}(i)
	}
	successThreshold.Wait(context.Background())
	// Check that Wait unblocked when reached the successThreshold
	require.Len(t, successfulTasksDone, cap(successfulTasksDone))
	require.Equal(t, successfulTasks, successThreshold.SuccessCount())
	// purge the rest of the goroutines
	for i := successfulTasks; i < tasks/2; i++ {
		<-successfulTasksDone
	}
	wg.Wait()
}
// TestSuccessThreshold_FailuresWithoutReachedSuccessThreshold mixes
// successes and failures such that the 0.8 threshold is never reached;
// Wait must still return once every task has reported.
func TestSuccessThreshold_FailuresWithoutReachedSuccessThreshold(t *testing.T) {
	t.Parallel()
	const (
		tasks     = 10
		threshold = 0.8
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	for i := 0; i < tasks; i++ {
		go func(i int) {
			// Alternate tasks with success & failure
			if i%2 == 0 {
				successThreshold.Success()
			} else {
				successThreshold.Failure()
			}
			wg.Done()
		}(i)
	}
	successThreshold.Wait(context.Background())
	wg.Wait()
	require.Equal(t, tasks/2, successThreshold.SuccessCount())
	require.Equal(t, tasks/2, successThreshold.FailureCount())
}
// TestSuccessThreshold_ExtraTasksAreFine reports more completions than the
// declared task count and checks that nothing blocks or panics.
func TestSuccessThreshold_ExtraTasksAreFine(t *testing.T) {
	t.Parallel()
	const (
		tasks      = 10
		threshold  = 0.7
		extraTasks = 5
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks + extraTasks)
	for i := 0; i < (tasks + extraTasks); i++ {
		go func(i int) {
			if i%2 == 0 {
				successThreshold.Success()
			} else {
				successThreshold.Failure()
			}
			wg.Done()
		}(i)
	}
	successThreshold.Wait(context.Background())
	wg.Wait()
}
// TestSuccessThreshold_SuccessRateCloseTo0 uses a tiny threshold (0.1 of 2
// tasks) and checks that Wait unblocks after a single success, i.e. the
// effective threshold is rounded up to at least one task.
func TestSuccessThreshold_SuccessRateCloseTo0(t *testing.T) {
	t.Parallel()
	const (
		tasks             = 2
		threshold         = 0.1
		expectedThreshold = 1
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	// Capacity equals the expected effective threshold; see Len check below.
	completedTasks := make(chan struct{}, expectedThreshold)
	for i := 0; i < tasks; i++ {
		go func() {
			completedTasks <- struct{}{}
			successThreshold.Success()
			wg.Done()
		}()
	}
	successThreshold.Wait(context.Background())
	// Check that Wait unblocked when reached the successThreshold
	require.Len(t, completedTasks, cap(completedTasks))
	// purge the rest of the goroutines
	for i := expectedThreshold; i < tasks; i++ {
		<-completedTasks
	}
	wg.Wait()
}
// TestSuccessThreshold_SuccessThresholdNumTasks checks the boundary case of
// a threshold of exactly 1: Wait requires every task to succeed.
func TestSuccessThreshold_SuccessThresholdNumTasks(t *testing.T) {
	t.Parallel()
	const (
		tasks     = 2
		threshold = 1
		// expectedThreshold documents that the effective threshold is all tasks.
		expectedThreshold = 2
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	for i := 0; i < tasks; i++ {
		go func() {
			successThreshold.Success()
			wg.Done()
		}()
	}
	successThreshold.Wait(context.Background())
	wg.Wait()
}
// TestSuccessThreshold_CallingWaitMoreThanOnce checks that Wait is
// idempotent: once satisfied, subsequent calls return immediately.
func TestSuccessThreshold_CallingWaitMoreThanOnce(t *testing.T) {
	t.Parallel()
	const (
		tasks           = 10
		threshold       = 0.7
		successfulTasks = 7
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(tasks)
	for i := 0; i < tasks; i++ {
		go func() {
			successThreshold.Success()
			wg.Done()
		}()
	}
	successThreshold.Wait(context.Background())
	// These two wait calls must not block
	successThreshold.Wait(context.Background())
	successThreshold.Wait(context.Background())
	wg.Wait()
}
// TestSuccessThreshold_CancellingWait checks that Wait honors context
// cancellation: with no tasks reported and an already-cancelled context,
// Wait must return instead of blocking.
func TestSuccessThreshold_CancellingWait(t *testing.T) {
	t.Parallel()
	const (
		tasks           = 10
		threshold       = 0.7
		successfulTasks = 7
	)
	successThreshold, err := sync2.NewSuccessThreshold(tasks, threshold)
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	successThreshold.Wait(ctx)
}
|
package clock
import "fmt"
// Clock represents a time of day (hours and minutes) on a 24-hour dial,
// with no date component.
type Clock struct {
	hour   int
	minute int
}

// New constructs a Clock from hours and minutes, normalizing any values
// (including negatives) onto the 24-hour dial.
func New(h, m int) Clock {
	return Clock{}.Add(h*60 + m)
}

// String renders the clock in zero-padded "HH:MM" form.
func (c Clock) String() string {
	return fmt.Sprintf("%02d:%02d", c.hour, c.minute)
}

// Add returns a clock advanced by the given number of minutes; negative
// values move the clock backwards. The result wraps around midnight.
func (c Clock) Add(minutes int) Clock {
	const minutesPerDay = 24 * 60
	total := (c.hour*60 + c.minute + minutes) % minutesPerDay
	if total < 0 {
		total += minutesPerDay
	}
	return Clock{total / 60, total % 60}
}

// Subtract returns a clock moved back by the given number of minutes.
func (c Clock) Subtract(minutes int) Clock {
	return c.Add(-minutes)
}
|
package main
import (
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
)
var a App
// TestMain initializes the application once for the whole test binary and
// then runs the tests.
func TestMain(m *testing.M) {
	a = App{}
	a.Init()
	os.Exit(m.Run())
}
// TestBadMode verifies that POST /process with no valid mode is rejected
// with 400 Bad Request and an explanatory error body.
func TestBadMode(t *testing.T) {
	req, _ := http.NewRequest("POST", "/process", nil)
	response := executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, response.Code)
	if body := response.Body.String(); !strings.Contains(body, "Process mode not supported") {
		// The previous message ("Expected an empty array") described a check
		// this test does not perform; report what was actually expected.
		t.Errorf("Expected body to contain %q. Got %s", "Process mode not supported", body)
	}
}
// executeRequest routes req through the application's router and returns
// the recorded response.
func executeRequest(req *http.Request) *httptest.ResponseRecorder {
	recorder := httptest.NewRecorder()
	a.router.ServeHTTP(recorder, req)
	return recorder
}
// checkResponseCode fails the test when the observed HTTP status code does
// not match the expected one.
func checkResponseCode(t *testing.T, expected, actual int) {
	if expected == actual {
		return
	}
	t.Errorf("Expected response code %d. Got %d\n", expected, actual)
}
|
/*
/tmp/1.txt内容为:
{
"name":"xiaoli",
"Age":20,
"Sex":"女"
}
*/
package main
import (
"fmt"
"log"
"encoding/json"
"os"
)
type person struct {
Name string `json:"name"`
Age int `"json:age"`
Sex string `"json:sex"`
}
// main decodes the JSON document at /tmp/1.txt into a person and prints
// the decoded age.
func main() {
	fd, err := os.Open("/tmp/1.txt")
	if err != nil {
		log.Fatal("os.Open Err : ", err)
	}
	// The file handle was previously leaked; close it when main returns.
	defer fd.Close()
	var p *person
	if err := json.NewDecoder(fd).Decode(&p); err != nil {
		log.Fatal("json.newdecoder : ", err)
	}
	// Guard against the input being JSON null, which leaves p nil and would
	// make p.Age panic.
	if p == nil {
		log.Fatal("json.newdecoder : document is null")
	}
	fmt.Println(p.Age)
}
|
package main
import (
"fmt"
"imooc/pipeline"
"os"
"bufio"
)
// main generates n random values through the pipeline package and writes
// them to the file "small.in" via a buffered writer.
func main() {
	const (
		filename = "small.in"
		n        = 64
	)
	file, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	w := bufio.NewWriter(file)
	pipeline.WriterSink(w, pipeline.RandomResource(n))
	// Flush the buffer so all generated values actually reach the file.
	w.Flush()
}
func MergeDemo(){
p := pipeline.Merge(pipeline.InMemSort(pipeline.ArraySource(1,3,2,6,9,7,8)),pipeline.InMemSort(pipeline.ArraySource(1,3,2,6,9,7,8,12)))
for val := range p {
fmt.Println(val)
}
} |
package client
import (
"io"
"net/http"
"time"
)
const (
	// maxDefaultRetries is the retry budget used when the caller does not
	// provide a Retryer in Config.
	maxDefaultRetries = 3
)

// These values are derived from the default values of DefaultTransport and
// Transport respectively from net/http/transport.go
var (
	defaultRequestTimeout      = 30 * time.Second
	defaultTLSHandshakeTimeout = 10 * time.Second
	defaultMaxIdleConns        = 100
	defaultMaxIdleConnsPerHost = 2
	defaultIdleConnTimeout     = 90 * time.Second
)
// Client implements the base client request and response handling
// used by all service clients.
type Client struct {
	Authorizer Authorizer   // signs/authorizes outgoing requests
	Retryer    Retryer      // retry policy applied to requests
	httpClient *http.Client // underlying HTTP client with configured timeouts
	baseURL    string       // base URL all request paths are resolved against
}
// NewClient returns a new instance of sdk.Client.
// The config gets sanitized by returning a copy of the passed configuration
// with defaults applied for any values the caller left unset. The default
// values are derived from the default values of DefaultTransport and
// Transport respectively from net/http/transport.go.
func NewClient(config Config) *Client {
	config = sanitize(config)
	return &Client{
		baseURL:    config.BaseURL,
		httpClient: newHTTPClient(config),
		Retryer:    config.Retryer,
		Authorizer: config.Authorizer,
	}
}
// sanitize returns a copy of config with every unset optional field filled
// in from the package defaults; fields the caller supplied are kept as-is.
func sanitize(config Config) Config {
	sanitized := Config{
		Authorizer: config.Authorizer,
		BaseURL:    config.BaseURL,
		Retryer:    config.Retryer,
	}
	if sanitized.Retryer == nil {
		sanitized.Retryer = DefaultRetryer{NumMaxRetries: maxDefaultRetries}
	}
	sanitized.RequestTimeout = config.RequestTimeout
	if sanitized.RequestTimeout == nil {
		sanitized.RequestTimeout = &defaultRequestTimeout
	}
	sanitized.TLSHandshakeTimeout = config.TLSHandshakeTimeout
	if sanitized.TLSHandshakeTimeout == nil {
		sanitized.TLSHandshakeTimeout = &defaultTLSHandshakeTimeout
	}
	sanitized.MaxIdleConns = config.MaxIdleConns
	if sanitized.MaxIdleConns == nil {
		sanitized.MaxIdleConns = &defaultMaxIdleConns
	}
	sanitized.MaxIdleConnsPerHost = config.MaxIdleConnsPerHost
	if sanitized.MaxIdleConnsPerHost == nil {
		sanitized.MaxIdleConnsPerHost = &defaultMaxIdleConnsPerHost
	}
	sanitized.IdleConnTimeout = config.IdleConnTimeout
	if sanitized.IdleConnTimeout == nil {
		sanitized.IdleConnTimeout = &defaultIdleConnTimeout
	}
	return sanitized
}
// newHTTPClient creates an HTTP client modeled on go's http.DefaultClient
// and http.DefaultTransport, with the configured timeouts applied.
//
// The previous implementation wrote the configured values directly into
// http.DefaultTransport and http.DefaultClient, mutating process-wide
// globals and silently changing behavior for every other user of the
// default client; it also ignored a failed type assertion, which would
// panic with a nil dereference. Both are fixed by cloning.
func newHTTPClient(config Config) *http.Client {
	transport := &http.Transport{}
	if def, ok := http.DefaultTransport.(*http.Transport); ok {
		// Clone (Go 1.13+) copies the default transport's settings without
		// sharing its mutable state.
		transport = def.Clone()
	}
	transport.TLSHandshakeTimeout = *config.TLSHandshakeTimeout
	transport.MaxIdleConns = *config.MaxIdleConns
	transport.MaxIdleConnsPerHost = *config.MaxIdleConnsPerHost
	transport.IdleConnTimeout = *config.IdleConnTimeout
	return &http.Client{
		Transport: transport,
		Timeout:   *config.RequestTimeout,
	}
}
// NewRequest returns a new Request tied to this client. At most one params
// map is honored; any additional variadic entries are ignored.
func (c *Client) NewRequest(op Operation, output interface{},
	body io.ReadSeeker, paramsList ...map[string]string) (*Request, error) {
	var params map[string]string
	if len(paramsList) > 0 {
		params = paramsList[0]
	}
	return NewRequest(c.httpClient, c.Retryer, c.Authorizer, c.baseURL, op,
		output, body, params)
}
|
package k8sutil
import (
"fmt"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// GetClient returns a Kubernetes client (clientset) from the kubeconfig
// path, or from the in-cluster service account environment when path is
// empty.
func GetClient(path string) (*kubernetes.Clientset, error) {
	cfg, err := getClientConfig(path)
	if err != nil {
		return nil, fmt.Errorf("failed to get Kubernetes client config: %w", err)
	}
	return kubernetes.NewForConfig(cfg)
}
// getClientConfig returns a Kubernetes client Config, built from the given
// kubeconfig file when path is non-empty, otherwise from the pod's service
// account.
func getClientConfig(path string) (*rest.Config, error) {
	if path == "" {
		// uses pod's service account to get a Config
		return rest.InClusterConfig()
	}
	// build Config from a kubeconfig filepath
	return clientcmd.BuildConfigFromFlags("", path)
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"go/build"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
)
// Command-line flags.
var (
	diff  = flag.String("diff", "HEAD", "The git commit pattern to diff by. E.g.: 'HEAD', or '<commit>...<commit>'")
	debug = flag.Bool("debug", false, "Verbose output.")
)

// Path patterns used to recognize files living under a testdata directory.
const (
	sep              = string(filepath.Separator)
	testdataPattern1 = "testdata" + sep       // testdata/ at the repo root
	testdataPattern2 = sep + "testdata" + sep // nested .../testdata/...
)
// main prints the relative paths of all packages impacted by the git diff
// selected with -diff, optionally limited to the packages given as args.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "\t%s [-debug] [-diff <diff>] [<packages>]\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "Options:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	// Files changed according to git.
	diffs := gitAllDiffs(*diff)
	debugDo(func() {
		fmt.Println("--- git diffs ---")
		for _, file := range diffs.SortedSlice() {
			fmt.Println(file)
		}
		fmt.Println()
	})
	packages := goList(flag.Args())
	impacted := pathsImpacted(packages, diffs)
	debugDo(func() {
		fmt.Println("--- paths impacted ---")
		for _, path := range impacted.SortedSlice() {
			fmt.Println("." + sep + path)
		}
		fmt.Println()
	})
	removePathsWithoutBuildableGoFiles(impacted)
	debugDo(func() {
		fmt.Println("--- buildable paths impacted ---")
	})
	// The final listing is the tool's real output, so it is printed
	// unconditionally; only the header above is debug-gated.
	for _, path := range impacted.SortedSlice() {
		fmt.Println("." + sep + path)
	}
}
// StringSet is a set of strings backed by a map.
type StringSet map[string]bool

// Add inserts each given value into the set.
func (set StringSet) Add(vals ...string) {
	for _, v := range vals {
		set[v] = true
	}
}

// Exists reports whether val is a member of the set.
func (set StringSet) Exists(val string) bool {
	return set[val]
}

// Del removes val from the set; removing a missing value is a no-op.
func (set StringSet) Del(val string) {
	delete(set, val)
}

// SortedSlice returns the members of the set in ascending order.
func (set StringSet) SortedSlice() []string {
	slice := make([]string, 0, len(set))
	for v := range set {
		slice = append(slice, v)
	}
	sort.Strings(slice)
	return slice
}

// Merge adds every member of o into this set.
func (set StringSet) Merge(o StringSet) {
	for v := range o {
		set.Add(v)
	}
}
// removePathsWithoutBuildableGoFiles prunes, in place, every path that
// cannot be read or that contains no .go files.
func removePathsWithoutBuildableGoFiles(paths StringSet) {
	for path := range paths {
		infos, err := ioutil.ReadDir(path)
		if err != nil {
			// Unreadable (e.g. deleted) directories cannot be built or tested.
			paths.Del(path)
			// BUG FIX: previously execution fell through here and re-inspected
			// the (nil) listing instead of moving to the next path.
			continue
		}
		var hasBuildableGoFiles bool
		for _, info := range infos {
			if strings.HasSuffix(info.Name(), ".go") {
				hasBuildableGoFiles = true
				break
			}
		}
		if !hasBuildableGoFiles {
			paths.Del(path)
		}
	}
}
// hasTestFiles reports whether path directly contains at least one
// _test.go file that the go tool would not ignore (no "." or "_" prefix).
// It aborts the program via check if the directory cannot be read.
func hasTestFiles(path string) bool {
	infos, err := ioutil.ReadDir(path)
	check(err)
	for _, info := range infos {
		name := info.Name()
		if !strings.HasSuffix(name, "_test.go") {
			continue
		}
		if name[0] != '.' && name[0] != '_' {
			return true
		}
	}
	return false
}
// pathsImpacted returns the set of project-relative directories whose tests
// need to run: directories whose own files changed, plus every package whose
// (test) dependency graph reaches a directory with changed Go code.
func pathsImpacted(packages []Package, diffs StringSet) StringSet {
	// ie: locations that need testing
	impactedPaths := StringSet{}
	// ie: go code that's filed
	alteredPaths := StringSet{}
	projectDir := gitRoot()
	for file := range diffs {
		/*
			The following is a set of rules for how to handle different types of diffs:
			- If a file is ignored by the go tool, then we ignore it too.
			- If a file is inside a testdata directory, then we mark all ancestors of testdata as impacted
			- If a file is a test file, then of course we mark that path as impacted
			- If a file is a .go file, then we mark that path as impacted AND altered
		*/
		basename := filepath.Base(file)
		dir := filepath.Dir(file)
		switch {
		case strings.HasPrefix(basename, "."):
			// The go tool ignores "dot" files and so shall we
			continue
		case strings.HasPrefix(basename, "_"):
			// The go tool ignores files with "_" prefixes and so shall we
			continue
		case strings.HasSuffix(basename, "_test.go"):
			// Good to ".go"! Get it? It's funny cuz it's Go...
			impactedPaths.Add(dir)
			continue
		case strings.HasPrefix(dir, testdataPattern1):
			// Then the project root needs testing (eg: testdata/foo/bar.txt)
			if hasTestFiles(".") {
				impactedPaths.Add(".")
			}
			continue
		case strings.Contains(dir, testdataPattern2):
			// Changes to "testdata" directories impact tests in the parent directory and all ancestor dirs.
			// (eg: foo/testdata/bar.txt)
			for parentDir := dir[:strings.Index(dir, testdataPattern2)]; parentDir != "."; parentDir = filepath.Dir(parentDir) {
				if hasTestFiles(parentDir) {
					impactedPaths.Add(parentDir)
				}
			}
			continue
		case strings.HasSuffix(basename, ".go"):
			impactedPaths.Add(dir)
			alteredPaths.Add(dir)
			continue
		}
	}
	// For each candidate package, mark it impacted when the package itself —
	// or anything it imports (directly, in tests, or in external tests) —
	// lives in an altered directory. The closure lets `return` short-circuit
	// the remaining dependency scans for that package.
	for _, pkg := range packages {
		func() {
			// Check if this package itself was altered
			pkgRelativePath, err := filepath.Rel(projectDir, pkg.Dir)
			check(err)
			if alteredPaths.Exists(pkgRelativePath) {
				impactedPaths.Add(pkgRelativePath)
				return
			}
			// Check the package's dependencies to see if any were altered
			for _, dep := range pkg.Deps {
				buildPkg, err := build.Import(dep, projectDir, build.FindOnly)
				check(err)
				depRelativePath, err := filepath.Rel(projectDir, buildPkg.Dir)
				check(err)
				if alteredPaths.Exists(depRelativePath) {
					impactedPaths.Add(depRelativePath)
					return
				}
			}
			// Check the package's test imports to see if any were altered
			for _, dep := range pkg.TestImports {
				buildPkg, err := build.Import(dep, projectDir, build.FindOnly)
				check(err)
				depRelativePath, err := filepath.Rel(projectDir, buildPkg.Dir)
				check(err)
				if alteredPaths.Exists(depRelativePath) {
					impactedPaths.Add(depRelativePath)
					return
				}
			}
			// Check the package's external test imports to see if any were altered
			for _, dep := range pkg.XTestImports {
				buildPkg, err := build.Import(dep, projectDir, build.FindOnly)
				check(err)
				depRelativePath, err := filepath.Rel(projectDir, buildPkg.Dir)
				check(err)
				if alteredPaths.Exists(depRelativePath) {
					impactedPaths.Add(depRelativePath)
					return
				}
			}
		}()
	}
	return impactedPaths
}
// debugDo runs fn only when the -debug flag is set.
func debugDo(fn func()) {
	if !*debug {
		return
	}
	fn()
}
// shell runs executable with args, passing its stderr through to ours, and
// returns the captured stdout; a failing command aborts the program via check.
func shell(executable string, args ...string) []byte {
	cmd := exec.Command(executable, args...)
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	check(err)
	return out
}
// check prints err and exits with status 1; a nil error is a no-op.
func check(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
// failf prints the given message to stdout and exits with status 1.
func failf(err string) {
	fmt.Fprintln(os.Stdout, err)
	os.Exit(1)
}
// printJSON writes v to stdout as tab-indented JSON.
func printJSON(v interface{}) error {
	encoder := json.NewEncoder(os.Stdout)
	encoder.SetIndent("", "\t")
	return encoder.Encode(v)
}
|
package supervisor
import (
"fmt"
"github.com/couchbase/cbauth"
"github.com/couchbase/eventing/logging"
"github.com/couchbase/eventing/util"
)
// getHTTPServiceAuth is a retriable callback that resolves the HTTP service
// credentials for the local cluster node.
// args: [0] *SuperSupervisor, [1] *string user (out), [2] *string password (out).
var getHTTPServiceAuth = func(args ...interface{}) error {
	s := args[0].(*SuperSupervisor)
	user := args[1].(*string)
	password := args[2].(*string)
	var err error
	clusterURL := fmt.Sprintf("127.0.0.1:%s", s.restPort)
	// Writes the results through the out pointers even on error.
	*user, *password, err = cbauth.GetHTTPServiceAuth(clusterURL)
	if err != nil {
		logging.Errorf("SSCO Failed to get cluster auth details, err: %v", err)
	}
	return err
}
// getEventingNodeAddrsCallback is a retriable callback that resolves the
// addresses of every node running the eventing service; an empty result is
// treated as an error.
// args: [0] *SuperSupervisor, [1] *[]string addresses (out).
var getEventingNodeAddrsCallback = func(args ...interface{}) error {
	s := args[0].(*SuperSupervisor)
	addrs := args[1].(*[]string)
	var err error
	clusterURL := fmt.Sprintf("127.0.0.1:%s", s.restPort)
	*addrs, err = util.EventingNodesAddresses(s.auth, clusterURL)
	if err != nil {
		logging.Errorf("SSCO Failed to get addresses for nodes running eventing service, err: %v", err)
	} else if len(*addrs) == 0 {
		logging.Errorf("SSCO no eventing nodes reported")
		return fmt.Errorf("0 nodes reported for eventing service, unexpected")
	} else {
		// Log the resolved addresses themselves; the previous code passed the
		// pointer `addrs`, so the log showed a pointer wrapper, not the values.
		logging.Infof("SSCO addrs: %#v", *addrs)
	}
	return err
}
// getCurrentEventingNodeAddrCallback is a retriable callback that resolves
// the address of the eventing node this process is running on.
// args: [0] *SuperSupervisor, [1] *string address (out).
var getCurrentEventingNodeAddrCallback = func(args ...interface{}) error {
	s := args[0].(*SuperSupervisor)
	addr := args[1].(*string)
	var err error
	clusterURL := fmt.Sprintf("127.0.0.1:%s", s.restPort)
	// Writes the result through the out pointer even on error.
	*addr, err = util.CurrentEventingNodeAddress(s.auth, clusterURL)
	if err != nil {
		logging.Errorf("SSVA Failed to get address for current eventing node, err: %v", err)
	}
	return err
}
|
package main
import "fmt"
// searchRange returns the first and last index of target in A as a
// two-element slice, or [-1, -1] when target does not occur.
//
// The previous implementation initialized the result to [0, 0], so a missing
// target incorrectly reported [0, 0], and a target occurring exactly once
// reported a last index of 0 instead of the first index.
func searchRange(A []int, target int) []int {
	first, last := -1, -1
	for i, v := range A {
		if v != target {
			continue
		}
		if first == -1 {
			first = i
		}
		last = i
	}
	return []int{first, last}
}
// main demonstrates searchRange on a small sorted slice.
func main() {
	nums := []int{5, 7, 7, 8, 8, 10}
	fmt.Println(searchRange(nums, 8))
}
|
package postgres
import (
"context"
"database/sql"
"encoding/json"
"github.com/pganalyze/collector/state"
)
// typesSQL lists user-defined (non-base) types — domains, enums, and
// composite types — excluding array companion types, extension-owned types,
// and the system schemas. The "json" column carries type-specific detail:
// constraint definitions for domains, labels for enums, and attribute
// name/type pairs for composites; it defaults to an empty JSON array.
const typesSQL string = `
SELECT t.oid,
	   t.typarray AS arrayoid,
	   n.nspname AS schema,
	   t.typname AS name,
	   t.typtype AS type,
	   CASE WHEN t.typtype = 'd' THEN pg_catalog.format_type(t.typbasetype, t.typtypmod) END AS domain_type,
	   t.typnotnull AS domain_not_null,
	   t.typdefault AS domain_default,
	   COALESCE(
		 CASE t.typtype
		 WHEN 'd' THEN
		   (SELECT pg_catalog.json_agg(pg_catalog.pg_get_constraintdef(oid, FALSE)) FROM pg_catalog.pg_constraint WHERE contypid = t.oid)::text
		 WHEN 'e' THEN
		   (SELECT pg_catalog.json_agg(enumlabel ORDER BY enumsortorder) FROM pg_catalog.pg_enum WHERE enumtypid = t.oid)::text
		 WHEN 'c' THEN
		   (SELECT pg_catalog.json_agg(ARRAY[attname, pg_catalog.format_type(atttypid, atttypmod)]) FROM pg_catalog.pg_attribute WHERE attrelid = t.typrelid)::text
		 END
	   , '[]') AS json
  FROM pg_catalog.pg_type t
 INNER JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
 WHERE t.typtype <> 'b'
	   AND (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))
	   AND NOT EXISTS (SELECT 1 FROM pg_catalog.pg_type el WHERE el.oid = t.typelem AND el.typarray = t.oid)
	   AND t.oid NOT IN (SELECT pd.objid FROM pg_catalog.pg_depend pd WHERE pd.deptype = 'e' AND pd.classid = 'pg_catalog.pg_type'::regclass)
	   AND n.nspname <> 'pg_catalog'
	   AND n.nspname <> 'information_schema'
	   AND n.nspname !~ '^pg_toast'
`
// GetTypes loads the database's user-defined types via typesSQL, tagging
// each result with currentDatabaseOid. The postgresVersion parameter is
// currently unused.
func GetTypes(ctx context.Context, db *sql.DB, postgresVersion state.PostgresVersion, currentDatabaseOid state.Oid) ([]state.PostgresType, error) {
	stmt, err := db.PrepareContext(ctx, QueryMarkerSQL+typesSQL)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var types []state.PostgresType
	for rows.Next() {
		var t state.PostgresType
		// arrayString holds the type-specific JSON payload ('[]' when absent).
		var arrayString string
		t.DatabaseOid = currentDatabaseOid
		err := rows.Scan(
			&t.Oid, &t.ArrayOid, &t.SchemaName, &t.Name, &t.Type, &t.DomainType, &t.DomainNotNull, &t.DomainDefault, &arrayString)
		if err != nil {
			return nil, err
		}
		// NOTE(review): the Unmarshal errors below are ignored, so malformed
		// JSON silently leaves the detail field empty — confirm this
		// best-effort behavior is intended.
		if t.Type == "d" {
			json.Unmarshal([]byte(arrayString), &t.DomainConstraints)
		}
		if t.Type == "e" {
			json.Unmarshal([]byte(arrayString), &t.EnumValues)
		}
		if t.Type == "c" {
			json.Unmarshal([]byte(arrayString), &t.CompositeAttrs)
		}
		types = append(types, t)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return types, nil
}
|
package contact
import (
"github.com/chidam1994/happyfox/models"
"github.com/google/uuid"
)
// Repository is the persistence interface for contacts.
type Repository interface {
	// Save stores contact and returns its id.
	Save(contact *models.Contact) (uuid.UUID, error)
	// Delete removes the contact with the given id.
	Delete(contactId uuid.UUID) error
	// Find returns contacts matching the given filter values.
	Find(filterMap map[models.Filter]string) ([]models.Contact, error)
	// FindById returns the contact with the given id.
	FindById(contactId uuid.UUID) (*models.Contact, error)
	// FindByName returns the contact with the given name.
	FindByName(name string) (*models.Contact, error)
}
// Service is the business-logic interface exposed to the contact handlers.
type Service interface {
	// SaveContact stores contact and returns its id.
	SaveContact(contact *models.Contact) (uuid.UUID, error)
	// FindContacts returns contacts matching the given filter values.
	FindContacts(filterMap map[models.Filter]string) ([]models.Contact, error)
	// DeleteContact removes the contact with the given id.
	DeleteContact(contactId uuid.UUID) error
	// GetContact returns the contact with the given id.
	GetContact(contactId uuid.UUID) (*models.Contact, error)
}
|
package consumer
import (
"fmt"
"time"
)
// newVbProcessingStats seeds the per-vbucket bookkeeping map with default
// values for all numVbuckets vbuckets of the given app.
func newVbProcessingStats(appName string) vbStats {
	vbsts := make(vbStats, numVbuckets)
	for vb := uint16(0); vb < numVbuckets; vb++ {
		vbsts[vb] = &vbStat{
			stats: map[string]interface{}{
				"last_processed_seq_no": uint64(0),
				"dcp_stream_status":     dcpStreamStopped,
				"assigned_worker":       "",
				"requesting_worker":     "",
				// Stored in memory but not persisted to disk; persistence to
				// disk takes place periodically. The next stats are updated by
				// plasma writer routines.
				"plasma_last_seq_no_stored":    uint64(0),
				"plasma_last_seq_no_persisted": uint64(0),
				// The stats below are updated by plasma reader routines, which
				// process timer events.
				"currently_processed_doc_id_timer":  time.Now().UTC().Format(time.RFC3339),
				"currently_processed_non_doc_timer": fmt.Sprintf("%s::%s", appName, time.Now().UTC().Format(time.RFC3339)),
				"last_processed_doc_id_timer_event": "",
				"next_doc_id_timer_to_process":      time.Now().UTC().Add(time.Second).Format(time.RFC3339),
				"next_non_doc_timer_to_process":     fmt.Sprintf("%s::%s", appName, time.Now().UTC().Add(time.Second).Format(time.RFC3339)),
				"doc_id_timer_processing_worker":    "",
			},
		}
	}
	return vbsts
}
// getVbStat reads one named stat for vbucket vb under that entry's read lock.
func (vbs vbStats) getVbStat(vb uint16, statName string) interface{} {
	entry := vbs[vb]
	entry.RLock()
	defer entry.RUnlock()
	return entry.stats[statName]
}
// updateVbStat writes one named stat for vbucket vb under that entry's
// write lock.
func (vbs vbStats) updateVbStat(vb uint16, statName string, val interface{}) {
	entry := vbs[vb]
	entry.Lock()
	defer entry.Unlock()
	entry.stats[statName] = val
}
|
package dubbo
import (
"bytes"
"encoding/binary"
)
const (
headerLength = 16
magicHigh = byte(0xda)
magicLow = byte(0xbb)
flagRequest = byte(0x80)
flagTwoWay = byte(0x40)
serializationID = byte(0x6)
)
// Dubbo ...
type Dubbo struct {
buffer *bytes.Buffer
databuf *bytes.Buffer
header []byte
dubboVersion []byte
}
// NewDubbo ...
func NewDubbo(dubboVersion []byte) *Dubbo {
return &Dubbo{bytes.NewBuffer([]byte("")), bytes.NewBuffer([]byte("")), make([]byte, headerLength), dubboVersion}
}
// Encode ...
func (dubbo *Dubbo) Encode(requestID uint64, interfaceName []byte, version []byte, method []byte, paramtypes []byte, args [][]byte) []byte {
//data := "\"2.0.1\"\n\"com.alibaba.dubbo.performance.demo.provider.IHelloService\"\nnull\n\"hash\"\n\"Ljava/lang/String;\"\n\"raHleA61L5jdyRtMDS8qszHlbYu6ZlyaRl1JPGTkrdZx0w550DvM0DosWs8QI0UW9j02KdTRaMTeIEnJ3v7XB0Ro5WIqzwAX91XCkXUhXBSV8o2WJI8ggeNGA7eMJrKJutYVoleMR2lXVHm9NmWpF2yRUoCy8cgP7nrcZTRG9zjLGTOAtXUmS1LOIUcR4XtxQ6eWWZnbsibfwKozr1hGxatLwcnVsu3rWvFPK1ig9GkTpzChxSzaCgSa3tnGMpUaLyuoknJubmBoTns513njSl9FcZIHcsZvgTpDV6eW87eHg1xjqKLOFRTrkgDJWwa53RXeFOpORsylW5pg6mb6wtaFNqTBEYsXvYX4SlKsKoLZ2t0aD85Qb6BbZULtLWFoXXKqvrbu2mWLOoIuR9gMwTNwr1UqpsC1rdMPLioRLn9fb04ZWAG3q0ZiVuqcCDv55g8TxEzRhfrxpLfRCh1CHqgKGctxbve42jvsJVbxrxXUM36SpzfbBAPdrDjm7C1Q3QUDJAuoNT3qUWFtvYtomuDJBGDYn2DZsNHOV42IGQlBerAjfzXUx2HQN3jAQ6pdBNYRexuiLT7rGPz7UNNN96uWK6xa4SqcxbRfICxt8Aw9GF5SI9F3qRiH0Zd2F0nwYpBn5\"\n{\"path\":\"com.alibaba.dubbo.performance.demo.provider.IHelloService\"}"
dubbo.databuf.Reset()
dubbo.attach(dubbo.dubboVersion)
dubbo.attach(interfaceName)
dubbo.attach(version)
dubbo.attach(method)
dubbo.attach(paramtypes)
for _, arg := range args {
dubbo.databuf.WriteString("\"")
dubbo.databuf.Write(arg)
dubbo.databuf.WriteString("\"\n")
}
dubbo.attachJSON([]byte("path"), interfaceName)
dubbo.makeHeader(uint32(dubbo.databuf.Len()), requestID)
dubbo.buffer.Write(dubbo.databuf.Bytes())
return dubbo.buffer.Bytes()
}
func (dubbo *Dubbo) attach(bs []byte) {
if bs == nil {
dubbo.databuf.WriteString("null\n")
} else {
dubbo.databuf.WriteString("\"")
dubbo.databuf.Write(bs)
dubbo.databuf.WriteString("\"\n")
}
}
func (dubbo *Dubbo) attachJSON(key []byte, val []byte) {
dubbo.databuf.WriteString("{\"")
dubbo.databuf.Write(key)
dubbo.databuf.WriteString("\":")
dubbo.databuf.WriteString("\"")
dubbo.databuf.Write(val)
dubbo.databuf.WriteString("\"}\n")
//"{\"" + key + "\":" + "\"" + val + "\"}\n"
}
func (dubbo *Dubbo) makeHeader(dataLen uint32, requestID uint64) {
// header length.
//FLAG_EVENT := byte(0x20)
// header.
dubbo.buffer.Reset()
// set magic number
dubbo.header[0] = magicHigh
dubbo.header[1] = magicLow
// set request and serilization bit
dubbo.header[2] = byte(flagRequest | flagTwoWay | serializationID)
// set request id
binary.BigEndian.PutUint64(dubbo.header[4:12], requestID)
// encode request data.
binary.BigEndian.PutUint32(dubbo.header[12:16], dataLen)
dubbo.buffer.Write(dubbo.header)
}
|
package goautils
import (
"context"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/codeclysm/ctxlog/v2"
httpmdlwr "goa.design/goa/v3/http/middleware"
goamdlwr "goa.design/goa/v3/middleware"
)
// logger is the minimal leveled-logging surface this package needs; each
// method takes a message and optional structured field maps.
type logger interface {
	Debug(msg string, fields ...map[string]interface{})
	Info(msg string, fields ...map[string]interface{})
	Error(msg string, fields ...map[string]interface{})
}
// ErrorHandler returns a goa error-handler function that logs the given
// error. The response writer is left untouched.
func ErrorHandler(log logger) func(context.Context, http.ResponseWriter, error) {
	return func(_ context.Context, _ http.ResponseWriter, err error) {
		log.Error(err.Error())
	}
}
// RequestID is a wrapper around the goa middleware with the same name,
// except it also augments the ctxlog with the request id.
func RequestID() func(http.Handler) http.Handler {
	goaReqID := httpmdlwr.RequestID(
		httpmdlwr.UseXRequestIDHeaderOption(true),
		httpmdlwr.XRequestHeaderLimitOption(128),
	)
	return func(next http.Handler) http.Handler {
		withLogField := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			// Copy the id the goa middleware stored into the ctxlog fields.
			ctx = ctxlog.WithFields(ctx, map[string]interface{}{
				"reqID": ctx.Value(goamdlwr.RequestIDKey),
			})
			next.ServeHTTP(w, r.WithContext(ctx))
		})
		return goaReqID(withLogField)
	}
}
// ListenGracefully will start a listener on a specified address and blocks
// until it receives an interrupt or a sigterm, at which point it will start
// a graceful shutdown and finally return.
// You MUST provide a logger, it's the only way to see if there are errors
func ListenGracefully(addr string, handler http.Handler, log logger) {
	srv := &http.Server{Addr: addr, Handler: handler}
	// Closed by the signal goroutine once Shutdown has completed; keeps this
	// function alive until in-flight requests have drained.
	idleConnsClosed := make(chan struct{})
	log.Info("Listen and gracefully shutdown", map[string]interface{}{
		"addr": addr,
	})
	go func() {
		sigint := make(chan os.Signal, 1)
		// interrupt signal sent from terminal
		signal.Notify(sigint, os.Interrupt)
		// sigterm signal sent from kubernetes
		signal.Notify(sigint, syscall.SIGTERM)
		<-sigint
		log.Info("Start graceful shutdown. Waiting for current request to finish")
		// We received an interrupt signal, shut down.
		if err := srv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			log.Error("HTTP server Shutdown", map[string]interface{}{
				"err": err,
			})
		}
		close(idleConnsClosed)
	}()
	// ListenAndServe returns ErrServerClosed when Shutdown is triggered; any
	// other error is a real listen/serve failure.
	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
		// Error starting or closing listener:
		log.Error("HTTP server ListenAndServe", map[string]interface{}{
			"err": err,
		})
	}
	<-idleConnsClosed
	log.Info("Shutdown")
}
|
package phase
import (
"github.com/felixangell/goof/cc/unit"
)
// Phase is a single stage of processing applied to a parsed source file.
// Implementations receive the file and inspect or mutate it as their stage
// requires.
type Phase interface {
	ExecutePhase(file *unit.SourceFile)
}
|
package logger
import (
"fmt"
"net"
"os"
"runtime/debug"
"time"
rotatelogs "github.com/lestrrat-go/file-rotatelogs"
"github.com/rs/zerolog"
)
// Names of the structured fields emitted by this package; override them
// before constructing loggers to customize the output keys.
var PackageField = "package"
var ModuleField = "module"
var FuncName = "func"
var LocalIpField = "localIps"

//TimeFormat default timefield format
var TimeFormat = "2006-01-02 15:04:05.999"
// FileLogger is a zerolog.Logger that writes to a time-rotated file and can
// optionally mirror output to the console. It is also its own io.Writer
// (see Write), which is how the console mirroring is implemented.
type FileLogger struct {
	zerolog.Logger
	rotateLogs *rotatelogs.RotateLogs // rotating file sink; nil if creation failed in NewLogger
	options    Options
	ips        []string // local IPv4s, stamped on events when ShowLocalIp is set
}
// Options configures NewLogger.
type Options struct {
	Console bool   // also print every log line to stdout
	Level   string // zerolog level name, e.g. "debug", "info"
	//File eg. access_log.%Y%m%d
	File string
	//current log file link
	FileLink     string
	MaxAge       float64 // days
	RotationTime float64 //days
	ForceNewFile bool // start a fresh file instead of appending
	ShowLocalIp  bool // include local IPv4 addresses in every event
}
// rotateLogOptions translates Options into the option list understood by
// the rotatelogs package: MaxAge <= 0 disables age-based pruning and a
// non-positive RotationTime falls back to daily rotation.
func rotateLogOptions(options Options) []rotatelogs.Option {
	day := float64(24) * float64(time.Hour)
	opts := make([]rotatelogs.Option, 0, 4)
	if options.FileLink != "" {
		opts = append(opts, rotatelogs.WithLinkName(options.FileLink))
	}
	maxAge := time.Duration(-1)
	if options.MaxAge > 0 {
		maxAge = time.Duration(options.MaxAge * day)
	}
	opts = append(opts, rotatelogs.WithMaxAge(maxAge))
	rotation := 24 * time.Hour
	if options.RotationTime > 0 {
		rotation = time.Duration(options.RotationTime * day)
	}
	opts = append(opts, rotatelogs.WithRotationTime(rotation))
	if options.ForceNewFile {
		opts = append(opts, rotatelogs.ForceNewFile())
	}
	return opts
}
// NewLogger builds a FileLogger from options. Failures (IP lookup, rotating
// file creation, level parsing) are reported on stdout rather than returned;
// when the rotating file cannot be created the returned logger carries a
// zero zerolog.Logger. Note that this mutates the zerolog globals
// TimeFieldFormat and ErrorMarshalFunc, affecting every zerolog user in the
// process.
func NewLogger(options Options) FileLogger {
	zerolog.TimeFieldFormat = TimeFormat
	var ips []string
	var err error
	if options.ShowLocalIp {
		ips, err = localIPv4s()
		if err != nil {
			fmt.Println("NewLogger failed on get local IPv4s: ", err)
		}
	}
	fileLogger := FileLogger{options: options, ips: ips}
	rotateLogs, err := rotatelogs.New(options.File, rotateLogOptions(options)...)
	if err != nil {
		fmt.Println("NewLogger failed on creating rotatelogs: ", err)
		return fileLogger
	}
	fileLogger.rotateLogs = rotateLogs
	// fileLogger itself is the writer, so console mirroring happens in Write.
	logger := zerolog.New(fileLogger).With().Timestamp().Logger()
	level, err := zerolog.ParseLevel(options.Level)
	if err != nil {
		// NOTE(review): on a parse failure, level keeps zerolog's zero value
		// rather than a sensible default — confirm this is intended.
		fmt.Println("NewLogger failed on parse level:"+options.Level, err)
	}
	// Attach a stack trace to Err(...) fields; also mirror it to stderr when
	// console output is enabled.
	zerolog.ErrorMarshalFunc = func(err error) interface{} {
		stack := debug.Stack()
		if options.Console {
			os.Stderr.Write(stack)
		}
		return string(stack)
	}
	fileLogger.Logger = logger.Level(level)
	return fileLogger
}
// ModuleHook stamps fixed package/module/ip fields onto every log event.
type ModuleHook struct {
	pkg string
	mod string
	ips []string
}

// Run implements zerolog.Hook; empty fields are omitted.
func (h ModuleHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
	if h.pkg != "" {
		e.Str(PackageField, h.pkg)
	}
	if h.mod != "" {
		e.Str(ModuleField, h.mod)
	}
	if len(h.ips) > 0 {
		e.Strs(LocalIpField, h.ips)
	}
}
// Fork returns a child logger whose events are stamped with the given
// package and module fields (plus local IPs when enabled).
//
// Fix: the derived FileLogger previously dropped the rotateLogs, options
// and ips fields, so a forked logger was unusable as an io.Writer and could
// not fork again with IPs intact; carry all fields over.
func (f FileLogger) Fork(pkg, mod string) FileLogger {
	return FileLogger{
		Logger:     f.Hook(ModuleHook{pkg: pkg, mod: mod, ips: f.ips}),
		rotateLogs: f.rotateLogs,
		options:    f.options,
		ips:        f.ips,
	}
}

// WithPkg returns a child logger stamping only the package field.
func (f FileLogger) WithPkg(pkg string) FileLogger {
	return FileLogger{
		Logger:     f.Hook(ModuleHook{pkg: pkg, ips: f.ips}),
		rotateLogs: f.rotateLogs,
		options:    f.options,
		ips:        f.ips,
	}
}
// Write implements io.Writer; zerolog writes every rendered event here.
// The line is mirrored to stdout when Console is set and then appended to
// the rotating file.
//
// Fixes: (1) when no file is configured, report len(p) instead of 0 — the
// io.Writer contract requires a non-nil error whenever n < len(p), so the
// old 0 return made callers see a bogus short write; (2) guard against a
// nil rotateLogs (NewLogger may have failed to create it) instead of
// panicking.
func (f FileLogger) Write(p []byte) (n int, err error) {
	if f.options.File == "" {
		return len(p), nil
	}
	if f.options.Console {
		fmt.Print(string(p))
	}
	if f.rotateLogs == nil {
		return len(p), nil
	}
	return f.rotateLogs.Write(p)
}
// Leveled event constructors mirroring zerolog's API, wrapped so chained
// calls return this package's *Event and its extra helpers (Func, Module,
// Pkg, ...).
func (f FileLogger) Trace() *Event {
	return &Event{f.Logger.Trace()}
}
func (f FileLogger) Debug() *Event {
	return &Event{f.Logger.Debug()}
}
func (f FileLogger) Info() *Event {
	return &Event{f.Logger.Info()}
}
func (f FileLogger) Warn() *Event {
	return &Event{f.Logger.Warn()}
}
func (f FileLogger) Error() *Event {
	return &Event{f.Logger.Error()}
}
func (f FileLogger) Fatal() *Event {
	return &Event{f.Logger.Fatal()}
}
func (f FileLogger) Panic() *Event {
	return &Event{f.Logger.Panic()}
}
// WithLevel creates an event at an arbitrary level chosen at runtime.
func (f FileLogger) WithLevel(level zerolog.Level) *Event {
	return &Event{f.Logger.WithLevel(level)}
}
// Level predicates: each reports whether a message at that severity would
// currently be emitted, letting callers skip expensive field construction.
func (f FileLogger) TraceEnabled() bool {
	return f.GetLevel() <= zerolog.TraceLevel
}
func (f FileLogger) DebugEnabled() bool {
	return f.GetLevel() <= zerolog.DebugLevel
}
func (f FileLogger) InfoEnabled() bool {
	return f.GetLevel() <= zerolog.InfoLevel
}
func (f FileLogger) WarnEnabled() bool {
	return f.GetLevel() <= zerolog.WarnLevel
}
func (f FileLogger) ErrorEnabled() bool {
	return f.GetLevel() <= zerolog.ErrorLevel
}
func (f FileLogger) FatalEnabled() bool {
	return f.GetLevel() <= zerolog.FatalLevel
}
// Event wraps zerolog.Event so fluent chains keep returning *Event and the
// extra helpers below remain available mid-chain.
type Event struct {
	*zerolog.Event
}

// Str adds a string field and returns the event for chaining.
func (e *Event) Str(key, val string) *Event {
	e.Event.Str(key, val)
	return e
}

// Err attaches an error field (rendered via zerolog.ErrorMarshalFunc).
func (e *Event) Err(err error) *Event {
	e.Event.Err(err)
	return e
}

// Stack enables stack-trace rendering for the event's error field.
func (e *Event) Stack() *Event {
	e.Event.Stack()
	return e
}

// Interface adds an arbitrary value, marshaled by zerolog.
func (e *Event) Interface(key string, value interface{}) *Event {
	e.Event.Interface(key, value)
	return e
}
func (e *Event) Int(key string, val int) *Event {
	e.Event.Int(key, val)
	return e
}
func (e *Event) Int32(key string, val int32) *Event {
	e.Event.Int32(key, val)
	return e
}
func (e *Event) Int64(key string, val int64) *Event {
	e.Event.Int64(key, val)
	return e
}
// Shorthand helpers for the conventional fields; the single-letter variants
// are aliases for the full names.

//Func add func field in log
func (e *Event) Func(funcName string) *Event {
	e.Event.Str(FuncName, funcName)
	return e
}

// F is shorthand for Func.
func (e *Event) F(funcName string) *Event {
	return e.Func(funcName)
}

// Module adds the module field.
func (e *Event) Module(name string) *Event {
	return e.Str(ModuleField, name)
}

// M is shorthand for Module.
func (e *Event) M(name string) *Event {
	return e.Module(name)
}

// Pkg adds the package field.
func (e *Event) Pkg(name string) *Event {
	return e.Str(PackageField, name)
}

// P is shorthand for Pkg.
func (e *Event) P(name string) *Event {
	return e.Pkg(name)
}
func localIPv4s() ([]string, error) {
var ips []string
addrs, err := net.InterfaceAddrs()
if err != nil {
return ips, err
}
for _, a := range addrs {
if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {
ips = append(ips, ipnet.IP.String())
}
}
return ips, nil
}
|
// extracted from argoproj/argo-cd/pkg/apis/application/v1alpha1/types.go
package resource
import (
"encoding/json"
"gopkg.in/yaml.v2"
)
// ResourceIgnoreDifferences contains resource filter and list of json paths which should be ignored during comparison with live state.
// Group/Kind/Name/Namespace select the resources the filter applies to;
// JSONPointers lists the paths excluded from diffing.
type ResourceIgnoreDifferences struct {
	Group        string   `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
	Kind         string   `json:"kind" protobuf:"bytes,2,opt,name=kind"`
	Name         string   `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	Namespace    string   `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
	JSONPointers []string `json:"jsonPointers" protobuf:"bytes,5,opt,name=jsonPointers"`
}
// KnownTypeField contains mapping between CRD field and known Kubernetes type
type KnownTypeField struct {
	Field string `json:"field,omitempty" protobuf:"bytes,1,opt,name=field"`
	Type  string `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}

// OverrideIgnoreDiff holds the json paths excluded from diffing for a
// ResourceOverride.
type OverrideIgnoreDiff struct {
	JSONPointers []string `json:"jsonPointers" protobuf:"bytes,1,rep,name=jSONPointers"`
}
// ResourceOverride holds configuration to customize resource diffing and health assessment
type ResourceOverride struct {
	HealthLua         string             `protobuf:"bytes,1,opt,name=healthLua"`
	Actions           string             `protobuf:"bytes,3,opt,name=actions"`
	IgnoreDifferences OverrideIgnoreDiff `protobuf:"bytes,2,opt,name=ignoreDifferences"`
	KnownTypeFields   []KnownTypeField   `protobuf:"bytes,4,opt,name=knownTypeFields"`
}

// rawResourceOverride is the JSON wire form of ResourceOverride: the
// ignoreDifferences field travels as a YAML string and is decoded/encoded
// separately in UnmarshalJSON/MarshalJSON.
type rawResourceOverride struct {
	HealthLua         string           `json:"health.lua,omitempty"`
	Actions           string           `json:"actions,omitempty"`
	IgnoreDifferences string           `json:"ignoreDifferences,omitempty"`
	KnownTypeFields   []KnownTypeField `json:"knownTypeFields,omitempty"`
}
// UnmarshalJSON decodes a ResourceOverride from its wire form; the
// ignoreDifferences field is carried as a YAML string and decoded in a
// second step.
func (s *ResourceOverride) UnmarshalJSON(data []byte) error {
	var raw rawResourceOverride
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	s.HealthLua = raw.HealthLua
	s.Actions = raw.Actions
	s.KnownTypeFields = raw.KnownTypeFields
	return yaml.Unmarshal([]byte(raw.IgnoreDifferences), &s.IgnoreDifferences)
}
// MarshalJSON encodes a ResourceOverride into its wire form, serializing
// the ignoreDifferences section to a YAML string first.
func (s ResourceOverride) MarshalJSON() ([]byte, error) {
	ignoreDiff, err := yaml.Marshal(s.IgnoreDifferences)
	if err != nil {
		return nil, err
	}
	raw := rawResourceOverride{
		HealthLua:         s.HealthLua,
		Actions:           s.Actions,
		IgnoreDifferences: string(ignoreDiff),
		KnownTypeFields:   s.KnownTypeFields,
	}
	return json.Marshal(&raw)
}
// GetActions parses the override's YAML Actions document into a
// ResourceActions value.
func (o *ResourceOverride) GetActions() (ResourceActions, error) {
	var actions ResourceActions
	err := yaml.Unmarshal([]byte(o.Actions), &actions)
	return actions, err
}
// ResourceActions holds the Lua discovery script and the list of custom
// actions available for a resource kind.
type ResourceActions struct {
	ActionDiscoveryLua string                     `json:"discovery.lua,omitempty" yaml:"discovery.lua,omitempty" protobuf:"bytes,1,opt,name=actionDiscoveryLua"`
	Definitions        []ResourceActionDefinition `json:"definitions,omitempty" protobuf:"bytes,2,rep,name=definitions"`
}

// ResourceActionDefinition is a single named action backed by a Lua script.
type ResourceActionDefinition struct {
	Name      string `json:"name" protobuf:"bytes,1,opt,name=name"`
	ActionLua string `json:"action.lua" yaml:"action.lua" protobuf:"bytes,2,opt,name=actionLua"`
}
|
package main
import (
goflag "flag"
"fmt"
"os"
)
// main is the process entry point; all work happens in Execute.
func main() {
	Execute()
}
// exitWithError will terminate execution with an error result
// It prints the error to stderr and exits with a non-zero exit code
func exitWithError(err error) {
	fmt.Fprintf(os.Stderr, "\n%v\n", err)
	os.Exit(1)
}
// Execute configures flag defaults, builds the root command and runs it,
// terminating the process with a non-zero status on failure.
func Execute() {
	// NOTE(review): errors from Set/Parse are ignored — "logtostderr" must be
	// registered by an imported logging package (not visible here); confirm,
	// otherwise Set fails silently.
	goflag.Set("logtostderr", "true")
	goflag.CommandLine.Parse([]string{})
	c := NewCmdRoot(os.Stdout)
	if err := c.Execute(); err != nil {
		exitWithError(err)
	}
}
|
package model
import (
	"errors"
	"time"

	"gorm.io/gorm"
)
// ApplePackage is the table model for Apple IPA packages.
type ApplePackage struct {
	ID               int       `gorm:"primary_key;AUTO_INCREMENT;comment:自增ID"`
	BundleIdentifier string    `gorm:"not null;column:bundleIdentifier;comment:安装包id"`
	Name             string    `gorm:"not null;column:name;comment:包名"`
	IconLink         string    `gorm:"null;column:icon_link;comment:图标下载链接"`
	Version          string    `gorm:"not null;column:version;comment:版本"`
	BuildVersion     string    `gorm:"not null;column:build_version;comment:编译版本号"`
	MiniVersion      string    `gorm:"not null;column:mini_version;comment:最小支持版本"`
	Summary          string    `gorm:"not null;column:summary;comment:简介"`
	MobileConfigLink string    `json:"-" gorm:"null;column:mobile_config_link;comment:获取UDID描述文件下载链接"` // mobileconfig download link (hidden from JSON)
	IPAPath          string    `json:"-" gorm:"null;column:ipa_path;comment:原始IPA路径"`                  // original IPA path (hidden from JSON)
	Size             float64   `gorm:"not null;column:size;comment:大小"`
	Count            int       `gorm:"not null;column:count;comment:总下载量"` // total download count
	CreatedAt        time.Time `gorm:"not null;comment:创建时间"`
	UpdatedAt        time.Time `gorm:"not null;comment:更新时间"`
	AppLink          string    `gorm:"-"` // computed install link, never persisted
}
// TableName tells gorm which database table backs ApplePackage.
func (a ApplePackage) TableName() string {
	return "apple_package"
}
// InsertApplePackage inserts the package row into the database.
func (a *ApplePackage) InsertApplePackage() error {
	return db.Create(a).Error
}
// GetApplePackageByID looks up a package row by primary key. It returns
// (nil, nil) when no row exists so callers can distinguish "not found" from
// a real database error.
func GetApplePackageByID(id string) (*ApplePackage, error) {
	var applePackage ApplePackage
	if err := db.Where("id = ?", id).First(&applePackage).Error; err != nil {
		// errors.Is also matches wrapped errors, which the previous plain
		// == comparison would miss (gorm's documented idiom).
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return &applePackage, nil
}
// DeleteApplePackageByID deletes the package row with the given id.
func DeleteApplePackageByID(id string) error {
	return db.Where("id = ?", id).Delete(&ApplePackage{}).Error
}
// UpdateApplePackageMobileconfig persists the receiver's MobileConfigLink
// for the row identified by a.ID.
func (a ApplePackage) UpdateApplePackageMobileconfig() error {
	return db.Model(&a).Where("id = ?", a.ID).
		Updates(map[string]interface{}{
			"mobile_config_link": a.MobileConfigLink,
		}).Error
}
// AddApplePackageCount increments the download counter by one, atomically
// in SQL via gorm.Expr (no read-modify-write race).
func (a ApplePackage) AddApplePackageCount() error {
	return db.Model(&a).UpdateColumn("count", gorm.Expr("count + ?", 1)).Error
}
// GetAllApplePackage returns one page of packages together with pagination
// metadata.
//
// Fix: Total was always 0 because the count query had been commented out
// while the total was still returned; count the table explicitly before
// fetching the page. Also use errors.Is for the gorm sentinel comparison.
func GetAllApplePackage(pageSize, page int) (*PaginationQ, error) {
	var (
		applePackageList []*ApplePackage
		total            int64
	)
	if err := db.Model(&ApplePackage{}).Count(&total).Error; err != nil {
		return nil, err
	}
	if err := db.Model(&ApplePackage{}).
		Scopes(Paginate(&page, &pageSize)).
		Find(&applePackageList).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return &PaginationQ{
		PageSize: pageSize,
		Page:     page,
		Data:     applePackageList,
		Total:    total,
	}, nil
}
|
package ocpp
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/evcc-io/evcc/api"
"github.com/evcc-io/evcc/util"
"github.com/lorenzodonini/ocpp-go/ocpp1.6/core"
"github.com/lorenzodonini/ocpp-go/ocpp1.6/remotetrigger"
"github.com/lorenzodonini/ocpp-go/ocpp1.6/types"
)
// Configuration keys read from / written to the charge point via
// GetConfiguration/ChangeConfiguration.
const (
	// Core profile keys
	KeyNumberOfConnectors = "NumberOfConnectors"

	// Meter profile keys
	KeyMeterValuesSampledData   = "MeterValuesSampledData"
	KeyMeterValueSampleInterval = "MeterValueSampleInterval"

	// Smart Charging profile keys
	KeyChargeProfileMaxStackLevel              = "ChargeProfileMaxStackLevel"
	KeyChargingScheduleAllowedChargingRateUnit = "ChargingScheduleAllowedChargingRateUnit"
	KeyChargingScheduleMaxPeriods              = "ChargingScheduleMaxPeriods"
	KeyConnectorSwitch3to1PhaseSupported       = "ConnectorSwitch3to1PhaseSupported"
	KeyMaxChargingProfilesInstalled            = "MaxChargingProfilesInstalled"

	// Alfen specific keys
	KeyAlfenPlugAndChargeIdentifier = "PlugAndChargeIdentifier"
)
// TODO support multiple connectors
// Since ocpp-go interfaces at charge point level, we need to manage multiple connector separately

// CP models a single OCPP 1.6 charge point connector. All mutable state is
// guarded by mu; the clock is swappable for tests (see TestClock).
type CP struct {
	mu    sync.Mutex
	once  sync.Once
	clock clock.Clock // mockable time
	log   *util.Logger

	id        string
	connector int

	connectC, statusC chan struct{} // closed on first connect / first status notification
	connected         bool
	status            *core.StatusNotificationRequest // last received status; nil until one arrives

	meterUpdated time.Time     // when measurements were last refreshed
	timeout      time.Duration // staleness bound for meter values and status waits

	measurements map[string]types.SampledValue // latest sample per measurand key

	txnCount int // change initial value to the last known global transaction. Needs persistence
	txnId    int
}
// NewChargePoint creates a charge point handle for one connector. timeout
// bounds both how long Initialized waits for a status notification and how
// long cached meter readings are considered fresh.
func NewChargePoint(log *util.Logger, id string, connector int, timeout time.Duration) *CP {
	cp := &CP{
		clock:        clock.New(),
		log:          log,
		id:           id,
		connector:    connector,
		timeout:      timeout,
		connectC:     make(chan struct{}),
		statusC:      make(chan struct{}),
		measurements: make(map[string]types.SampledValue),
	}
	return cp
}
// TestClock replaces the wall clock with a mock; for tests only.
func (cp *CP) TestClock(clock clock.Clock) {
	cp.clock = clock
}

// ID returns the charge point identifier.
func (cp *CP) ID() string {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	return cp.id
}

// RegisterID assigns the identifier exactly once; registering a second time
// is a programming error and panics.
func (cp *CP) RegisterID(id string) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	if cp.id != "" {
		panic("ocpp: cannot re-register id")
	}
	cp.id = id
}

// Connector returns the connector number (immutable after construction, so
// no locking is needed).
func (cp *CP) Connector() int {
	return cp.connector
}
// connect records the connection state; the first successful connect also
// closes connectC (guarded by once) so HasConnected fires exactly once.
func (cp *CP) connect(connect bool) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	cp.connected = connect
	if connect {
		cp.once.Do(func() {
			close(cp.connectC)
		})
	}
}

// HasConnected returns a channel that is closed once the charge point has
// connected for the first time.
func (cp *CP) HasConnected() <-chan struct{} {
	return cp.connectC
}
// Initialized waits up to cp.timeout for the first status notification.
// Halfway through the wait it actively requests one via a TriggerMessage in
// case the charge point did not send a notification on its own.
func (cp *CP) Initialized() error {
	// trigger status
	time.AfterFunc(cp.timeout/2, func() {
		select {
		case <-cp.statusC:
			// status already arrived; nothing to trigger
			return
		default:
			Instance().TriggerMessageRequest(cp.ID(), core.StatusNotificationFeatureName, func(request *remotetrigger.TriggerMessageRequest) {
				request.ConnectorId = &cp.connector
			})
		}
	})
	// wait for status
	select {
	case <-cp.statusC:
		return nil
	case <-time.After(cp.timeout):
		return api.ErrTimeout
	}
}
// TransactionID returns the current transaction id, or api.ErrTimeout when
// the charge point is not connected.
func (cp *CP) TransactionID() (int, error) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	if !cp.connected {
		return 0, api.ErrTimeout
	}
	return cp.txnId, nil
}
// Status maps the last received OCPP status notification onto the generic
// api.ChargeStatus values (A = idle, B = connected, C = charging).
// NOTE(review): assumes a status notification has arrived once connected —
// cp.status would be nil otherwise; confirm connect/status ordering.
func (cp *CP) Status() (api.ChargeStatus, error) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	res := api.StatusNone
	if !cp.connected {
		return res, api.ErrTimeout
	}
	// Any reported error code takes precedence over the state mapping.
	if cp.status.ErrorCode != core.NoError {
		return res, fmt.Errorf("%s: %s", cp.status.ErrorCode, cp.status.Info)
	}
	switch cp.status.Status {
	case core.ChargePointStatusAvailable, // "Available"
		core.ChargePointStatusUnavailable: // "Unavailable"
		res = api.StatusA
	case
		core.ChargePointStatusPreparing,     // "Preparing"
		core.ChargePointStatusSuspendedEVSE, // "SuspendedEVSE"
		core.ChargePointStatusSuspendedEV,   // "SuspendedEV"
		core.ChargePointStatusFinishing:     // "Finishing"
		res = api.StatusB
	case core.ChargePointStatusCharging: // "Charging"
		res = api.StatusC
	case core.ChargePointStatusReserved, // "Reserved"
		core.ChargePointStatusFaulted: // "Faulted"
		return api.StatusF, fmt.Errorf("chargepoint status: %s", cp.status.ErrorCode)
	default:
		return api.StatusNone, fmt.Errorf("invalid chargepoint status: %s", cp.status.Status)
	}
	return res, nil
}
// WatchDog triggers meter values messages if older than timeout.
// Must be wrapped in a goroutine; it never returns.
//
// Uses an explicit time.Ticker instead of time.Tick (flagged by staticcheck
// SA1015 on Go versions before 1.23); the first check still runs
// immediately, then once per timeout interval.
func (cp *CP) WatchDog(timeout time.Duration) {
	tick := time.NewTicker(timeout)
	defer tick.Stop()
	for {
		cp.mu.Lock()
		stale := cp.txnId != 0 && cp.clock.Since(cp.meterUpdated) > timeout
		cp.mu.Unlock()
		if stale {
			Instance().TriggerMeterValuesRequest(cp.ID(), cp.Connector())
		}
		<-tick.C
	}
}
// isTimeout reports whether the cached meter values are older than the
// configured timeout (0 disables the check). Callers must hold cp.mu.
func (cp *CP) isTimeout() bool {
	return cp.timeout > 0 && cp.clock.Since(cp.meterUpdated) > cp.timeout
}
var _ api.Meter = (*CP)(nil)

// CurrentPower returns the most recent active import power reading, scaled
// to base units via scale (W, assuming the charge point reports power in
// W/kW/mW — confirm against the device).
func (cp *CP) CurrentPower() (float64, error) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	if !cp.connected {
		return 0, api.ErrTimeout
	}
	// zero value on timeout when not charging
	if cp.isTimeout() {
		if cp.txnId != 0 {
			return 0, api.ErrTimeout
		}
		return 0, nil
	}
	if m, ok := cp.measurements[string(types.MeasurandPowerActiveImport)]; ok {
		f, err := strconv.ParseFloat(m.Value, 64)
		return scale(f, m.Unit), err
	}
	return 0, api.ErrNotAvailable
}
var _ api.MeterEnergy = (*CP)(nil)

// TotalEnergy returns the energy import register reading, scaled to base
// units and then divided by 1e3 (presumably Wh -> kWh — confirm against the
// device's reported unit).
func (cp *CP) TotalEnergy() (float64, error) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	if !cp.connected {
		return 0, api.ErrTimeout
	}
	// fallthrough for last value on timeout when not charging
	if cp.txnId != 0 && cp.isTimeout() {
		return 0, api.ErrTimeout
	}
	if m, ok := cp.measurements[string(types.MeasurandEnergyActiveImportRegister)]; ok {
		f, err := strconv.ParseFloat(m.Value, 64)
		return scale(f, m.Unit) / 1e3, err
	}
	return 0, api.ErrNotAvailable
}
// scale converts a measured value to its base unit: kilo-prefixed units are
// multiplied by 1000, milli-prefixed units divided by 1000, and anything
// else is returned unchanged. (The parameter was renamed from "scale",
// which shadowed the function itself.)
func scale(f float64, unit types.UnitOfMeasure) float64 {
	u := string(unit)
	if strings.HasPrefix(u, "k") {
		return f * 1e3
	}
	if strings.HasPrefix(u, "m") {
		return f / 1e3
	}
	return f
}
// getKeyCurrentPhase builds the measurements map key for the per-phase
// current reading, e.g. "Current.Import@L1".
func getKeyCurrentPhase(phase int) string {
	return fmt.Sprintf("%s@L%d", types.MeasurandCurrentImport, phase)
}
var _ api.PhaseCurrents = (*CP)(nil)

// Currents returns the import currents for phases L1, L2 and L3, scaled to
// base units. All three phases must be present or ErrNotAvailable is
// returned.
func (cp *CP) Currents() (float64, float64, float64, error) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	if !cp.connected {
		return 0, 0, 0, api.ErrTimeout
	}
	// zero value on timeout when not charging
	if cp.isTimeout() {
		if cp.txnId != 0 {
			return 0, 0, 0, api.ErrTimeout
		}
		return 0, 0, 0, nil
	}
	currents := make([]float64, 0, 3)
	for phase := 1; phase <= 3; phase++ {
		m, ok := cp.measurements[getKeyCurrentPhase(phase)]
		if !ok {
			return 0, 0, 0, api.ErrNotAvailable
		}
		f, err := strconv.ParseFloat(m.Value, 64)
		if err != nil {
			return 0, 0, 0, fmt.Errorf("invalid current for phase %d: %w", phase, err)
		}
		currents = append(currents, scale(f, m.Unit))
	}
	return currents[0], currents[1], currents[2], nil
}
|
// Publish Play Events to Redis
package events
import (
"encoding/json"
log "github.com/Sirupsen/logrus"
"gopkg.in/redis.v3"
)
// Publish a Play Event from the Player to Redis. This sets the current
// playing track, the user and start time as well as publishing to the
// event channel
func PublishPlayEvent(c *redis.Client, track string, user string, start string) error {
var err error
// Create content for current key
data, err := json.Marshal(¤tTrack{
Track: track,
User: user,
})
if err != nil {
log.Errorf("Failed to marshal current key: %s", err)
}
// Create Transaction
tx := c.Multi()
// Execute Transaction
for {
_, err := tx.Exec(func() error {
tx.Set(currentTrackKey, string(data[:]), 0)
tx.Set(startTimeKey, start, 0)
tx.Set(pauseKey, "0", 0)
tx.Del(pauseTimeKey)
tx.Del(pauseDurrationKey)
return nil
})
if err == redis.TxFailedErr {
// Retry.
continue
} else if err != nil {
return err
}
break
}
// Generate message payload
payload, err := json.Marshal(&publishEventPayload{
Event: playEvent,
Track: track,
User: user,
})
if err != nil {
return err
}
// Publish Message
err = c.Publish(eventsChannel, string(payload[:])).Err()
if err != nil {
return err
}
return nil
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package bench
import "testing"
// BenchmarkTruncate measures the round trips required by TRUNCATE against
// tables of varying column count (1-2) and row count (0-2).
func BenchmarkTruncate(b *testing.B) {
	tests := []RoundTripBenchTestCase{
		{
			name:  "truncate 1 column 0 rows",
			setup: "CREATE TABLE t(x INT);",
			stmt:  "TRUNCATE t",
		},
		{
			name: "truncate 1 column 1 row",
			setup: `CREATE TABLE t(x INT);
INSERT INTO t (x) VALUES (1);`,
			stmt: "TRUNCATE t",
		},
		{
			name: "truncate 1 column 2 rows",
			setup: `CREATE TABLE t(x INT);
INSERT INTO t (x) VALUES (1);
INSERT INTO t (x) VALUES (2);`,
			stmt: "TRUNCATE t",
		},
		{
			name:  "truncate 2 column 0 rows",
			setup: `CREATE TABLE t(x INT, y INT);`,
			stmt:  "TRUNCATE t",
		},
		{
			name: "truncate 2 column 1 rows",
			setup: `CREATE TABLE t(x INT, y INT);
INSERT INTO t (x, y) VALUES (1, 1);`,
			stmt: "TRUNCATE t",
		},
		{
			name: "truncate 2 column 2 rows",
			setup: `CREATE TABLE t(x INT, y INT);
INSERT INTO t (x, y) VALUES (1, 1);
INSERT INTO t (x,y) VALUES (2, 2);`,
			stmt: "TRUNCATE t",
		},
	}
	RunRoundTripBenchmark(b, tests)
}
|
package vppd
import (
"github.com/contiv/netplugin/netmaster/mastercfg"
)
const (
	// vppOperPathPrefix is the state prefix under which the vpp driver
	// stores its operational state (rooted at mastercfg.StateOperPath).
	// (The previous comment referred to a non-existent "StateOperPath"
	// constant in this block.)
	vppOperPathPrefix = mastercfg.StateOperPath + "vpp-driver/"
	// vppOperPath is the format string for a single oper-state key; the %s
	// placeholder is filled with the object id.
	vppOperPath = vppOperPathPrefix + "%s"
)
|
package random
import (
"context"
"math/rand"
"github.com/go-kratos/kratos/v2/selector"
"github.com/go-kratos/kratos/v2/selector/node/direct"
)
var (
	// Compile-time assertion that *Balancer satisfies selector.Balancer.
	_ selector.Balancer = &Balancer{}
	// Name is balancer name
	Name = "random"
)
// Balancer is a random balancer.
type Balancer struct{}

// New builds a selector whose balancer picks nodes uniformly at random,
// using the direct node builder.
func New() selector.Selector {
	return &selector.Default{
		Balancer:    &Balancer{},
		NodeBuilder: &direct.Builder{},
	}
}
// Pick selects one node uniformly at random and returns it with its done
// callback. It returns selector.ErrNoAvailable when the candidate list is
// empty.
func (p *Balancer) Pick(_ context.Context, nodes []selector.WeightedNode) (selector.WeightedNode, selector.DoneFunc, error) {
	if len(nodes) == 0 {
		return nil, nil, selector.ErrNoAvailable
	}
	node := nodes[rand.Intn(len(nodes))]
	done := node.Pick()
	return node, done, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.