text stringlengths 11 4.05M |
|---|
package middlewares
import (
"net/http"
)
// XssProtectMiddleware returns an Adapter that stamps anti-XSS and
// anti-clickjacking headers on every response before delegating to the
// wrapped handler.
func XssProtectMiddleware() Adapter {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			hdr := w.Header()
			hdr.Set("X-XSS-Protection", "1; mode=block")
			hdr.Set("X-Frame-Options", "deny")
			next.ServeHTTP(w, r)
		}
		return http.HandlerFunc(fn)
	}
}
|
package brightctl
import (
haikunator "github.com/atrox/haikunatorgo/v2"
"github.com/gorilla/sessions"
"github.com/shihtzu-systems/bright/pkg/ghost"
"github.com/shihtzu-systems/bright/pkg/tower"
log "github.com/sirupsen/logrus"
"net/http"
"path"
)
const (
hackBasePath = "/hack"
)
// HackPath joins the hack base path with any extra path pieces.
func HackPath(pieces ...string) string {
	parts := make([]string, 0, len(pieces)+1)
	parts = append(parts, hackBasePath)
	parts = append(parts, pieces...)
	return path.Join(parts...)
}
// HackController serves the /hack routes: it resolves a per-browser
// session name, attaches the configured hack token to the caller's
// ghost, and redirects into the bnet flow.
type HackController struct {
	SessionStore sessions.Store  // cookie/session backend used by Id
	Tower        tower.Tower     // app config: session key, system id, redis
	HackToken    tower.HackToken // token injected into the ghost in HandleRoot
}
// Id returns the stable session name for this browser, generating and
// persisting a haikunated name on first visit.
//
// NOTE(review): log.Fatal on a session decode/save error kills the whole
// process from inside a request handler — consider returning an error or
// continuing with the fresh session gorilla/sessions hands back alongside
// the error.
func (c HackController) Id(w http.ResponseWriter, r *http.Request) string {
	store, err := c.SessionStore.Get(r, c.Tower.SessionKey)
	if err != nil {
		log.Fatal(err)
	}
	name, exists := store.Values["name"]
	if !exists {
		name = generateSessionName()
		store.Values["name"] = name
	}
	// Persist the (possibly new) name back into the session cookie.
	if err := store.Save(r, w); err != nil {
		log.Fatal(err)
	}
	log.Debug("id: ", store.Values["name"].(string))
	return store.Values["name"].(string)
}
// HandleRoot handles the /hack root: it loads (materializes) the
// caller's ghost from redis, stamps the configured hack token onto it,
// saves it back, and redirects to the bnet base path.
func (c HackController) HandleRoot(w http.ResponseWriter, r *http.Request) {
	log.Debug("handling ", HackPath())
	g := ghost.NewGhost(c.Tower.System.Id, c.Id(w, r))
	g.Materialize(c.Tower.Redis)
	g.Token = ghost.BungieToken(c.HackToken)
	g.Save(c.Tower.Redis)
	// 307 preserves the request method across the redirect.
	w.Header().Set("Location", path.Join(bnetBasePath))
	w.WriteHeader(http.StatusTemporaryRedirect)
}
// generateSessionName returns a random haiku-style name with a
// six-character token suffix.
func generateSessionName() string {
	h := haikunator.New()
	h.TokenLength = 6
	return h.Haikunate()
}
|
package main
// verifyToken reports whether token is valid.
//
// NOTE(review): this is a stub — it accepts every token unconditionally,
// including the empty string. Do not rely on it for authentication;
// implement real verification (signature/expiry checks) before shipping.
func verifyToken(token string) bool {
	return true
}
|
/*
* Copyright 2021 American Express
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package git
import "github.com/dghubble/sling"
// errorResponse is the envelope Bitbucket returns for failed API calls.
type errorResponse struct {
	Errors []apiError
}

// pagedRequest carries the paging query parameters for list endpoints.
type pagedRequest struct {
	Limit uint `url:"limit,omitempty"`
	Start uint `url:"start,omitempty"`
}

// pagedResponse is the paging metadata embedded in list responses.
type pagedResponse struct {
	Size       uint `json:"size"`
	Limit      uint `json:"limit"`
	IsLastPage bool `json:"isLastPage"`
	Start      uint `json:"start"`
}

// link is a single hypermedia link (e.g. {"href": ...}).
type link map[string]string

// links maps a relation name (e.g. "clone") to its link entries.
type links map[string][]link

// project mirrors the Bitbucket project resource.
type project struct {
	Key         string `json:"key,omitempty"`
	ID          uint   `json:"id,omitempty"`
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
	Public      bool   `json:"public,omitempty"`
	Type        string `json:"type,omitempty"`
	Links       links  `json:"links,omitempty"`
}

// repository mirrors the Bitbucket repository resource.
type repository struct {
	Slug          string
	ID            uint
	Name          string
	ScmID         string
	State         string
	StatusMessage string
	Forkable      bool
	Project       project
	Public        bool
	Links         links
}

// apiError is one error entry inside an errorResponse.
type apiError struct {
	Context       string
	Message       string
	ExceptionName string
}
// requestError is an error carrying the HTTP status code of a failed
// request alongside its message.
type requestError struct {
	Code    int
	Message string
}

// Error implements the error interface by returning the API message.
func (r requestError) Error() string {
	msg := r.Message
	return msg
}
// getRepositoriesResponse is the paged list returned by the
// repositories endpoint.
type getRepositoriesResponse struct {
	pagedResponse
	Values []repository
}

// bitClient is a thin Bitbucket REST client built on sling.
type bitClient struct {
	sling *sling.Sling
	Path  string
}

// List is an interface for adding items to a list
type List interface {
	Push(string)
}

// DiffItem is a diff struct for an inidividual file
type DiffItem struct {
	raw    string // raw diff text for the file
	fPath  string // path of the file the diff applies to
	commit string // commit the diff belongs to
}
|
package leetcode
// containsDuplicate reports whether any value appears more than once in
// nums. It tracks seen values in a set and stops at the first repeat.
func containsDuplicate(nums []int) bool {
	seen := make(map[int]struct{}, len(nums))
	for _, v := range nums {
		if _, dup := seen[v]; dup {
			return true
		}
		seen[v] = struct{}{}
	}
	return false
}
|
package leetcode
import (
"testing"
"github.com/go-playground/assert/v2"
. "github.com/summerKK/leetcode-Go/utils"
)
// TestMergeTwoLists checks mergeTwoLists against two pairs of sorted
// linked lists, comparing the merged traversal to the expected slice.
func TestMergeTwoLists(t *testing.T) {
	cases := []struct {
		first  *ListNode
		second *ListNode
		want   []int
	}{
		{
			first:  GenLinked([]int{1, 2, 4}),
			second: GenLinked([]int{1, 3, 4}),
			want:   []int{1, 1, 2, 3, 4, 4},
		},
		{
			first:  GenLinked([]int{1, 2, 4, 5, 6, 7, 8}),
			second: GenLinked([]int{4, 5, 6}),
			want:   []int{1, 2, 4, 4, 5, 5, 6, 6, 7, 8},
		},
	}
	for _, tc := range cases {
		merged := mergeTwoLists(tc.second, tc.first)
		assert.Equal(t, Traverse(merged), tc.want)
	}
}
|
package mdproc
import (
"bytes"
"regexp"
"github.com/n0x1m/md2gmi/pipe"
)
// stateFn consumes one input line and returns the next state of the
// machine (nil stops the machine).
type stateFn func(*fsm, []byte) stateFn

// fsm is the markdown preprocessor state machine: it re-chunks input
// lines into logical blocks (paragraphs, lists, code fences) before they
// are converted to gemtext.
type fsm struct {
	state stateFn
	i     int // sequence number for emitted StreamItems
	out   chan pipe.StreamItem
	// combining multiple input lines
	multiLineBlockMode int    // >0 while inside an HTML comment span
	blockBuffer        []byte // current block being assembled
	sendBuffer         []byte // completed block waiting to be emitted
	// if we have a termination rule to abide, e.g. implied code fences
	pending []byte
}
// Preprocessor returns a pipeline stage backed by a fresh state machine.
func Preprocessor() pipe.Pipeline {
	m := &fsm{}
	return m.pipeline
}
// pipeline consumes StreamItems from in, feeds each payload through the
// state machine, and emits re-chunked blocks on the returned channel.
// The output channel is closed once in is exhausted and all buffered
// content has been flushed.
func (m *fsm) pipeline(in chan pipe.StreamItem) chan pipe.StreamItem {
	m.out = make(chan pipe.StreamItem)
	go func() {
		// Run until the machine is explicitly stopped (state == nil).
		for m.state = normal; m.state != nil; {
			b, ok := <-in
			if !ok {
				// Input exhausted: flush remaining buffers and shut down.
				m.blockFlush()
				m.sync()
				close(m.out)
				m.state = nil
				continue
			}
			// wrap strips HTML-comment spans before the current state
			// function consumes the line and yields the next state.
			m.state = m.state(wrap(m, b.Payload()))
			m.sync()
		}
	}()
	return m.out
}
// wrap tracks HTML comments (<!-- ... -->) across input lines and strips
// commented-out spans from data before it reaches the state machine.
// It returns m together with the (possibly clipped) line so the result
// can be passed directly to a stateFn.
func wrap(m *fsm, data []byte) (*fsm, []byte) {
	var scount, ecount int
	// Opening markers raise the nesting level, closing markers lower it.
	if scount = countStart(data, "<!--"); scount > 0 {
		m.multiLineBlockMode += scount
	}
	if ecount = countEnd(data, "-->"); ecount > 0 {
		m.multiLineBlockMode -= ecount
	}
	// clip entire line if no control sequences present
	if (m.multiLineBlockMode > 0 && scount == 0 && ecount == 0) || m.multiLineBlockMode > 1 {
		data = data[:0]
		return m, data
	}
	// clip data past first occurrence
	if scount > 0 {
		data = data[:bytes.Index(data, []byte("<!--"))]
	}
	// clip data past last occurrence
	if ecount = countEnd(data, "-->"); ecount > 0 {
		data = data[bytes.LastIndex(data, []byte("-->"))+3:]
	}
	return m, data
}
// countStart returns how many times pattern occurs anywhere in data.
// (Despite the name, it is not anchored to the start of the line.)
func countStart(data []byte, pattern string) int {
	needle := []byte(pattern)
	return bytes.Count(data, needle)
}
// countEnd returns how many times pattern occurs anywhere in data.
// (Despite the name, it is not anchored to the end of the line; it is
// kept separate from countStart only for call-site readability.)
func countEnd(data []byte, pattern string) int {
	needle := []byte(pattern)
	return bytes.Count(data, needle)
}
// sync emits the send buffer (if any) as the next StreamItem, newline
// terminated and sequentially numbered, then resets the buffer.
func (m *fsm) sync() {
	if len(m.sendBuffer) > 0 {
		m.sendBuffer = append(m.sendBuffer, '\n')
		m.out <- pipe.NewItem(m.i, m.sendBuffer)
		// NOTE(review): the buffer is truncated, not reallocated, so the
		// backing array just sent on the channel is reused by later
		// appends — safe only if the consumer finishes with (or copies)
		// the item before the next sync. TODO confirm downstream usage.
		m.sendBuffer = m.sendBuffer[:0]
		m.i++
	}
}
// softBlockFlush flushes the block buffer unless the machine is inside
// a multi-line (HTML comment) span, in which case it is a no-op.
func (m *fsm) softBlockFlush() {
	if m.multiLineBlockMode <= 0 {
		m.blockFlush()
	}
}
// blockFlush moves the block buffer — and any pending terminator, such
// as an implied closing code fence — into the send buffer.
func (m *fsm) blockFlush() {
	m.sendBuffer = append(m.sendBuffer, m.blockBuffer...)
	m.blockBuffer = m.blockBuffer[:0]
	if len(m.pending) == 0 {
		return
	}
	m.sendBuffer = append(m.sendBuffer, m.pending...)
	m.pending = m.pending[:0]
}
// isTerminated reports whether data is non-empty and does NOT end with
// a period — i.e. the sentence appears to continue on the next line.
// (The name reads inverted; kept for compatibility with callers.)
func isTerminated(data []byte) bool {
	if len(data) == 0 {
		return false
	}
	return data[len(data)-1] != '.'
}
// triggerBreak reports whether data ends the current paragraph: an
// empty line, a lone newline, or a line ending in '.', ';' or ':'.
func triggerBreak(data []byte) bool {
	if len(data) == 0 {
		return true
	}
	if len(data) == 1 && data[0] == '\n' {
		return true
	}
	switch data[len(data)-1] {
	case '.', ';', ':':
		return true
	default:
		return false
	}
}
// Compiled once at package scope — the original recompiled both
// patterns on every call, which is wasteful on a per-line hot path.
var (
	// emphasisRE matches *italic*/_bold_ spans so emphasis markers are
	// not mistaken for list bullets.
	emphasisRE = regexp.MustCompile(`[\*_](.*)[\*_]`)
	// listMarkerRE captures a leading -, * or ^ list marker together
	// with its indentation.
	listMarkerRE = regexp.MustCompile(`^([ \t]*[-*^]{1,1})[^*-]`)
)

// handleList collapses a markdown list item of any nesting level to a
// single top-level "-" item. The returned bool reports whether data was
// recognized as a list item; when false, data is returned unchanged.
func handleList(data []byte) ([]byte, bool) {
	emphasis := emphasisRE.FindSubmatch(data)
	marker := listMarkerRE.FindSubmatch(data)
	// Treat as a list only when a marker matched and the line is not an
	// emphasis span.
	if len(marker) > 1 && len(emphasis) <= 1 {
		return bytes.Replace(data, marker[1], []byte("-"), 1), true
	}
	return data, false
}
// hasFence reports whether the line contains a ``` code-fence marker.
func hasFence(data []byte) bool {
	return bytes.Index(data, []byte("```")) >= 0
}
// needsFence reports whether the line is indented code (four leading
// spaces) that should be wrapped in an implied fence.
func needsFence(data []byte) bool {
	return bytes.HasPrefix(data, []byte("    "))
}
// normalText dispatches a line from the default state to the matching
// block state: list, fenced code, implied (indented) code, paragraph,
// or plain passthrough.
func normalText(m *fsm, data []byte) stateFn {
	// Blank lines are swallowed in the normal state.
	if len(bytes.TrimSpace(data)) == 0 {
		return normal
	}
	if data, isList := handleList(data); isList {
		m.blockBuffer = append(m.blockBuffer, data...)
		m.softBlockFlush()
		return list
	}
	if hasFence(data) {
		// NOTE(review): unlike every other branch this OVERWRITES
		// m.blockBuffer (append(data, ...) rather than
		// append(m.blockBuffer, ...)), discarding any buffered content
		// and aliasing data's backing array. Looks unintentional —
		// confirm against upstream before changing.
		m.blockBuffer = append(data, '\n')
		return fence
	}
	if needsFence(data) {
		// Indented code: open an implied fence now and remember to close
		// it (m.pending) when the indentation ends.
		m.blockBuffer = append(m.blockBuffer, []byte("```\n")...)
		m.blockBuffer = append(m.blockBuffer, append(data[4:], '\n')...)
		m.pending = []byte("```\n")
		return toFence
	}
	if isTerminated(data) {
		// Line does not end a sentence: start collecting a paragraph.
		m.blockBuffer = append(m.blockBuffer, data...)
		m.blockBuffer = append(m.blockBuffer, ' ')
		return paragraph
	}
	m.blockBuffer = append(m.blockBuffer, append(data, '\n')...)
	m.softBlockFlush()
	return normal
}
// normal is the initial/default state; it simply defers to normalText.
func normal(m *fsm, data []byte) stateFn {
	return normalText(m, data)
}
// list keeps collecting consecutive list items; any non-list line
// flushes the buffered list and re-enters normal processing.
func list(m *fsm, data []byte) stateFn {
	if item, ok := handleList(data); ok {
		m.blockBuffer = append(m.blockBuffer, append(item, '\n')...)
		return list
	}
	m.softBlockFlush()
	return normalText(m, data)
}
// fence buffers lines inside an explicit code fence; the closing ```
// line flushes the block and returns to the normal state.
func fence(m *fsm, data []byte) stateFn {
	line := append(data, '\n')
	m.blockBuffer = append(m.blockBuffer, line...)
	if !hasFence(data) {
		return fence
	}
	// second fence returns to normal
	m.softBlockFlush()
	return normal
}
// toFence buffers an implied (indented) code block, stripping the
// four-space indent; the first unindented line closes the block (the
// pending closing fence is emitted by blockFlush) and is reprocessed.
func toFence(m *fsm, data []byte) stateFn {
	if !needsFence(data) {
		m.softBlockFlush()
		return normalText(m, data)
	}
	line := append(data, '\n')
	m.blockBuffer = append(m.blockBuffer, line[4:]...)
	return toFence
}
// paragraph joins continuation lines with spaces until triggerBreak
// signals the end of the paragraph, then trims, newline-terminates and
// flushes the assembled block.
func paragraph(m *fsm, data []byte) stateFn {
	m.blockBuffer = append(m.blockBuffer, data...)
	if !triggerBreak(data) {
		m.blockBuffer = append(m.blockBuffer, ' ')
		return paragraph
	}
	m.blockBuffer = bytes.TrimSpace(m.blockBuffer)
	// TODO, remove double spaces inside paragraphs
	m.blockBuffer = append(m.blockBuffer, '\n')
	m.softBlockFlush()
	return normal
}
|
package models
// JsonReservationResponse is the JSON payload returned by reservation
// endpoints: an ok/message status plus the reservation's room and dates.
type JsonReservationResponse struct {
	OK        bool   `json:"ok"`
	Message   string `json:"message"`
	RoomID    string `json:"room_id"`
	StartDate string `json:"start_date"`
	EndDate   string `json:"end_date"`
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/alecthomas/jsonschema"
)
func check(e error) {
if e != nil {
panic(e)
}
}
// schemaFolder is the output directory for generated JSON schema files.
var schemaFolder string = "./jsonschemas"

// ensureSchemaFolder creates the output directory if it does not exist.
// os.MkdirAll replaces the original Stat-then-Mkdir pattern: it is a
// no-op when the directory already exists, creates missing parents, and
// has no check-then-act (TOCTOU) window.
func ensureSchemaFolder() {
	check(os.MkdirAll(schemaFolder, 0755))
}
// main reflects a JSON schema for every struct returned by getStructs
// (defined elsewhere in this package) and writes each one to
// jsonschemas/<name>.json.
func main() {
	m := getStructs()
	ensureSchemaFolder()
	for file, iface := range m {
		schema := jsonschema.Reflect(iface)
		// NOTE(review): the MarshalIndent error is silently discarded; a
		// check(err) here would be cheaper than debugging an empty file.
		jschema, _ := json.MarshalIndent(schema, "", " ")
		filename := schemaFolder + "/" + file + ".json"
		fmt.Printf("Writing %s\n", filename)
		err := ioutil.WriteFile(filename, jschema, 0644)
		check(err)
	}
}
|
package solutions
// BSTIterator iterates a binary search tree in-order using an explicit
// stack of not-yet-visited nodes.
type BSTIterator struct {
	stack []*TreeNode
}

// GraphNode is a node in an undirected graph (used by clone-graph style
// problems).
type GraphNode struct {
	Val       int
	Neighbors []*GraphNode
}

// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// Node is a binary tree node extended with Next (populate-next-right
// style problems) and Random (copy-with-random-pointer style problems).
type Node struct {
	Val    int
	Left   *Node
	Right  *Node
	Next   *Node
	Random *Node
}
|
package matchmaker
import (
"context"
"log"
"sort"
"sync"
"time"
"github.com/xssnick/wint/pkg/repo"
)
// Repo is the persistence interface the matchmaker needs: creating a
// game from an owner plus a set of users, and querying a user's current
// game state.
type Repo interface {
	CreateGame(ctx context.Context, owner uint64, users []uint64) (uint64, error)
	GetGameState(ctx context.Context, user uint64) (repo.MatchState, error)
}
// matchQueue is a pending lobby owned by one player, filled with
// searching players until it starts or expires.
type matchQueue struct {
	wantStart     bool  // owner requested an early start (StartMatch)
	searchStarted int64 // unix seconds when the lobby was created
	owner         playerInfo
	players       map[uint64]*playerInfo // joined players, keyed by uid
	lockPlayers   sync.RWMutex           // guards players
}

// playerInfo tracks one searching player.
type playerInfo struct {
	uid   uint64      // player id; NOTE(review): left zero by FindMatch — confirm intended
	seen  int64       // unix seconds of last ping (StateMatch with withPing)
	mode  int         // game mode used to pair the player with lobbies
	match *matchQueue // lobby the player was placed into; nil while unassigned
}
// Config carries the matchmaker's tuning parameters.
type Config struct {
	Repo          Repo
	PingValid     int64         // seconds a player/owner stays alive after the last ping
	MaxPlayers    int           // lobby starts immediately at this size
	MinPlayers    int           // minimum size for a long-search or owner-forced start
	LongSearchSec int64         // seconds after which MinPlayers is enough to start
	TickEvery     time.Duration // period of the LinkRoutine maintenance loop
}

// Matchmaker pairs searching players with player-owned lobbies and turns
// viable lobbies into games via the Repo.
type Matchmaker struct {
	stopper     chan bool              // signals LinkRoutine to stop
	matches     map[uint64]*matchQueue // open lobbies keyed by owner uid
	players     map[uint64]*playerInfo // searching players keyed by uid
	lockMatches sync.RWMutex           // guards matches
	lockPlayers sync.RWMutex           // guards players
	Config
}
// NewMatchmaker builds a Matchmaker with empty match/player registries
// and the supplied configuration.
func NewMatchmaker(cfg Config) *Matchmaker {
	mm := &Matchmaker{
		stopper: make(chan bool),
		matches: make(map[uint64]*matchQueue),
		players: make(map[uint64]*playerInfo),
		Config:  cfg,
	}
	return mm
}
// LinkRoutine is the matchmaker's maintenance loop. Every TickEvery it:
//  1. drops players and lobbies whose last ping is older than PingValid,
//  2. collects lobbies ready to start: full (MaxPlayers) or at least
//     MinPlayers after LongSearchSec / an owner-forced start,
//  3. distributes unassigned searching players over open lobbies of the
//     same mode, smallest lobbies first,
//  4. creates the actual games through Repo.CreateGame.
// It returns when m.stopper fires.
func (m *Matchmaker) LinkRoutine() {
	// Scratch slices reused across ticks to avoid per-tick allocations.
	mpToDel := make([]uint64, 0, 8)
	mToDel := make([]uint64, 0, 128)
	pToDel := make([]uint64, 0, 1024)
	mToStart := make([]*matchQueue, 0, 32)
	// One fill bucket per supported mode (0..2).
	mToFill := [][]*matchQueue{
		make([]*matchQueue, 0, 256),
		make([]*matchQueue, 0, 256),
		make([]*matchQueue, 0, 256),
	}
	log.Println("Matchmaker started!")
	for {
		//log.Println(len(m.matches), len(m.players))
		select {
		case <-time.After(m.TickEvery):
		case <-m.stopper:
			return
		}
		// Reset scratch state for this tick.
		pToDel = pToDel[:0]
		mToDel = mToDel[:0]
		mToStart = mToStart[:0]
		for i := range mToFill {
			mToFill[i] = mToFill[i][:0]
		}
		tm := time.Now().Unix()
		// clean players
		m.lockPlayers.Lock()
		for uid, player := range m.players {
			if player.seen+m.PingValid < tm {
				pToDel = append(pToDel, uid)
			}
		}
		for _, v := range pToDel {
			log.Println("expired player", v)
			delete(m.players, v)
		}
		m.lockPlayers.Unlock()
		// clean matches
		m.lockMatches.Lock()
		for owner, match := range m.matches {
			mpToDel = mpToDel[:0]
			match.lockPlayers.Lock()
			// An expired owner kills the whole lobby and frees its players.
			if match.owner.seen+m.PingValid < tm {
				mToDel = append(mToDel, owner)
				for _, p := range match.players {
					p.match = nil
				}
			}
			for uid, p := range match.players {
				if p.seen+m.PingValid < tm {
					mpToDel = append(mpToDel, uid)
				}
			}
			for _, v := range mpToDel {
				delete(match.players, v)
			}
			match.lockPlayers.Unlock()
		}
		for _, v := range mToDel {
			log.Println("expired match", v)
			delete(m.matches, v)
		}
		mToDel = mToDel[:0]
		// start games
		for owner, match := range m.matches {
			// NOTE(review): match.players is read here without taking
			// match.lockPlayers — elsewhere that map is guarded by it.
			// Verify with -race.
			if len(match.players) >= m.MaxPlayers || (len(match.players) >= m.MinPlayers && (match.searchStarted+m.LongSearchSec < tm || match.wantStart)) {
				mToDel = append(mToDel, owner)
				mToStart = append(mToStart, match)
			} else {
				// Only modes with a fill bucket can receive searchers.
				if match.owner.mode < len(mToFill) {
					mToFill[match.owner.mode] = append(mToFill[match.owner.mode], match)
				}
			}
		}
		for _, v := range mToDel {
			log.Println("start match", v)
			delete(m.matches, v)
		}
		m.lockMatches.Unlock()
		pToDel = pToDel[:0]
		// Players inside a starting lobby leave the search pool below.
		for _, v := range mToStart {
			v.lockPlayers.RLock()
			for uid := range v.players {
				pToDel = append(pToDel, uid)
			}
			v.lockPlayers.RUnlock()
		}
		// Fill the emptiest lobbies first.
		for x := range mToFill {
			sort.Slice(mToFill[x], func(i, j int) bool {
				return len(mToFill[x][i].players) < len(mToFill[x][j].players)
			})
		}
		i := 0
		// fill players matches
		m.lockPlayers.Lock()
		for _, v := range pToDel {
			log.Println("start player", v)
			delete(m.players, v)
		}
		for x := range mToFill {
			arr := mToFill[x]
			if len(arr) > 0 {
				for uid, player := range m.players {
					if player.match == nil && player.mode == x {
						// Round-robin unassigned players over the lobbies.
						randMatch := arr[i%len(arr)]
						i++
						randMatch.lockPlayers.Lock()
						player.match = randMatch
						randMatch.players[uid] = player
						randMatch.lockPlayers.Unlock()
					}
				}
			}
		}
		m.lockPlayers.Unlock()
		// Create a game for every lobby that reached start conditions.
		for _, v := range mToStart {
			v.lockPlayers.RLock()
			var players = make([]uint64, 0, len(v.players))
			for uid := range v.players {
				players = append(players, uid)
			}
			v.lockPlayers.RUnlock()
			log.Println("game created, owner", v)
			_, err := m.Repo.CreateGame(context.Background(), v.owner.uid, players)
			if err != nil {
				log.Println("[Maker Routine] create match error:", err)
			}
			m.lockMatches.Lock()
			delete(m.matches, v.owner.uid)
			m.lockMatches.Unlock()
			m.lockPlayers.Lock()
			for _, p := range players {
				delete(m.players, p)
			}
			m.lockPlayers.Unlock()
		}
	}
}
// FindMatch puts user into the pool of players searching for a game in
// the given mode. It returns repo.ErrAlreadyInSearch when the user
// already has an unfinished game or an active search.
func (m *Matchmaker) FindMatch(user uint64, mode int) error {
	state, err := m.StateMatch(user, false)
	free := err == repo.ErrNotFound || (err == nil && state.FinishedAt > 0)
	if !free {
		return repo.ErrAlreadyInSearch
	}
	m.lockPlayers.Lock()
	m.players[user] = &playerInfo{
		seen:  time.Now().Unix(),
		mode:  mode,
		match: nil,
	}
	m.lockPlayers.Unlock()
	log.Println("FIND")
	return nil
}
// CreateMatch registers user as the owner of a new lobby in the given
// mode. It returns repo.ErrAlreadyInSearch when the user already has an
// unfinished game or an active search.
func (m *Matchmaker) CreateMatch(user uint64, mode int) error {
	state, err := m.StateMatch(user, false)
	free := err == repo.ErrNotFound || (err == nil && state.FinishedAt > 0)
	if !free {
		return repo.ErrAlreadyInSearch
	}
	m.lockMatches.Lock()
	m.matches[user] = &matchQueue{
		searchStarted: time.Now().Unix(),
		owner: playerInfo{
			uid:  user,
			seen: time.Now().Unix(),
			mode: mode,
		},
		players: map[uint64]*playerInfo{},
	}
	m.lockMatches.Unlock()
	return nil
}
// StartMatch lets a lobby owner request an early start. It succeeds once
// the lobby has at least MinPlayers; LinkRoutine performs the actual
// start on its next tick.
func (m *Matchmaker) StartMatch(user uint64) error {
	m.lockMatches.RLock()
	defer m.lockMatches.RUnlock()
	match := m.matches[user]
	if match != nil {
		// NOTE(review): match.players is read and wantStart written while
		// holding only lockMatches.RLock; players is guarded elsewhere by
		// match.lockPlayers. Likely data race — verify with -race.
		if len(match.players) >= m.MinPlayers {
			match.wantStart = true
			return nil
		}
		return repo.ErrNotEnoughPlayers
	}
	return repo.ErrNotFound
}
// StateMatch returns the matchmaking state for user: the lobby they own,
// the lobby they joined, an empty state while still searching, or —
// when not queued at all — the persisted game state from the Repo.
// withPing refreshes the corresponding liveness timestamp.
func (m *Matchmaker) StateMatch(user uint64, withPing bool) (repo.MatchState, error) {
	m.lockMatches.RLock()
	match := m.matches[user]
	m.lockMatches.RUnlock()
	if match != nil {
		// NOTE(review): len(match.players) is read before taking
		// match.lockPlayers, and owner.seen is written below with no lock
		// at all — both look like data races. Verify with -race.
		players := make([]uint64, 0, len(match.players))
		log.Println("check state", user, len(match.players))
		match.lockPlayers.RLock()
		for p := range match.players {
			players = append(players, p)
		}
		match.lockPlayers.RUnlock()
		if withPing {
			match.owner.seen = time.Now().Unix()
		}
		return repo.MatchState{
			Players: players,
		}, nil
	}
	m.lockPlayers.RLock()
	player := m.players[user]
	m.lockPlayers.RUnlock()
	if player != nil {
		if withPing {
			player.seen = time.Now().Unix()
		}
		if player.match == nil {
			// Still searching: not yet placed into a lobby.
			return repo.MatchState{
				Players: []uint64{},
			}, nil
		}
		players := make([]uint64, 0, len(player.match.players))
		player.match.lockPlayers.RLock()
		for p := range player.match.players {
			players = append(players, p)
		}
		player.match.lockPlayers.RUnlock()
		return repo.MatchState{
			Players: players,
		}, nil
	}
	// Not queued: ask the persistence layer about a running game.
	return m.Repo.GetGameState(context.Background(), user)
}
// ExitMatch removes user from matchmaking: it disbands the lobby they
// own (releasing its players back into the pool), or detaches them from
// the search pool and from any lobby they joined.
func (m *Matchmaker) ExitMatch(user uint64) error {
	m.lockMatches.RLock()
	match := m.matches[user]
	m.lockMatches.RUnlock()
	if match != nil {
		// NOTE(review): check-then-act — between the RLock lookup above
		// and this Lock, LinkRoutine may already have started/removed the
		// lobby. The delete is idempotent, but players freed here could
		// belong to a game being created. Verify.
		m.lockMatches.Lock()
		delete(m.matches, user)
		m.lockMatches.Unlock()
		// NOTE(review): p.match is written while holding only the read
		// lock — looks like a race; verify with -race.
		match.lockPlayers.RLock()
		for _, p := range match.players {
			p.match = nil
		}
		match.lockPlayers.RUnlock()
		return nil
	}
	m.lockPlayers.RLock()
	player := m.players[user]
	m.lockPlayers.RUnlock()
	if player != nil {
		if player.match != nil {
			m.lockPlayers.Lock()
			delete(m.players, user)
			m.lockPlayers.Unlock()
			player.match.lockPlayers.Lock()
			delete(player.match.players, user)
			player.match.lockPlayers.Unlock()
		} else {
			m.lockPlayers.Lock()
			delete(m.players, user)
			m.lockPlayers.Unlock()
		}
		return nil
	}
	return repo.ErrNotFound
}
|
package ioutil
import (
"fmt"
"testing"
)
// Test_Uint16 round-trips every uint16 value through PutUint16/Uint16
// for both byte orders.
//
// The original loop condition `i < MaxUint16` never exercised MaxUint16
// itself, and `i <= MaxUint16` would overflow uint16 and loop forever,
// so the loop now breaks explicitly after testing the final value.
func Test_Uint16(t *testing.T) {
	tmp := make([]byte, 2)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		var i uint16
		for {
			bo.PutUint16(tmp, i)
			if i2 := bo.Uint16(tmp); i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
			if i == MaxUint16 {
				break
			}
			i++
		}
	}
}
// Test_Uint24 round-trips every 24-bit value through PutUint24/Uint24
// for both byte orders.
//
// As with Test_Uint16, the original `i < MaxUint24` bound skipped the
// maximum value; the loop now tests it and breaks explicitly.
func Test_Uint24(t *testing.T) {
	tmp := make([]byte, 3)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		var i uint32
		for {
			bo.PutUint24(tmp, i)
			if i2 := bo.Uint24(tmp); i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
			if i == MaxUint24 {
				break
			}
			i++
		}
	}
}
// Test_Uint32 round-trips representative boundary values (including the
// maximum) through PutUint32/Uint32 for both byte orders.
func Test_Uint32(t *testing.T) {
	tmp := make([]byte, 4)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		for _, i := range []uint32{0x0, uint32(MaxUint8), uint32(MaxUint16), uint32(MaxUint24), uint32(MaxUint32), 42} {
			bo.PutUint32(tmp, i)
			i2 := bo.Uint32(tmp)
			if i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
		}
	}
}
// Test_Uint40 round-trips representative boundary values through
// PutUint40/Uint40 for both byte orders.
func Test_Uint40(t *testing.T) {
	tmp := make([]byte, 5)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		for _, i := range []uint64{0x0, uint64(MaxUint8), uint64(MaxUint16), uint64(MaxUint24), uint64(MaxUint32), uint64(MaxInt40), 42} {
			bo.PutUint40(tmp, i)
			i2 := bo.Uint40(tmp)
			if i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
		}
	}
}
// Test_Uint48 round-trips representative boundary values through
// PutUint48/Uint48 for both byte orders.
func Test_Uint48(t *testing.T) {
	tmp := make([]byte, 6)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		for _, i := range []uint64{0x0, uint64(MaxUint8), uint64(MaxUint16), uint64(MaxUint24), uint64(MaxUint32), uint64(MaxInt40), uint64(MaxInt48), 42} {
			bo.PutUint48(tmp, i)
			i2 := bo.Uint48(tmp)
			if i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
		}
	}
}
// Test_Uint56 round-trips representative boundary values through
// PutUint56/Uint56 for both byte orders.
func Test_Uint56(t *testing.T) {
	tmp := make([]byte, 7)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		for _, i := range []uint64{0x0, uint64(MaxUint8), uint64(MaxUint16), uint64(MaxUint24), uint64(MaxUint32), uint64(MaxInt40), uint64(MaxInt48), uint64(MaxUint56), 42} {
			bo.PutUint56(tmp, i)
			i2 := bo.Uint56(tmp)
			if i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
		}
	}
}
// Test_Uint64 round-trips representative boundary values through
// PutUint64/Uint64 for both byte orders.
func Test_Uint64(t *testing.T) {
	tmp := make([]byte, 8)
	for _, bo := range []ByteOrder{LittleEndian, BigEndian} {
		for _, i := range []uint64{0x0,
			uint64(MaxUint8), uint64(MaxUint16), uint64(MaxUint24), uint64(MaxUint32),
			uint64(MaxInt40), uint64(MaxInt48), uint64(MaxUint56), uint64(MaxUint64), 42} {
			bo.PutUint64(tmp, i)
			i2 := bo.Uint64(tmp)
			if i != i2 {
				t.Fatalf("expected %d but got %d", i, i2)
			}
		}
	}
}
// Test_PrintMaxes prints every Min/Max constant as a ready-made
// "// Name is value" comment line. It is a code-generation helper, not
// an assertion test — it never fails.
func Test_PrintMaxes(t *testing.T) {
	values := []interface{}{MaxInt8, MinInt8,
		MaxInt16, MinInt16,
		MaxInt24, MinInt24,
		MaxInt32, MinInt32,
		MaxInt40, MinInt40,
		MaxInt48, MinInt48,
		MaxInt56, MinInt56,
		MaxInt64, MinInt64,
		MaxUint8, MaxUint16, MaxUint24, MaxUint32, MaxUint40, MaxUint48, MaxUint56, uint64(MaxUint64)}
	// names must stay index-aligned with values above.
	names := []string{"MaxInt8", "MinInt8",
		"MaxInt16", "MinInt16",
		"MaxInt24", "MinInt24",
		"MaxInt32", "MinInt32",
		"MaxInt40", "MinInt40",
		"MaxInt48", "MinInt48",
		"MaxInt56", "MinInt56",
		"MaxInt64", "MinInt64",
		"MaxUint8", "MaxUint16", "MaxUint24", "MaxUint32", "MaxUint40", "MaxUint48", "MaxUint56", "MaxUint64"}
	for i := range values {
		fmt.Printf("// %s is %d\n", names[i], values[i])
	}
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
)
// Shared state for a competitive-programming solution: filter
// rectangles larger than a base (w, h) and find the longest chain that
// strictly increases in both dimensions.
var in = bufio.NewScanner(os.Stdin)
var n, w, h, max, maxi, elen int // input count, base size, best chain length, its end index, candidate count
var e [5001][3]int               // candidates: width, height, original input index
var dep [5001]int                // dep[i]: longest chain ending at sorted element i
var prev [5001]int               // predecessor in the best chain (-1 = none)
var ret [5001]int                // reconstructed chain (original indices, reversed)
type Matrix [5001][3]int
// init switches the scanner to word tokens and seeds the DP arrays:
// every element is a chain of length 1 with no predecessor.
func init() {
	in.Split(bufio.ScanWords)
	for i := 0; i < len(dep); i++ {
		dep[i] = 1
	}
	for i := 0; i < len(prev); i++ {
		prev[i] = -1
	}
}
// Matrix implements sort.Interface ordered by width (column 0).
// Len deliberately returns the global element count elen, not the
// oversized backing array's length. Swap uses a pointer receiver so the
// sort mutates the caller's array; Len/Less use value receivers.
// NOTE(review): each Len/Less call copies the whole 5001x3 array —
// harmless here, but worth knowing.
func (m Matrix) Len() int           { return elen }
func (m Matrix) Less(i, j int) bool { return m[i][0] < m[j][0] }
func (m *Matrix) Swap(i, j int)     { m[i], m[j] = m[j], m[i] }
// main reads n rectangles plus a base (w, h), keeps those strictly
// larger than the base in both dimensions, sorts by width, and runs an
// O(n^2) longest-increasing-chain DP over (width, height). It prints
// the chain length followed by the 1-based input indices of the chain.
func main() {
	n, w, h = readInt(), readInt(), readInt()
	for i := 0; i < n; i++ {
		tw, th := readInt(), readInt()
		// Only rectangles strictly containing the base qualify.
		if tw > w && th > h {
			e[elen][0], e[elen][1], e[elen][2] = tw, th, i
			elen++
		}
	}
	if elen == 0 {
		fmt.Println("0")
		return
	}
	m := Matrix(e)
	sort.Sort(&m)
	// DP: dep[i] = longest chain ending at i; prev[i] = its predecessor.
	for i := 1; i < elen; i++ {
		for j := 0; j < i; j++ {
			if m[i][0] > m[j][0] && m[i][1] > m[j][1] && dep[i] < dep[j]+1 {
				dep[i] = dep[j] + 1
				prev[i] = j
			}
		}
	}
	// Locate the overall best chain end.
	for i := 0; i < elen; i++ {
		if max < dep[i] {
			max = dep[i]
			maxi = i
		}
	}
	// Walk predecessor links to recover the chain (stored reversed in ret).
	i := maxi
	j := 0
	for {
		ret[j] = m[i][2]
		i = prev[i]
		if i == -1 {
			break
		}
		j++
	}
	fmt.Printf("%d\n", max)
	// Print original 1-based indices from chain start to chain end.
	for i := j; i >= 0; i-- {
		fmt.Printf("%d ", ret[i]+1)
	}
}
// readInt scans the next whitespace-delimited token from stdin and
// returns it as an int (0 on parse failure; input is assumed valid).
func readInt() int {
	in.Scan()
	v, _ := strconv.Atoi(in.Text())
	return v
}
|
package main
import (
"bytes"
"fmt"
"sort"
"strings"
)
// main demonstrates comma-grouping on a signed integer string.
func main() {
	out := comma("-12345123154567")
	fmt.Println(out)
}
// comma inserts thousands separators into a numeric string. It handles
// an optional leading '-' and an optional fractional part, which is
// appended untouched. s must be non-empty.
//
// Bug fixed: the original comma-placement condition
// (`sign <= 0 || len(buf.String()) != 1`) emitted a leading comma for
// unsigned numbers whose digit count is a multiple of three, e.g.
// comma("123456") returned ",123,456". A comma now goes in only when
// digits (beyond the sign) are already buffered.
func comma(s string) string {
	n := len(s)
	const size = 3
	sign := 0
	var buf bytes.Buffer
	if s[0] == '-' {
		sign = 1
	}
	// Group only the integer part; everything from '.' on is a suffix.
	if strings.ContainsRune(s, '.') {
		n = strings.IndexRune(s, '.')
	}
	// r is the length of the first (possibly short) digit group.
	r := (n - sign) % size
	buf.WriteString(s[:r+sign])
	for i := r + sign; i < n; i += size {
		// Separate groups only after at least one digit is buffered.
		if buf.Len() > sign {
			buf.WriteByte(',')
		}
		buf.WriteString(s[i : i+size])
	}
	if n != len(s) {
		buf.WriteString(s[n:])
	}
	return buf.String()
}
// sortBy adapts a rune slice to sort.Interface.
type sortBy []rune

func (a sortBy) Len() int           { return len(a) }
func (a sortBy) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a sortBy) Less(i, j int) bool { return a[i] < a[j] }

// isAnagram reports whether s1 and s2 contain exactly the same runes
// (each is a rearrangement of the other). A byte-length check rejects
// obvious mismatches before sorting.
func isAnagram(s1, s2 string) bool {
	if len(s1) != len(s2) {
		return false
	}
	r1, r2 := sortBy(strToSlice(s1)), sortBy(strToSlice(s2))
	sort.Sort(r1)
	sort.Sort(r2)
	return string(r1) == string(r2)
}

// strToSlice returns the runes of s as a slice.
func strToSlice(s string) []rune {
	return []rune(s)
}
|
package run
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"regexp"
"strings"
"github.com/pgavlin/warp/go_wasm_exec"
"github.com/pgavlin/warp/load"
"github.com/pgavlin/warp/wasi"
"github.com/spf13/cobra"
)
// preopens is a flag value collecting "--mount" specs of the form
// [to=]from(,flags): values holds the parsed wasi.Preopen entries and
// strings the raw arguments (used by String).
type preopens struct {
	values  []wasi.Preopen
	strings []string
}

// preopenRE validates a spec: optional "to=" prefix, a host path, then
// any number of ",flag" suffixes.
// NOTE(review): a repeated capture group like (,[^,]+)* retains only its
// LAST repetition in FindStringSubmatch — match[3] is not the full flag
// list, so flags must not be parsed from it.
var preopenRE = regexp.MustCompile(`^([^=]+=)?([^,]+)(,[^,]+)*$`)
// parseOne parses one preopen spec of the form [to=]from(,flags) into a
// wasi.Preopen. Rights default to AllRights; each flag then adds (name),
// removes (-name) or replaces (=name) rights, with an "inherit:" prefix
// targeting the inheritable set instead.
//
// Fixes relative to the original:
//   - match[1] includes the trailing '='; it is now trimmed so the guest
//     path no longer literally ends in "=".
//   - flags are recovered from the raw spec instead of match[3]: a
//     repeated regexp group keeps only its last repetition, and splitting
//     match[3] (which begins with ",") produced a leading empty flag
//     that always hit the "unknown preopen flag" default.
func (p *preopens) parseOne(s string) (wasi.Preopen, error) {
	match := preopenRE.FindStringSubmatch(s)
	if len(match) == 0 {
		return wasi.Preopen{}, fmt.Errorf("malformed preopen '%v': preopens must be of the form (to=)from(,flags)", s)
	}
	to, from := strings.TrimSuffix(match[1], "="), match[2]
	if to == "" {
		to = from
	}
	preopen := wasi.Preopen{
		FSPath:  from,
		Path:    to,
		Rights:  wasi.AllRights,
		Inherit: wasi.AllRights,
	}
	// Everything past the "to=" and "from" segments is the ",flag,..." tail.
	rest := s[len(match[1])+len(match[2]):]
	if rest != "" {
		for _, f := range strings.Split(rest[1:], ",") {
			r := &preopen.Rights
			if strings.HasPrefix(f, "inherit:") {
				r, f = &preopen.Inherit, f[len("inherit:"):]
			}
			switch f {
			case "all":
				*r |= wasi.AllRights
			case "dir":
				*r |= wasi.DirectoryRights
			case "file":
				*r |= wasi.FileRights
			case "fd_datasync":
				*r |= wasi.RightsFdDatasync
			case "fd_read":
				*r |= wasi.RightsFdRead
			case "fd_seek":
				*r |= wasi.RightsFdSeek
			case "fd_fdstat_set_flags":
				*r |= wasi.RightsFdFdstatSetFlags
			case "fd_sync":
				*r |= wasi.RightsFdSync
			case "fd_tell":
				*r |= wasi.RightsFdTell
			case "fd_write":
				*r |= wasi.RightsFdWrite
			case "fd_advise":
				*r |= wasi.RightsFdAdvise
			case "fd_allocate":
				*r |= wasi.RightsFdAllocate
			case "path_create_directory":
				*r |= wasi.RightsPathCreateDirectory
			case "path_create_file":
				*r |= wasi.RightsPathCreateFile
			case "path_link_source":
				*r |= wasi.RightsPathLinkSource
			case "path_link_target":
				*r |= wasi.RightsPathLinkTarget
			case "path_open":
				*r |= wasi.RightsPathOpen
			case "fd_readdir":
				*r |= wasi.RightsFdReaddir
			case "path_readlink":
				*r |= wasi.RightsPathReadlink
			case "path_rename_source":
				*r |= wasi.RightsPathRenameSource
			case "path_rename_target":
				*r |= wasi.RightsPathRenameTarget
			case "path_filestat_get":
				*r |= wasi.RightsPathFilestatGet
			case "path_filestat_set_size":
				*r |= wasi.RightsPathFilestatSetSize
			case "path_filestat_set_times":
				*r |= wasi.RightsPathFilestatSetTimes
			case "fd_filestat_get":
				*r |= wasi.RightsFdFilestatGet
			case "fd_filestat_set_size":
				*r |= wasi.RightsFdFilestatSetSize
			case "fd_filestat_set_times":
				*r |= wasi.RightsFdFilestatSetTimes
			case "path_symlink":
				*r |= wasi.RightsPathSymlink
			case "path_remove_directory":
				*r |= wasi.RightsPathRemoveDirectory
			case "path_unlink_file":
				*r |= wasi.RightsPathUnlinkFile
			case "poll_fd_readwrite":
				*r |= wasi.RightsPollFdReadwrite
			case "sock_shutdown":
				*r |= wasi.RightsSockShutdown
			case "=all":
				*r = wasi.AllRights
			case "=dir":
				*r = wasi.DirectoryRights
			case "=file":
				*r = wasi.FileRights
			case "=ro":
				*r = wasi.ReadOnlyRights
			case "-all":
				*r &^= wasi.AllRights
			case "-dir":
				*r &^= wasi.DirectoryRights
			case "-file":
				*r &^= wasi.FileRights
			case "-fd_datasync":
				*r &^= wasi.RightsFdDatasync
			case "-fd_read":
				*r &^= wasi.RightsFdRead
			case "-fd_seek":
				*r &^= wasi.RightsFdSeek
			case "-fd_fdstat_set_flags":
				*r &^= wasi.RightsFdFdstatSetFlags
			case "-fd_sync":
				*r &^= wasi.RightsFdSync
			case "-fd_tell":
				*r &^= wasi.RightsFdTell
			case "-fd_write":
				*r &^= wasi.RightsFdWrite
			case "-fd_advise":
				*r &^= wasi.RightsFdAdvise
			case "-fd_allocate":
				*r &^= wasi.RightsFdAllocate
			case "-path_create_directory":
				*r &^= wasi.RightsPathCreateDirectory
			case "-path_create_file":
				*r &^= wasi.RightsPathCreateFile
			case "-path_link_source":
				*r &^= wasi.RightsPathLinkSource
			case "-path_link_target":
				*r &^= wasi.RightsPathLinkTarget
			case "-path_open":
				*r &^= wasi.RightsPathOpen
			case "-fd_readdir":
				*r &^= wasi.RightsFdReaddir
			case "-path_readlink":
				*r &^= wasi.RightsPathReadlink
			case "-path_rename_source":
				*r &^= wasi.RightsPathRenameSource
			case "-path_rename_target":
				*r &^= wasi.RightsPathRenameTarget
			case "-path_filestat_get":
				*r &^= wasi.RightsPathFilestatGet
			case "-path_filestat_set_size":
				*r &^= wasi.RightsPathFilestatSetSize
			case "-path_filestat_set_times":
				*r &^= wasi.RightsPathFilestatSetTimes
			case "-fd_filestat_get":
				*r &^= wasi.RightsFdFilestatGet
			case "-fd_filestat_set_size":
				*r &^= wasi.RightsFdFilestatSetSize
			case "-fd_filestat_set_times":
				*r &^= wasi.RightsFdFilestatSetTimes
			case "-path_symlink":
				*r &^= wasi.RightsPathSymlink
			case "-path_remove_directory":
				*r &^= wasi.RightsPathRemoveDirectory
			case "-path_unlink_file":
				*r &^= wasi.RightsPathUnlinkFile
			case "-poll_fd_readwrite":
				*r &^= wasi.RightsPollFdReadwrite
			case "-sock_shutdown":
				*r &^= wasi.RightsSockShutdown
			default:
				return wasi.Preopen{}, fmt.Errorf("unknown preopen flag '%v'", f)
			}
		}
	}
	return preopen, nil
}
// String renders the raw mount specs joined by ';' (flag.Value/pflag).
func (p *preopens) String() string {
	return strings.Join(p.strings, ";")
}
// Set parses one preopen spec and records both the parsed value and the
// raw argument (kept for String). Implements the pflag value interface.
func (p *preopens) Set(s string) error {
	preopen, err := p.parseOne(s)
	if err != nil {
		return err
	}
	p.values = append(p.values, preopen)
	p.strings = append(p.strings, s)
	return nil
}
// Type names the flag's value type for pflag help output.
func (p *preopens) Type() string {
	return "mount"
}
// Command builds the `run` cobra command, which loads a WebAssembly
// module from disk and executes it in either a Go (wasm_exec) or WASI
// environment, with optional debugging and execution tracing.
func Command() *cobra.Command {
	var preopen preopens
	var debug bool
	var trace string
	command := &cobra.Command{
		Use:   "run [path to module]",
		Short: "Run WebAssembly commands",
		Long:  "Run WebAssembly commands inside a WASI- or Go-compliant environment.",
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) < 1 {
				return errors.New("expected at least one argument")
			}
			mod, err := load.LoadFile(args[0])
			if err != nil {
				return err
			}
			// A module importing from the "go" namespace was built by the
			// Go toolchain and needs the wasm_exec host environment.
			isGo := false
			if mod.Import != nil {
				for _, entry := range mod.Import.Entries {
					if entry.ModuleName == "go" {
						isGo = true
						break
					}
				}
			}
			def, err := load.Intepret(mod)
			if err != nil {
				return err
			}
			// Pass the host environment through to the guest.
			env := map[string]string{}
			for _, v := range os.Environ() {
				kvp := strings.SplitN(v, "=", 2)
				env[kvp[0]] = kvp[1]
			}
			// Module name = file path minus its extension.
			ext := filepath.Ext(args[0])
			name := args[0][:len(args[0])-len(ext)]
			var traceWriter io.Writer
			if trace != "" {
				traceFile, err := os.Create(trace)
				if err != nil {
					return err
				}
				defer traceFile.Close()
				w := bufio.NewWriter(traceFile)
				defer w.Flush()
				// Flush the trace on interrupt before exiting. The channel
				// must be buffered: signal.Notify does not block when
				// sending, so an unbuffered channel can drop the signal
				// (fix — the original used make(chan os.Signal)).
				c := make(chan os.Signal, 1)
				signal.Notify(c, os.Interrupt, os.Kill)
				go func() {
					for range c {
						w.Flush()
						os.Exit(-1)
					}
				}()
				traceWriter = w
			}
			if isGo {
				return go_wasm_exec.Run(name, def, &go_wasm_exec.Options{
					Env:      env,
					Args:     args[1:],
					Debug:    debug,
					Trace:    traceWriter,
					Resolver: load.NewFSResolver(os.DirFS("."), load.Intepret),
				})
			}
			return wasi.Run(name, def, &wasi.RunOptions{
				Options: &wasi.Options{
					Env:     env,
					Args:    args[1:],
					Preopen: preopen.values,
				},
				Debug:    debug,
				Trace:    traceWriter,
				Resolver: load.NewFSResolver(os.DirFS("."), load.Intepret),
			})
		},
	}
	command.PersistentFlags().VarP(&preopen, "mount", "m", "list of directories to mount in the form (to=)from(,flags)")
	command.PersistentFlags().BoolVarP(&debug, "debug", "d", false, "enable debugging support")
	command.PersistentFlags().StringVarP(&trace, "trace", "t", "", "write an execution trace to the specified file. Implies -d.")
	return command
}
|
// generated by wsp, DO NOT EDIT.
package main
import "net/http"
import "time"
import "github.com/simplejia/namesrv/controller/admin"
import "github.com/simplejia/namesrv/controller"
import "github.com/simplejia/namesrv/filter"
// init registers all generated HTTP routes. Every handler follows the
// same template: record the start time, construct the controller, defer
// a filter.Boss call (which also receives any panic via recover), then
// invoke the controller method for the route.
// This file is generated (see the header) — regenerate rather than edit.
func init() {
	http.HandleFunc("/admin/relation/create", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(admin.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/admin/relation/create"}); !ok {
				return
			}
		}()
		c.Create(w, r)
	})
	http.HandleFunc("/admin/relation/delete", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(admin.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/admin/relation/delete"}); !ok {
				return
			}
		}()
		c.Delete(w, r)
	})
	http.HandleFunc("/admin/relation/list", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(admin.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/admin/relation/list"}); !ok {
				return
			}
		}()
		c.List(w, r)
	})
	http.HandleFunc("/admin/relation/update", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(admin.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/admin/relation/update"}); !ok {
				return
			}
		}()
		c.Update(w, r)
	})
	http.HandleFunc("/admin/stat/list", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(admin.Stat)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/admin/stat/list"}); !ok {
				return
			}
		}()
		c.List(w, r)
	})
	http.HandleFunc("/relation/getsFromIp", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(controller.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/relation/getsFromIp"}); !ok {
				return
			}
		}()
		c.GetsFromIp(w, r)
	})
	http.HandleFunc("/relation/getsFromName", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(controller.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/relation/getsFromName"}); !ok {
				return
			}
		}()
		c.GetsFromName(w, r)
	})
	http.HandleFunc("/relation/reportOff", func(w http.ResponseWriter, r *http.Request) {
		t := time.Now()
		_ = t
		var e interface{}
		c := new(controller.Relation)
		defer func() {
			e = recover()
			if ok := filter.Boss(w, r, map[string]interface{}{"__T__": t, "__C__": c, "__E__": e, "__P__": "/relation/reportOff"}); !ok {
				return
			}
		}()
		c.ReportOff(w, r)
	})
}
// Package retry provides util functions to retry fail actions.
package retry
import (
"context"
"errors"
"time"
)
// ErrNeedRetry is a placeholder helper, in case you have no error to return, such as bool status, etc.
var ErrNeedRetry = errors.New("need retry")
// State controls whether the fail action should continue retrying.
// The zero value is Continue.
type State uint8

const (
	// Continue continues retrying the fail action.
	Continue State = iota
	// StopWithErr stops retrying the fail action,
	// returns the error which the RetryFunc returns.
	StopWithErr
	// StopWithNil stops retrying the fail action, returns nil.
	StopWithNil
)
// Retrier retrys fail actions with backoff.
type Retrier struct {
backoffs []time.Duration
}
// New creates a new Retrier with backoffs, the backoffs is the wait
// time before each retrying.
// The count of retrying will be len(backoffs), the first call
// is not counted in retrying.
func New(backoffs []time.Duration) Retrier {
return Retrier{backoffs: append(backoffs, 0)}
}
// Run keeps calling the RetryFunc if it returns (Continue, non-nil-err),
// otherwise it will stop retrying. It is goroutine safe unless you do something wrong ^_^.
//
// Per attempt, in order of precedence:
//   - StopWithErr: return try's error (whatever it is).
//   - StopWithNil: return nil.
//   - Continue with a nil error: success, return nil.
//   - Continue with an error: wait for the backoff (or context
//     cancellation) and try again; the last error is returned when
//     the backoffs are exhausted.
//
// New appends a trailing zero backoff, so the final attempt is not
// followed by a wait.
func (r Retrier) Run(ctx context.Context, try func() (State, error)) (err error) {
	var state State
	cancelc := ctx.Done()
	for _, backoff := range r.backoffs {
		state, err = try()
		switch state {
		case StopWithErr:
			return err
		case StopWithNil:
			return nil
		default: // Continue
		}
		if err == nil {
			return nil
		}
		if backoff > 0 {
			// Sleep out the backoff, but give cancellation priority.
			select {
			case <-cancelc:
				return ctx.Err()
			case <-time.After(backoff):
			}
		} else {
			// Zero backoff: still honor a canceled context without blocking.
			select {
			case <-cancelc:
				return ctx.Err()
			default:
			}
		}
	}
	return err
}
// Retry is a shortcut for Retrier.Run with context.Background().
func Retry(backoffs []time.Duration, try func() (State, error)) error {
	r := New(backoffs)
	return r.Run(context.Background(), try)
}
// ConstantBackoffs creates a list of n backoffs that all equal backoff.
// The one extra capacity slot lets New append its sentinel without
// reallocating.
func ConstantBackoffs(n int, backoff time.Duration) []time.Duration {
	backoffs := make([]time.Duration, n, n+1)
	if backoff <= 0 {
		return backoffs
	}
	for i := range backoffs {
		backoffs[i] = backoff
	}
	return backoffs
}
// ZeroBackoffs creates a list of backoffs with zero values.
// Useful for "retry immediately, n times"; Run still checks context
// cancellation between attempts even with zero waits.
func ZeroBackoffs(n int) []time.Duration {
	return ConstantBackoffs(n, 0)
}
// ExponentialBackoffs creates a list of n backoffs that double each
// step: backoff, 2*backoff, 4*backoff, ... The extra capacity slot lets
// New append its sentinel without reallocating.
func ExponentialBackoffs(n int, backoff time.Duration) []time.Duration {
	backoffs := make([]time.Duration, n, n+1)
	if backoff <= 0 {
		return backoffs
	}
	next := backoff
	for i := range backoffs {
		backoffs[i] = next
		next *= 2
	}
	return backoffs
}
|
package services
import (
"io"
"net/http"
"os"
"path/filepath"
)
// UploadFileService stores the "file" multipart form field under the
// (sanitized) client-supplied file name in the current directory.
// The original panicked on any failure and passed the raw client
// filename to the filesystem, allowing "../" path traversal.
func UploadFileService(w http.ResponseWriter, r *http.Request) {
	file, handler, err := r.FormFile("file")
	if err != nil {
		http.Error(w, "missing or invalid file field", http.StatusBadRequest)
		return
	}
	defer file.Close()

	// filepath.Base strips directory components so a hostile name such
	// as "../../etc/passwd" cannot escape the target directory.
	name := filepath.Base(handler.Filename)
	absPath, err := filepath.Abs(name)
	if err != nil {
		http.Error(w, "invalid file name", http.StatusBadRequest)
		return
	}
	// O_TRUNC: without it, overwriting a longer existing file would
	// leave its stale tail behind.
	f, err := os.OpenFile(absPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		http.Error(w, "could not store file", http.StatusInternalServerError)
		return
	}
	defer f.Close()
	if _, err := io.Copy(f, file); err != nil {
		http.Error(w, "could not store file", http.StatusInternalServerError)
		return
	}
}
|
package types
import (
tmbytes "github.com/tendermint/tendermint/libs/bytes"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// Route identifiers for the service module's legacy querier.
const (
	QueryDefinition = "definition" // query definition
	QueryBinding = "binding" // query binding
	QueryBindings = "bindings" // query bindings
	QueryWithdrawAddress = "withdraw_address" // query withdrawal address
	QueryRequest = "request" // query request
	QueryRequests = "requests" // query requests
	QueryResponse = "response" // query response
	QueryRequestContext = "context" // query request context
	QueryRequestsByReqCtx = "requests_by_ctx" // query requests by the request context
	QueryResponses = "responses" // query responses
	QueryEarnedFees = "fees" // query earned fees
	QuerySchema = "schema" // query schema
	QueryParameters = "parameters" // query parameters
)
// QueryDefinitionParams defines the params to query a service definition
type QueryDefinitionParams struct {
	ServiceName string
}

// QueryBindingParams defines the params to query a service binding
type QueryBindingParams struct {
	ServiceName string
	Provider sdk.AccAddress // provider account bound to the service
}

// QueryBindingsParams defines the params to query all bindings of a service definition with an optional owner
type QueryBindingsParams struct {
	ServiceName string
	Owner sdk.AccAddress // optional; zero value means "any owner"
}

// QueryWithdrawAddressParams defines the params to query the withdrawal address of an owner
type QueryWithdrawAddressParams struct {
	Owner sdk.AccAddress
}

// QueryRequestParams defines the params to query the request by ID
type QueryRequestParams struct {
	RequestID []byte
}

// QueryRequestsParams defines the params to query active requests for a service binding
type QueryRequestsParams struct {
	ServiceName string
	Provider sdk.AccAddress
}

// QueryResponseParams defines the params to query the response to a request
type QueryResponseParams struct {
	RequestID tmbytes.HexBytes
}

// QueryRequestContextParams defines the params to query the request context
type QueryRequestContextParams struct {
	RequestContextID tmbytes.HexBytes
}

// QueryRequestsByReqCtxParams defines the params to query active requests by the request context ID
type QueryRequestsByReqCtxParams struct {
	RequestContextID tmbytes.HexBytes
	BatchCounter uint64 // batch within the request context
}

// QueryResponsesParams defines the params to query active responses by the request context ID
type QueryResponsesParams struct {
	RequestContextID tmbytes.HexBytes
	BatchCounter uint64 // batch within the request context
}

// QueryEarnedFeesParams defines the params to query the earned fees for a provider
type QueryEarnedFeesParams struct {
	Provider sdk.AccAddress
}

// QuerySchemaParams defines the params to query the system schemas by the schema name
type QuerySchemaParams struct {
	SchemaName string
}
|
package executor
import "errors"
var (
	// ErrTaskRejected task was rejected because the task queue has reached its maximum size.
	ErrTaskRejected = errors.New("executor: task rejected")
	// ErrTaskCanceled task was canceled by timeout or by caller (with context.Context).
	ErrTaskCanceled = errors.New("executor: task canceled")
)
// Local is per-task scratch state: an identifier, a human-readable
// name, and an arbitrary key/value context.
type Local interface {
	ID() int64
	Name() string
	Put(interface{}, interface{})
	Get(interface{}) interface{}
}

// local is the only Local implementation.
type local struct {
	id   int64
	name string
	ctx  map[interface{}]interface{}
}

// ID returns the task identifier.
func (l *local) ID() int64 { return l.id }

// Name returns the task name.
func (l *local) Name() string { return l.name }

// Put stores value under key in the task context.
func (l *local) Put(key, value interface{}) { l.ctx[key] = value }

// Get returns the value stored under key, or nil when absent.
func (l *local) Get(key interface{}) interface{} { return l.ctx[key] }

// NewLocal builds a Local with the given id and name and an empty,
// pre-sized context map.
func NewLocal(id int64, name string) Local {
	return &local{
		id:   id,
		name: name,
		ctx:  make(map[interface{}]interface{}, 32),
	}
}
// LocalRunner is a task that runs with a per-task Local context and produces no result.
type LocalRunner func(Local)

// LocalCaller is a task that produces a result (or error) from its Local context.
type LocalCaller func(Local) (interface{}, error)

// Handler receives the result and error of an asynchronously invoked task.
type Handler func(interface{}, error)
// Executor runs tasks, each carrying its own Local context.
type Executor interface {
	// Submit enqueues a fire-and-forget task.
	Submit(LocalRunner) error
	// Invoke runs a task synchronously and returns its result.
	Invoke(LocalCaller) (interface{}, error)
	// InvokeAsync runs a task and delivers its result to callback.
	// The original declaration mixed named and unnamed parameters
	// ("InvokeAsync(LocalCaller, callback Handler)"), which is a
	// compile error in Go; both parameters are now named.
	InvokeAsync(caller LocalCaller, callback Handler)
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync
import (
"context"
"fmt"
"io"
"github.com/pkg/errors"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
)
// SyncerMux fans a sync request out to several Syncers.
type SyncerMux []Syncer

// Sync runs every syncer for item. It returns an error only when all of
// them fail; a partial failure is logged as a warning instead.
func (s SyncerMux) Sync(ctx context.Context, out io.Writer, item *Item) error {
	var errs []error
	for _, syncer := range s {
		if err := syncer.Sync(ctx, out, item); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		err := fmt.Errorf("sync failed for artifact %q", item.Image)
		for _, e := range errs {
			err = errors.Wrap(err, e.Error())
		}
		// Return an error only if all syncers fail
		if len(errs) == len(s) {
			return err
		}
		// Otherwise log the error as a warning. Use an explicit verb:
		// err.Error() may contain '%' characters (quoted paths etc.),
		// which Warnf would otherwise interpret as format directives.
		log.Entry(ctx).Warnf("%s", err)
	}
	return nil
}
|
package controllers
import (
"github.com/gin-gonic/gin"
"net/http"
"github.com/mickaelmagniez/elastic-alert/store"
)
// ElasticsController serves read-only views of the elastic cluster config.
type ElasticsController struct{}

// GetServers returns the configured elastic servers as JSON.
func (ElasticsController) GetServers(c *gin.Context) {
	servers, err := store.GetElasticServers()
	if err != nil {
		// err.Error(): a bare error value marshals to "{}" in JSON,
		// hiding the actual cause from the client.
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, servers)
}
// GetIndices returns the indices of the server named by the "url" query
// parameter as JSON.
func (ElasticsController) GetIndices(c *gin.Context) {
	indices, err := store.GetElasticIndicesOfServer(c.Query("url"))
	if err != nil {
		// err.Error(): a bare error value marshals to "{}" in JSON.
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, indices)
}
// GetTypes returns the mapping types of the index selected by the "url"
// and "index" query parameters as JSON.
func (ElasticsController) GetTypes(c *gin.Context) {
	types, err := store.GetElasticTypesOfIndex(c.Query("url"), c.Query("index"))
	if err != nil {
		// err.Error(): a bare error value marshals to "{}" in JSON.
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, types)
}
|
package main
import (
"fmt"
"github.com/aliyun/aliyun-datahub-sdk-go/datahub"
)
// main initializes the shared datahub client.
// NOTE(review): dh, accessId, accessKey and endpoint are not declared in
// this file; they are presumably package-level variables defined in a
// sibling file of this example — confirm before reuse.
func main() {
	dh = datahub.New(accessId, accessKey, endpoint)
}
// createSubscription registers a new subscription on the topic and
// prints the outcome.
func createSubscription() {
	csr, err := dh.CreateSubscription(projectName, topicName, "sub comment")
	if err == nil {
		fmt.Println("create subscription successful")
		fmt.Println(*csr)
		return
	}
	fmt.Println("create subscription failed")
	fmt.Println(err)
}
// getSubscription fetches the subscription by id and prints it.
func getSubscription() {
	gs, err := dh.GetSubscription(projectName, topicName, subId)
	if err == nil {
		fmt.Println("get subscription successful")
		fmt.Println(gs)
		return
	}
	fmt.Println("get subscription failed")
	fmt.Println(err)
}
// delSubscription removes the subscription; a missing subscription is
// treated as success (idempotent delete).
func delSubscription() {
	if _, err := dh.DeleteSubscription(projectName, topicName, subId); err != nil {
		if _, ok := err.(*datahub.ResourceNotFoundError); ok {
			fmt.Println("subscription not found")
		} else {
			fmt.Println("delete subscription failed")
			// Print the cause, consistent with the other helpers; the
			// original dropped it here.
			fmt.Println(err)
			return
		}
	}
	fmt.Println("delete subscription successful")
}
// listSubscription prints one page of the topic's subscriptions.
func listSubscription() {
	const (
		pageIndex = 1
		pageSize  = 5
	)
	ls, err := dh.ListSubscription(projectName, topicName, pageIndex, pageSize)
	if err != nil {
		fmt.Println("get subscription list failed")
		fmt.Println(err)
		return
	}
	fmt.Println("get subscription list successful")
	for _, sub := range ls.Subscriptions {
		fmt.Println(sub)
	}
}
// updateSubscription replaces the subscription comment.
func updateSubscription() {
	_, err := dh.UpdateSubscription(projectName, topicName, subId, "new sub comment")
	if err != nil {
		fmt.Println("update subscription comment failed")
		fmt.Println(err)
		return
	}
	fmt.Println("update subscription comment successful")
}
// updateSubState takes the subscription offline and, if that succeeded,
// schedules it to be put back online when the function returns.
func updateSubState() {
	if _, err := dh.UpdateSubscriptionState(projectName, topicName, subId, datahub.SUB_OFFLINE); err != nil {
		fmt.Println("update subscription state failed")
		fmt.Println(err)
		return
	}
	// Restore the subscription on exit; the result is intentionally ignored.
	defer dh.UpdateSubscriptionState(projectName, topicName, subId, datahub.SUB_ONLINE)
	fmt.Println("update subscription state successful")
}
|
package main
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// geetest public captcha id (demo credentials)
const CAPTCHA_ID string = "647f5ed2ed8acb4be36784e01556bb71"

// geetest private captcha key (demo credentials)
const CAPTCHA_KEY string = "b09a7aafbfd83f73b35a9b530d0337bf"

// geetest API server address
const API_SERVER string = "http://gcaptcha4.geetest.com"

// geetest validation endpoint, with the captcha id pre-bound
const URL = API_SERVER + "/validate" + "?captcha_id=" + CAPTCHA_ID
// index renders the demo page static/index.html.
// The original silently served an empty 200 when the template failed
// to parse or execute.
func index(response http.ResponseWriter, req *http.Request) {
	html_response, err := template.ParseFiles("static/index.html")
	if err != nil {
		http.Error(response, "template error", http.StatusInternalServerError)
		return
	}
	if err := html_response.Execute(response, ""); err != nil {
		// Headers are already written; we can only record the failure.
		fmt.Println(err)
	}
}
// login validates a captcha result against the geetest v4 API.
//
// It expects a GET request carrying lot_number, captcha_output,
// pass_token and gen_time, signs the lot_number with the private key,
// forwards everything to geetest, and writes "success"/"fail" (or a
// pass-through message on upstream failure) back to the caller.
func login(writer http.ResponseWriter, request *http.Request) {
	if request.Method != "GET" {
		writer.WriteHeader(405)
		return
	}

	// Data sent back by the front end. Values.Get returns "" for a
	// missing key; the original indexed datas[...][0] and panicked
	// whenever a parameter was absent.
	datas := request.URL.Query()
	lot_number := datas.Get("lot_number")
	captcha_output := datas.Get("captcha_output")
	pass_token := datas.Get("pass_token")
	gen_time := datas.Get("gen_time")

	// Build the signature: standard HMAC with the verification flow
	// number (lot_number) as the message and the private captcha key as
	// the key, hashed with SHA-256.
	sign_token := hmac_encode(CAPTCHA_KEY, lot_number)

	// Forward the front-end fields plus the signature to geetest.
	form_data := make(url.Values)
	form_data["lot_number"] = []string{lot_number}
	form_data["captcha_output"] = []string{captcha_output}
	form_data["pass_token"] = []string{pass_token}
	form_data["gen_time"] = []string{gen_time}
	form_data["sign_token"] = []string{sign_token}

	// POST with a 5s timeout.
	cli := http.Client{Timeout: time.Second * 5}
	resp, err := cli.PostForm(URL, form_data)
	if err != nil || resp.StatusCode != 200 {
		// On an upstream outage, let the request through so captcha
		// trouble does not block the business flow.
		if resp != nil {
			resp.Body.Close() // was leaked on the non-200 path
		}
		fmt.Println("服务接口异常: ")
		fmt.Println(err)
		writer.Write([]byte("geetest server error"))
		return
	}
	defer resp.Body.Close()

	res_json, _ := ioutil.ReadAll(resp.Body)
	var res_map map[string]interface{}
	// Response JSON looks like {"result": "success", "reason": "", "captcha_args": {}}.
	if err := json.Unmarshal(res_json, &res_map); err != nil {
		// The original tested err only inside the err == nil branch, so
		// this failure path was dead code and wrote no response at all.
		fmt.Println("Json数据解析错误")
		writer.Write([]byte("fail"))
		return
	}
	result := res_map["result"]
	if result == "success" {
		fmt.Println("验证通过")
		writer.Write([]byte("success"))
	} else {
		reason := res_map["reason"]
		fmt.Print("验证失败: ")
		fmt.Print(reason)
		writer.Write([]byte("fail"))
	}
}
// hmac_encode returns the hex-encoded HMAC-SHA256 of data under key
// (used with CAPTCHA_KEY and lot_number).
func hmac_encode(key string, data string) string {
	digest := hmac.New(sha256.New, []byte(key))
	digest.Write([]byte(data))
	raw := digest.Sum(nil)
	return hex.EncodeToString(raw)
}
// main registers the demo handlers and serves on :8001.
func main() {
	http.HandleFunc("/", index)
	http.HandleFunc("/login", login)
	// ListenAndServe only returns on failure; surface the reason
	// instead of exiting silently as the original did.
	if err := http.ListenAndServe(":8001", nil); err != nil {
		fmt.Println(err)
	}
}
|
package problem0448
// findDisappearedNumbers returns, in ascending order, every value in
// [1, len(nums)] that does not occur in nums. It works in O(1) extra
// space by cyclically sorting nums in place (the input is mutated):
// each value v is parked at index v-1, so any index left holding the
// wrong value marks a missing number.
func findDisappearedNumbers(nums []int) []int {
	for i := range nums {
		for target := nums[i] - 1; nums[target] != nums[i]; target = nums[i] - 1 {
			nums[i], nums[target] = nums[target], nums[i]
		}
	}
	missing := []int{}
	for idx, v := range nums {
		if v != idx+1 {
			missing = append(missing, idx+1)
		}
	}
	return missing
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockowner
import (
"encoding/binary"
"golang.org/x/time/rate"
"github.com/bitmark-inc/bitmarkd/blockrecord"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/merkle"
"github.com/bitmark-inc/bitmarkd/messagebus"
"github.com/bitmark-inc/bitmarkd/mode"
"github.com/bitmark-inc/bitmarkd/pay"
"github.com/bitmark-inc/bitmarkd/reservoir"
"github.com/bitmark-inc/bitmarkd/rpc/ratelimit"
"github.com/bitmark-inc/bitmarkd/storage"
"github.com/bitmark-inc/bitmarkd/transactionrecord"
"github.com/bitmark-inc/logger"
)
// Block Owner
// -----------

// Rate limiting for this RPC group.
const (
	rateLimitBlockOwner = 200 // sustained requests per second
	rateBurstBlockOwner = 100 // burst allowance
)

// BlockOwner - the type of the RPC
type BlockOwner struct {
	Log *logger.L // handler-scoped logger
	Limiter *rate.Limiter // request rate limiter for this handler
	Pool storage.Handle // block storage, keyed by block number
	Br blockrecord.Record
	IsNormalMode func(mode.Mode) bool
	IsTestingChain func() bool
	Rsvr reservoir.Reservoir
}
// TxIDForBlockArguments - get the id for a given block number
type TxIDForBlockArguments struct {
	BlockNumber uint64 `json:"blockNumber"`
}

// TxIDForBlockReply - results for block id
type TxIDForBlockReply struct {
	TxId merkle.Digest `json:"txId"` // foundation transaction id of the block
}
// New constructs the BlockOwner RPC handler with a fresh rate limiter
// drawing on the shared block pool.
func New(log *logger.L, pools reservoir.Handles, isNormalMode func(mode.Mode) bool, isTestingChain func() bool, rsvr reservoir.Reservoir, br blockrecord.Record) *BlockOwner {
	owner := &BlockOwner{}
	owner.Log = log
	owner.Limiter = rate.NewLimiter(rateLimitBlockOwner, rateBurstBlockOwner)
	owner.Pool = pools.Blocks
	owner.Br = br
	owner.IsNormalMode = isNormalMode
	owner.IsTestingChain = isTestingChain
	owner.Rsvr = rsvr
	return owner
}
// TxIDForBlock - RPC to get transaction id for block ownership record
func (bitmark *BlockOwner) TxIDForBlock(info *TxIDForBlockArguments, reply *TxIDForBlockReply) error {
if err := ratelimit.Limit(bitmark.Limiter); nil != err {
return err
}
log := bitmark.Log
log.Infof("BlockOwner.TxIDForBlock: %+v", info)
if bitmark.Pool == nil {
return fault.DatabaseIsNotSet
}
blockNumberKey := make([]byte, 8)
binary.BigEndian.PutUint64(blockNumberKey, info.BlockNumber)
packedBlock := bitmark.Pool.Get(blockNumberKey)
if nil == packedBlock {
return fault.BlockNotFound
}
header, digest, _, err := bitmark.Br.ExtractHeader(packedBlock, 0, false)
if nil != err {
return err
}
reply.TxId = blockrecord.FoundationTxId(header.Number, digest)
return nil
}
// Block owner transfer
// --------------------

// TransferReply - results of transferring block ownership
type TransferReply struct {
	TxId merkle.Digest `json:"txId"`
	PayId pay.PayId `json:"payId"`
	Payments map[string]transactionrecord.PaymentAlternative `json:"payments"` // payment options keyed by currency name
}
// Transfer - transfer the ownership of a block to new account and/or
// payment addresses
func (bitmark *BlockOwner) Transfer(transfer *transactionrecord.BlockOwnerTransfer, reply *TransferReply) error {
	if err := ratelimit.Limit(bitmark.Limiter); nil != err {
		return err
	}
	log := bitmark.Log
	log.Infof("BlockOwner.Transfer: %+v", transfer)
	// transfers are only accepted while the node is fully synchronised
	if !bitmark.IsNormalMode(mode.Normal) {
		return fault.NotAvailableDuringSynchronise
	}
	// the owner key's network (test/live) must match this chain
	if transfer.Owner.IsTesting() != bitmark.IsTestingChain() {
		return fault.WrongNetworkForPublicKey
	}
	// save transfer/check for duplicate
	stored, duplicate, err := bitmark.Rsvr.StoreTransfer(transfer)
	if nil != err {
		return err
	}
	// only first result needs to be considered
	payId := stored.Id
	txId := stored.TxId
	packedTransfer := stored.Packed
	log.Infof("id: %v", txId)
	reply.TxId = txId
	reply.PayId = payId
	// index payment alternatives by their leading currency name
	reply.Payments = make(map[string]transactionrecord.PaymentAlternative)
	for _, payment := range stored.Payments {
		c := payment[0].Currency.String()
		reply.Payments[c] = payment
	}
	// announce transaction block to other peers
	if !duplicate {
		messagebus.Bus.Broadcast.Send("transfer", packedTransfer)
	}
	return nil
}
|
package netxlite
import (
"context"
"crypto/tls"
"errors"
"io"
"net"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/ooni/probe-cli/v3/internal/netxmocks"
)
// The address deliberately omits the port, so DialTLSContext must fail
// while splitting host/port, before any dialing happens.
func TestTLSDialerFailureSplitHostPort(t *testing.T) {
	td := &TLSDialer{}
	conn, err := td.DialTLSContext(context.Background(), "tcp", "www.google.com")
	if err == nil || !strings.HasSuffix(err.Error(), "missing port in address") {
		t.Fatal("not the error we expected", err)
	}
	if conn != nil {
		t.Fatal("connection is not nil")
	}
}
// A pre-canceled context makes the underlying dial fail immediately.
func TestTLSDialerFailureDialing(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	td := TLSDialer{Dialer: &net.Dialer{}}
	conn, err := td.DialTLSContext(ctx, "tcp", "www.google.com:443")
	if err == nil || !strings.HasSuffix(err.Error(), "operation was canceled") {
		t.Fatal("not the error we expected", err)
	}
	if conn != nil {
		t.Fatal("connection is not nil")
	}
}
// TestTLSDialerFailureHandshaking ensures that a TCP connection whose
// writes fail with io.EOF causes the TLS handshake to fail and the
// returned connection to be nil.
func TestTLSDialerFailureHandshaking(t *testing.T) {
	ctx := context.Background()
	dialer := TLSDialer{
		Config: &tls.Config{},
		Dialer: &netxmocks.Dialer{MockDialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
			// Every write fails immediately, so the ClientHello cannot be sent.
			return &netxmocks.Conn{MockWrite: func(b []byte) (int, error) {
				return 0, io.EOF
			}, MockClose: func() error {
				return nil
			}, MockSetDeadline: func(t time.Time) error {
				return nil
			}}, nil
		}},
		TLSHandshaker: &TLSHandshakerConfigurable{},
	}
	conn, err := dialer.DialTLSContext(ctx, "tcp", "www.google.com:443")
	if !errors.Is(err, io.EOF) {
		t.Fatal("not the error we expected", err)
	}
	if conn != nil {
		t.Fatal("connection is not nil")
	}
}
// TestTLSDialerSuccessHandshaking checks that a successful (mocked)
// handshake yields a usable, non-nil TLS connection even though the
// underlying conn rejects writes — the mocked handshaker never uses it.
func TestTLSDialerSuccessHandshaking(t *testing.T) {
	ctx := context.Background()
	dialer := TLSDialer{
		Dialer: &netxmocks.Dialer{MockDialContext: func(ctx context.Context, network, address string) (net.Conn, error) {
			return &netxmocks.Conn{MockWrite: func(b []byte) (int, error) {
				return 0, io.EOF
			}, MockClose: func() error {
				return nil
			}, MockSetDeadline: func(t time.Time) error {
				return nil
			}}, nil
		}},
		TLSHandshaker: &netxmocks.TLSHandshaker{
			MockHandshake: func(ctx context.Context, conn net.Conn, config *tls.Config) (net.Conn, tls.ConnectionState, error) {
				// Pretend the handshake succeeded without network traffic.
				return tls.Client(conn, config), tls.ConnectionState{}, nil
			},
		},
	}
	conn, err := dialer.DialTLSContext(ctx, "tcp", "www.google.com:443")
	if err != nil {
		t.Fatal(err)
	}
	if conn == nil {
		t.Fatal("connection is nil")
	}
	conn.Close()
}
// With no explicit config, web ports get the host as SNI and h2/http1.1 ALPN.
func TestTLSDialerConfigFromEmptyConfigForWeb(t *testing.T) {
	dialer := &TLSDialer{}
	cfg := dialer.config("www.google.com", "443")
	if cfg.ServerName != "www.google.com" {
		t.Fatal("invalid server name")
	}
	if diff := cmp.Diff(cfg.NextProtos, []string{"h2", "http/1.1"}); diff != "" {
		t.Fatal(diff)
	}
}
// With no explicit config, port 853 selects the DNS-over-TLS ALPN.
func TestTLSDialerConfigFromEmptyConfigForDoT(t *testing.T) {
	dialer := &TLSDialer{}
	cfg := dialer.config("dns.google", "853")
	if cfg.ServerName != "dns.google" {
		t.Fatal("invalid server name")
	}
	if diff := cmp.Diff(cfg.NextProtos, []string{"dot"}); diff != "" {
		t.Fatal(diff)
	}
}
// An explicitly configured ServerName overrides the dialed host.
func TestTLSDialerConfigWithServerName(t *testing.T) {
	dialer := &TLSDialer{Config: &tls.Config{ServerName: "example.com"}}
	cfg := dialer.config("dns.google", "853")
	if cfg.ServerName != "example.com" {
		t.Fatal("invalid server name")
	}
	if diff := cmp.Diff(cfg.NextProtos, []string{"dot"}); diff != "" {
		t.Fatal(diff)
	}
}
// Caller-supplied ALPN entries must not be overwritten by the defaults.
func TestTLSDialerConfigWithALPN(t *testing.T) {
	dialer := &TLSDialer{Config: &tls.Config{NextProtos: []string{"h2"}}}
	cfg := dialer.config("dns.google", "853")
	if cfg.ServerName != "dns.google" {
		t.Fatal("invalid server name")
	}
	if diff := cmp.Diff(cfg.NextProtos, []string{"h2"}); diff != "" {
		t.Fatal(diff)
	}
}
|
package buffer
import (
"fmt"
"sort"
"strings"
)
// Buffer tracks one buffered flow: when its first packet arrived and
// how many packets it currently holds.
type Buffer struct {
	FirstTime float64 // capture time of the flow's first packet
	Len int // packets accumulated for this flow
}

// Buffers maps a five-tuple string to its Buffer.
type Buffers map[string]Buffer

// ResultData accumulates simulation counters.
type ResultData struct {
	MaxPacketNum int
	AccessCount int // accesses accumulated by the batch/ack paths
	NextAccessTime int // next window index at which to flush
	BufMax int // high-water mark of concurrently buffered flows
	PacketNumAll int
	PacketOfAllBuffers int // packets currently held across all buffers
	AccessPerSecList []int // accesses bucketed per PerSec interval
	EndFlag bool // set when the input trace is exhausted
}

// Params carries the simulation configuration.
type Params struct {
	CurrentTime float64 // timestamp of the packet being processed
	PerSec float64 // width of one AccessPerSecList bucket (seconds, presumably)
	BufSize int // max flows selected per batch
	EntrySize int // packets per buffer entry
	TimeWidth float64 // batch-flush window width
	Stupid bool // true: FIFO selection; false: largest-first
	Protocol string // protocol filter used by FiveTupleContains
	SelectedPort []string // port filter; [""] disables filtering
}
// Append records a packet for fivetuple: new flows get a fresh Buffer
// stamped with the current time and are pushed onto bufOrderList;
// existing flows have their packet count incremented.
func (buf Buffers) Append(bufOrderList []string, params Params, fivetuple string, result ResultData) (Buffers, []string, ResultData) {
	_, ok := buf[fivetuple]
	result.PacketOfAllBuffers++
	if !ok {
		buf[fivetuple] = Buffer{params.CurrentTime, 1}
		bufOrderList = append(bufOrderList, fivetuple)
	} else {
		b := buf[fivetuple]
		buf[fivetuple] = Buffer{b.FirstTime, b.Len + 1}
		// NOTE(review): BufMax is refreshed only on this existing-flow
		// branch, so a brand-new flow that raises the high-water mark is
		// not captured until its next packet — confirm this is intended.
		if len(bufOrderList) > result.BufMax {
			result.BufMax = len(bufOrderList)
		}
	}
	return buf, bufOrderList, result
}
// batchProcessing drains the current window: it selects the flows that
// fit the buffer (largest-first, or FIFO in "stupid" mode), converts
// the unabsorbed remainder into memory accesses, and resets all buffers
// for the next window.
func batchProcessing(buf Buffers, bufOrderList []string, params Params, result ResultData) (Buffers, []string, Params, ResultData) {
	sortedMap := List{}
	if params.Stupid == false {
		sortedMap = buf.getSortedMap(params.BufSize)
	} else {
		sortedMap = buf.getStupidMap(bufOrderList, params)
	}
	// reducing = packets absorbed by the selected flows; the rest hit memory.
	reducing := sortedMap.getListSum(params)
	if result.PacketOfAllBuffers < reducing {
		panic(fmt.Errorf("error: reducing(%d) is more than packet of all buffers(%d)%d", reducing, result.PacketOfAllBuffers, len(sortedMap)))
	}
	accessCount := result.PacketOfAllBuffers - reducing
	result.AccessCount += accessCount
	result.AccessPerSecList[len(result.AccessPerSecList)-1] += accessCount
	// Start the next window with empty buffers.
	bufOrderList = []string{}
	buf = Buffers{}
	result.PacketOfAllBuffers = 0
	result.NextAccessTime = int(params.CurrentTime/params.TimeWidth) + 1
	return buf, bufOrderList, params, result
}
// CheckAck looks up the reverse-direction flow (src and dst swapped) of
// fiveTuple; if it is buffered, its packets are converted to
// entry-sized accesses and the flow is evicted.
func (buf Buffers) CheckAck(fiveTuple string, bufOrderList []string, params Params, result ResultData) (Buffers, []string, ResultData) {
	list := strings.Split(fiveTuple, " ")
	// Reverse the tuple: "sip sport dip dport proto" -> "dip dport sip sport proto".
	ack := strings.Join(append(append(list[2:4], list[0:2]...), list[4]), " ")
	_, ok := buf[ack]
	if ok {
		accessCount := int(float64(buf[ack].Len) / float64(params.EntrySize))
		result.AccessCount += accessCount
		result.AccessPerSecList[len(result.AccessPerSecList)-1] += accessCount
		result.PacketOfAllBuffers -= buf[ack].Len
		bufOrderList = deleteList(bufOrderList, ack)
		delete(buf, ack)
	}
	return buf, bufOrderList, result
}
// EndProcessing flushes the remaining buffers at the end of the trace.
// NOTE(review): batchProcessing's return values are assigned to the
// local parameter copies and then discarded, so the caller never sees
// the updated counters; only in-place mutations of the buf map survive.
// Fixing this would require changing the signature — flagged instead.
func (buf Buffers) EndProcessing(bufOrderList []string, params Params, result ResultData) {
	buf, bufOrderList, params, result = batchProcessing(buf, bufOrderList, params, result)
}
// CheckGlobalTime clamps BufSize to the live flow count, runs a batch
// flush when the time window has elapsed (or at end of input), and
// extends the per-second access counter list as time advances.
// NOTE(review): the BufSize clamp mutates only the local copy of
// params, which is not returned — confirm callers do not rely on it.
func (buf Buffers) CheckGlobalTime(bufOrderList []string, params Params, result ResultData) (Buffers, []string, ResultData) {
	if params.BufSize > len(buf) {
		params.BufSize = len(buf)
	}
	if params.CurrentTime/params.TimeWidth > float64(result.NextAccessTime) || result.EndFlag == true {
		buf, bufOrderList, params, result = batchProcessing(buf, bufOrderList, params, result)
	}
	if params.CurrentTime > float64(len(result.AccessPerSecList))*params.PerSec {
		result.AccessPerSecList = append(result.AccessPerSecList, 0)
	}
	return buf, bufOrderList, result
}
// CheckGlobalTimeWithUnlimitedBuffers models an unbounded buffer: when
// the window elapses (or at end of input) every buffered flow costs one
// access and all buffers are cleared.
// NOTE(review): NextAccessTime is set to CurrentTime*100+1 here but to
// CurrentTime/TimeWidth+1 in batchProcessing — confirm the constant 100
// is intentional and not a stale hard-coded TimeWidth.
func (buf Buffers) CheckGlobalTimeWithUnlimitedBuffers(bufOrderList []string, params Params, result ResultData) (Buffers, []string, ResultData) {
	if params.CurrentTime > float64(result.NextAccessTime)*params.TimeWidth || result.EndFlag == true {
		result.AccessCount += len(bufOrderList)
		result.AccessPerSecList[len(result.AccessPerSecList)-1] += len(bufOrderList)
		bufOrderList = []string{}
		buf = Buffers{}
		result.NextAccessTime = int(params.CurrentTime)*100 + 1
	}
	if params.CurrentTime > float64(len(result.AccessPerSecList))*params.PerSec {
		result.AccessPerSecList = append(result.AccessPerSecList, 0)
	}
	return buf, bufOrderList, result
}
//for get sorted map

// Entry pairs a flow's five-tuple name with its packet count.
type Entry struct {
	name string
	value int
}

// List is a sortable collection of Entry values (see Len/Swap/Less).
type List []Entry
//using stupid simulation

// getStupidMap selects flows in arrival (FIFO) order, keeping only
// those matching the protocol/port filter, up to BufSize entries.
func (buf Buffers) getStupidMap(bufOrderList []string, params Params) List {
	sortedMap := List{}
	count := 0
	for _, k := range bufOrderList {
		element := Entry{k, buf[k].Len}
		if FiveTupleContains(k, params) {
			sortedMap = append(sortedMap, element)
			count++
		}
		if count == params.BufSize {
			break
		}
	}
	return sortedMap
}
// getSortedMap returns the bufSize largest flows, in descending packet
// count (ties broken by name, see List.Less).
// NOTE(review): the final slice expression panics when bufSize exceeds
// the number of live flows; CheckGlobalTime clamps BufSize beforehand —
// confirm every caller does the same.
func (buf Buffers) getSortedMap(bufSize int) List {
	sortedMap := List{}
	for k, v := range buf {
		element := Entry{k, v.Len}
		sortedMap = append(sortedMap, element)
	}
	sort.Sort(sort.Reverse(sortedMap))
	return sortedMap[:bufSize]
}
// Len reports the number of entries.
func (l List) Len() int { return len(l) }

// Swap exchanges entries i and j.
func (l List) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// Less orders entries by ascending value, breaking ties by name.
func (l List) Less(i, j int) bool {
	if l[i].value != l[j].value {
		return l[i].value < l[j].value
	}
	return l[i].name < l[j].name
}
// getListSum estimates the packets absorbed by the selected flows: each
// flow contributes its packet count scaled by (EntrySize-1)/EntrySize.
func (l List) getListSum(params Params) int {
	ratio := float64(params.EntrySize-1) / float64(params.EntrySize)
	total := 0
	for _, entry := range l {
		total += int(float64(entry.value) * ratio)
	}
	return total
}
// deleteList removes the first occurrence of s from l and returns the
// (possibly shortened) slice; l is returned unchanged when s is absent.
// The original built an unused copy sized by len(s) — the *string*
// length — into a shadowed variable; that dead, misleading code is gone
// and the removal is done directly in place.
func deleteList(l []string, s string) []string {
	for i, v := range l {
		if v == s {
			return append(l[:i], l[i+1:]...)
		}
	}
	return l
}
// FiveTupleContains reports whether the five-tuple passes the
// protocol/port filter in params; an empty filter matches everything.
func FiveTupleContains(fiveTuple string, params Params) bool {
	if params.SelectedPort[0] == "" {
		return true
	}
	fields := strings.Split(fiveTuple, " ")
	if params.Protocol != fields[4] {
		return false
	}
	// Ports may carry a "(service)" suffix; compare the numeric part only.
	srcPort := strings.Split(fields[1], "(")[0]
	dstPort := strings.Split(fields[3], "(")[0]
	for _, port := range params.SelectedPort {
		if port == srcPort || port == dstPort {
			return true
		}
	}
	return false
}
|
package chronos
// Container describes the Docker container a Chronos job runs in.
type Container struct {
	Type string `json:"type"`
	Image string `json:"image"`
	Network string `json:"network"`
}

// NewContainer creates a new Container assignment
func NewContainer(image string) *Container {
	container := new(Container)
	container.Type = "DOCKER"
	container.Image = image
	container.Network = "BRIDGE"
	return container
}
|
package util
import "log"
import "fmt"
import "time"
// Debugging verbosity; set greater than 0 to enable DPrintf output.
const Debug = 0

// DPrintf logs through the standard logger when Debug > 0.
// It always returns the zero values; the signature mirrors the Printf
// family so call sites can swap freely.
// (The original crammed the if body onto one line, which is not
// gofmt-clean.)
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug > 0 {
		log.Printf(format, a...)
	}
	return
}
// Log writes a timestamped "LOG:" line to stdout.
func Log(format string, args ...interface{}) {
	stamp := time.Now().Format("15:04:05.000")
	message := fmt.Sprintf(format, args...)
	fmt.Printf("%s LOG: %s", stamp, message)
}
// internal discards the current time and prints a fixed marker;
// it appears to exist only to exercise the imports.
func internal() {
	_ = time.Now()
	fmt.Print("__internal__")
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package auditor
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/sassoftware/relic/v7/lib/audit"
)
// logGraylog forwards one audit row to the configured graylog endpoint
// in GELF JSON format; it is a no-op when no URL is configured.
func logGraylog(info *audit.Info, rowid int64) error {
	if auditConfig.GraylogURL == "" {
		return nil
	}
	msg := map[string]interface{}{
		"version":       "1.1",
		"host":          info.Attributes["sig.hostname"],
		"short_message": fmtRow(info, rowid),
		"level":         6, // INFO
	}
	// Attach the event time when present. The unchecked .(string)
	// assertion in the original panicked if the attribute was missing
	// or not a string.
	if ts, ok := info.Attributes["sig.timestamp"].(string); ok {
		if timestamp, err := time.Parse(time.RFC3339Nano, ts); err == nil {
			msg["timestamp"] = timestamp.Unix()
		}
	}
	for k, v := range info.Attributes {
		if v == nil {
			continue
		}
		// graylog quietly changes dots to underscores, but only after running
		// stream filters. that gets confusing real quickly so change it to
		// underscore now.
		k = strings.ReplaceAll(k, ".", "_")
		msg["_"+k] = v
	}
	blob, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	resp, err := http.Post(auditConfig.GraylogURL, "application/json", bytes.NewReader(blob))
	if err != nil {
		return err
	}
	// Close on every path; the body used to leak when graylog answered
	// with an error status.
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("posting to graylog: %s", resp.Status)
	}
	return nil
}
|
package sequencing
// LCSStrings returns the length of the longest common subsequence of a
// and b using two-row dynamic programming. v0 and v1 are optional
// scratch rows of length len(b)+1; pass nil to allocate fresh ones.
func LCSStrings(a []string, b []string, v0 []int, v1 []int) int {
	rows, cols := len(a), len(b)
	if v0 == nil {
		v0 = make([]int, cols+1)
	}
	if v1 == nil {
		v1 = make([]int, cols+1)
	}
	for i := 0; i < rows; i++ {
		v1[0] = 0
		for j := 0; j < cols; j++ {
			if a[i] == b[j] {
				v1[j+1] = v0[j] + 1
			} else {
				v1[j+1] = max(v1[j], v0[j+1])
			}
		}
		// Roll the rows: the just-computed row becomes the previous one.
		v0, v1 = v1, v0
	}
	return v0[cols]
}
// LCS returns the length of the longest common subsequence of the byte
// slices a and b using two-row dynamic programming. v0 and v1 are
// optional scratch rows of length len(b)+1; pass nil to allocate.
func LCS(a []byte, b []byte, v0 []int, v1 []int) int {
	rows, cols := len(a), len(b)
	if v0 == nil {
		v0 = make([]int, cols+1)
	}
	if v1 == nil {
		v1 = make([]int, cols+1)
	}
	for i := 0; i < rows; i++ {
		v1[0] = 0
		for j := 0; j < cols; j++ {
			if a[i] == b[j] {
				v1[j+1] = v0[j] + 1
			} else {
				v1[j+1] = max(v1[j], v0[j+1])
			}
		}
		// Roll the rows: the just-computed row becomes the previous one.
		v0, v1 = v1, v0
	}
	return v0[cols]
}
|
package command
import (
"fmt"
"regexp"
"time"
)
const (
	Beer = "\U0001f37a" // beer mug emoji
	Clock = "\U000023f0" // alarm clock emoji
	// StartingHour is the hour of day (24h clock) at which beer o'clock begins.
	StartingHour = 18
)
// queryRegexp matches ETA-style questions ("how long", "time left", ...).
// Initialized directly at package level — the idiomatic replacement for
// the original var + init() pair; MustCompile panics only on a bad
// pattern, which would be a programmer error.
var queryRegexp = regexp.MustCompile(`(?i)time|long|til|left|remaining|eta`)
// BeerOClockCommand answers whether it is beer o'clock yet.
type BeerOClockCommand struct {
	name string
	pattern *regexp.Regexp
}

// BeerOClock builds the command with its case-insensitive trigger pattern.
func BeerOClock() BeerOClockCommand {
	return BeerOClockCommand{
		name:    "beer",
		pattern: regexp.MustCompile(`(?i)beer\s*(.*)`),
	}
}
// Name returns the command keyword.
func (c BeerOClockCommand) Name() string {
	return c.name
}

// Pattern returns the regexp that triggers the command.
func (c BeerOClockCommand) Pattern() *regexp.Regexp {
	return c.pattern
}

// Help returns the one-line help text.
func (c BeerOClockCommand) Help() string {
	return c.name + " – is it beer o'clock yet?"
}

// Usage lists the supported invocation forms.
func (c BeerOClockCommand) Usage() []string {
	return []string{
		c.name + " – tells if it's beer o'clock",
		c.name + " ETA – tells how long until beer o'clock",
	}
}
// Run answers a "beer" query. With an ETA-style query it reports the
// time remaining until StartingHour; otherwise it answers yes/no based
// on the current local hour.
func (c BeerOClockCommand) Run(query string) []string {
	now := time.Now()
	hour := now.Hour()
	beerOclock := hour >= StartingHour
	beerOclockEmoji := fmt.Sprintf("%s%s", Beer, Clock)
	var result string
	if queryRegexp.MatchString(query) {
		// Count down to the next full hour.
		// NOTE(review): at minute 0 / second 0 this reports
		// "60 minute(s)" / "60 second(s)" instead of rolling over —
		// confirm this is acceptable for the bot.
		hourDiff := StartingHour - hour - 1
		minuteDiff := 60 - now.Minute()
		secondDiff := 60 - now.Second()
		if beerOclock {
			result = fmt.Sprintf("It's already %s! Enjoy!", beerOclockEmoji)
		} else {
			result = fmt.Sprintf(
				"%s in %d hour(s), %d minute(s) and %d second(s)",
				beerOclockEmoji,
				hourDiff,
				minuteDiff,
				secondDiff)
		}
	} else if beerOclock {
		result = fmt.Sprintf("YES! %s%s%s", Beer, Beer, Beer)
	} else {
		result = "Not yet. :("
	}
	return []string{result}
}
|
package gui
import (
"fmt"
"log"
"strings"
"github.com/fatih/color"
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazynpm/pkg/utils"
)
// Binding - a keybinding mapping a key and modifier to a handler. The keypress
// is only handled if the given view has focus, or handled globally if the view
// is ""
type Binding struct {
	ViewName    string                             // view that must be focused ("" = global)
	Contexts    []string                           // contexts within the view in which the binding is active
	Handler     func(*gocui.Gui, *gocui.View) error // invoked when the key is pressed
	Key         interface{} // FIXME: find out how to get `gocui.Key | rune`
	Modifier    gocui.Modifier
	Description string // shown in the options menu; empty bindings are undocumented
	Alternative string // alternative key combination mentioned in help text
}
// GetDisplayStrings returns the key label and description columns rendered
// for this binding in the keybindings menu. isFocused is currently unused.
// (The previous comment said "of a file", which was wrong.)
func (b *Binding) GetDisplayStrings(isFocused bool) []string {
	return []string{GetKeyDisplay(b.Key), b.Description}
}
// keyMapReversed maps special gocui keys to the human-readable labels shown
// in menus and help text (see GetKeyDisplay). Keys absent from this map fall
// back to their raw rune representation.
var keyMapReversed = map[gocui.Key]string{
	gocui.KeyF1:         "f1",
	gocui.KeyF2:         "f2",
	gocui.KeyF3:         "f3",
	gocui.KeyF4:         "f4",
	gocui.KeyF5:         "f5",
	gocui.KeyF6:         "f6",
	gocui.KeyF7:         "f7",
	gocui.KeyF8:         "f8",
	gocui.KeyF9:         "f9",
	gocui.KeyF10:        "f10",
	gocui.KeyF11:        "f11",
	gocui.KeyF12:        "f12",
	gocui.KeyInsert:     "insert",
	gocui.KeyDelete:     "delete",
	gocui.KeyHome:       "home",
	gocui.KeyEnd:        "end",
	gocui.KeyPgup:       "pgup",
	gocui.KeyPgdn:       "pgdown",
	gocui.KeyArrowUp:    "▲",
	gocui.KeyArrowDown:  "▼",
	gocui.KeyArrowLeft:  "◄",
	gocui.KeyArrowRight: "►",
	gocui.KeyTab:        "tab",        // ctrl+i
	gocui.KeyEnter:      "enter",      // ctrl+m
	gocui.KeyEsc:        "esc",        // ctrl+[, ctrl+3
	gocui.KeyBackspace:  "backspace",  // ctrl+h
	gocui.KeyCtrlSpace:  "ctrl+space", // ctrl+~, ctrl+2
	gocui.KeyCtrlSlash:  "ctrl+/",     // ctrl+_
	gocui.KeySpace:      "space",
	gocui.KeyCtrlA:      "ctrl+a",
	gocui.KeyCtrlB:      "ctrl+b",
	gocui.KeyCtrlC:      "ctrl+c",
	gocui.KeyCtrlD:      "ctrl+d",
	gocui.KeyCtrlE:      "ctrl+e",
	gocui.KeyCtrlF:      "ctrl+f",
	gocui.KeyCtrlG:      "ctrl+g",
	gocui.KeyCtrlJ:      "ctrl+j",
	gocui.KeyCtrlK:      "ctrl+k",
	gocui.KeyCtrlL:      "ctrl+l",
	gocui.KeyCtrlN:      "ctrl+n",
	gocui.KeyCtrlO:      "ctrl+o",
	gocui.KeyCtrlP:      "ctrl+p",
	gocui.KeyCtrlQ:      "ctrl+q",
	gocui.KeyCtrlR:      "ctrl+r",
	gocui.KeyCtrlS:      "ctrl+s",
	gocui.KeyCtrlT:      "ctrl+t",
	gocui.KeyCtrlU:      "ctrl+u",
	gocui.KeyCtrlV:      "ctrl+v",
	gocui.KeyCtrlW:      "ctrl+w",
	gocui.KeyCtrlX:      "ctrl+x",
	gocui.KeyCtrlY:      "ctrl+y",
	gocui.KeyCtrlZ:      "ctrl+z",
	gocui.KeyCtrl4:      "ctrl+4", // ctrl+\
	gocui.KeyCtrl5:      "ctrl+5", // ctrl+]
	gocui.KeyCtrl6:      "ctrl+6",
	gocui.KeyCtrl8:      "ctrl+8",
}
// keymap maps the angle-bracket key names used in the user config
// (e.g. "<c-a>", "<enter>") to their gocui key constants; see getKey.
var keymap = map[string]interface{}{
	"<c-a>":       gocui.KeyCtrlA,
	"<c-b>":       gocui.KeyCtrlB,
	"<c-c>":       gocui.KeyCtrlC,
	"<c-d>":       gocui.KeyCtrlD,
	"<c-e>":       gocui.KeyCtrlE,
	"<c-f>":       gocui.KeyCtrlF,
	"<c-g>":       gocui.KeyCtrlG,
	"<c-h>":       gocui.KeyCtrlH,
	"<c-i>":       gocui.KeyCtrlI,
	"<c-j>":       gocui.KeyCtrlJ,
	"<c-k>":       gocui.KeyCtrlK,
	"<c-l>":       gocui.KeyCtrlL,
	"<c-m>":       gocui.KeyCtrlM,
	"<c-n>":       gocui.KeyCtrlN,
	"<c-o>":       gocui.KeyCtrlO,
	"<c-p>":       gocui.KeyCtrlP,
	"<c-q>":       gocui.KeyCtrlQ,
	"<c-r>":       gocui.KeyCtrlR,
	"<c-s>":       gocui.KeyCtrlS,
	"<c-t>":       gocui.KeyCtrlT,
	"<c-u>":       gocui.KeyCtrlU,
	"<c-v>":       gocui.KeyCtrlV,
	"<c-w>":       gocui.KeyCtrlW,
	"<c-x>":       gocui.KeyCtrlX,
	"<c-y>":       gocui.KeyCtrlY,
	"<c-z>":       gocui.KeyCtrlZ,
	"<c-~>":       gocui.KeyCtrlTilde,
	"<c-2>":       gocui.KeyCtrl2,
	"<c-3>":       gocui.KeyCtrl3,
	"<c-4>":       gocui.KeyCtrl4,
	"<c-5>":       gocui.KeyCtrl5,
	"<c-6>":       gocui.KeyCtrl6,
	"<c-7>":       gocui.KeyCtrl7,
	"<c-8>":       gocui.KeyCtrl8,
	"<c-space>":   gocui.KeyCtrlSpace,
	"<c-\\>":      gocui.KeyCtrlBackslash,
	"<c-[>":       gocui.KeyCtrlLsqBracket,
	"<c-]>":       gocui.KeyCtrlRsqBracket,
	"<c-/>":       gocui.KeyCtrlSlash,
	"<c-_>":       gocui.KeyCtrlUnderscore,
	"<backspace>": gocui.KeyBackspace,
	"<tab>":       gocui.KeyTab,
	"<enter>":     gocui.KeyEnter,
	"<esc>":       gocui.KeyEsc,
	"<space>":     gocui.KeySpace,
	"<f1>":        gocui.KeyF1,
	"<f2>":        gocui.KeyF2,
	"<f3>":        gocui.KeyF3,
	"<f4>":        gocui.KeyF4,
	"<f5>":        gocui.KeyF5,
	"<f6>":        gocui.KeyF6,
	"<f7>":        gocui.KeyF7,
	"<f8>":        gocui.KeyF8,
	"<f9>":        gocui.KeyF9,
	"<f10>":       gocui.KeyF10,
	"<f11>":       gocui.KeyF11,
	"<f12>":       gocui.KeyF12,
	"<insert>":    gocui.KeyInsert,
	"<delete>":    gocui.KeyDelete,
	"<home>":      gocui.KeyHome,
	"<end>":       gocui.KeyEnd,
	"<pgup>":      gocui.KeyPgup,
	"<pgdown>":    gocui.KeyPgdn,
	"<up>":        gocui.KeyArrowUp,
	"<down>":      gocui.KeyArrowDown,
	"<left>":      gocui.KeyArrowLeft,
	"<right>":     gocui.KeyArrowRight,
}
// getKeyDisplay resolves a configured keybinding name to its display string.
func (gui *Gui) getKeyDisplay(name string) string {
	return GetKeyDisplay(gui.getKey(name))
}
// GetKeyDisplay returns a human-readable representation of a keybinding
// key, which may be a rune or a gocui.Key. Special keys use the friendly
// labels from keyMapReversed; everything else renders as the raw rune.
func GetKeyDisplay(key interface{}) string {
	keyInt := 0
	switch key := key.(type) {
	case rune:
		keyInt = int(key)
	case gocui.Key:
		value, ok := keyMapReversed[key]
		if ok {
			return value
		}
		keyInt = int(key)
	}
	// string(rune(...)) rather than string(int): converting an int directly
	// to string yields the same rune but is flagged by go vet since Go 1.15.
	return string(rune(keyInt))
}
// getKey looks up the key configured under "keybinding.<name>". Multi-rune
// values are resolved through keymap (e.g. "<c-a>"); single runes are
// returned directly. Unknown or empty values are fatal.
func (gui *Gui) getKey(name string) interface{} {
	key := gui.Config.GetUserConfig().GetString("keybinding." + name)
	switch {
	case len(key) > 1:
		normalized := strings.ToLower(key)
		if binding, ok := keymap[normalized]; ok && binding != nil {
			return binding
		}
		log.Fatalf("Unrecognized key %s for keybinding %s", normalized, name)
	case len(key) == 1:
		return []rune(key)[0]
	}
	log.Fatal("Key empty for keybinding: " + strings.ToLower(name))
	return nil
}
// GetInitialKeybindings builds the full list of keybindings: global bindings,
// per-view bindings, panel-cycling bindings, numeric side-view shortcuts,
// and generic list-navigation bindings for every list view.
func (gui *Gui) GetInitialKeybindings() []*Binding {
	bindings := []*Binding{
		{
			ViewName: "",
			Key:      gui.getKey("universal.quit"),
			Handler:  gui.handleQuit,
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.quitWithoutChangingDirectory"),
			Handler:  gui.handleQuitWithoutChangingDirectory,
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.quit-alt1"),
			Handler:  gui.handleQuit,
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.return"),
			Handler:  gui.handleQuit,
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.scrollUpMain"),
			Handler:     gui.scrollUpMain,
			Alternative: "fn+up",
			Description: gui.Tr.SLocalize("scrollUpMainPanel"),
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.scrollDownMain"),
			Handler:     gui.scrollDownMain,
			Alternative: "fn+down",
			Description: gui.Tr.SLocalize("scrollDownMainPanel"),
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.scrollUpMain-alt1"),
			Handler:  gui.scrollUpMain,
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.scrollDownMain-alt1"),
			Handler:  gui.scrollDownMain,
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.scrollUpMain-alt2"),
			Handler:  gui.scrollUpMain,
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.scrollDownMain-alt2"),
			Handler:  gui.scrollDownMain,
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.refresh"),
			Handler:     gui.handleRefresh,
			Description: gui.Tr.SLocalize("refresh"),
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.optionMenu"),
			Handler:     gui.handleCreateOptionsMenu,
			Description: gui.Tr.SLocalize("openMenu"),
		},
		{
			ViewName: "",
			Key:      gui.getKey("universal.optionMenu-alt1"),
			Handler:  gui.handleCreateOptionsMenu,
		},
		{
			ViewName: "",
			Key:      gocui.MouseMiddle,
			Handler:  gui.handleCreateOptionsMenu,
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.kill"),
			Handler:     gui.wrappedHandler(gui.handleKillCommand),
			Description: "kill running command",
		},
		{
			ViewName:    "status",
			Key:         gui.getKey("universal.edit"),
			Handler:     gui.handleEditConfig,
			Description: gui.Tr.SLocalize("EditConfig"),
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.nextScreenMode"),
			Handler:     gui.nextScreenMode,
			Description: gui.Tr.SLocalize("nextScreenMode"),
		},
		{
			ViewName:    "",
			Key:         gui.getKey("universal.prevScreenMode"),
			Handler:     gui.prevScreenMode,
			Description: gui.Tr.SLocalize("prevScreenMode"),
		},
		{
			ViewName:    "status",
			Key:         gui.getKey("universal.openFile"),
			Handler:     gui.handleOpenConfig,
			Description: gui.Tr.SLocalize("OpenConfig"),
		},
		{
			ViewName:    "status",
			Key:         gui.getKey("status.checkForUpdate"),
			Handler:     gui.handleCheckForUpdate,
			Description: gui.Tr.SLocalize("checkForUpdate"),
		},
		{
			ViewName:    "menu",
			Key:         gui.getKey("universal.return"),
			Handler:     gui.handleMenuClose,
			Description: gui.Tr.SLocalize("closeMenu"),
		},
		{
			ViewName:    "menu",
			Key:         gui.getKey("universal.quit"),
			Handler:     gui.handleMenuClose,
			Description: gui.Tr.SLocalize("closeMenu"),
		},
		{
			ViewName: "information",
			Key:      gocui.MouseLeft,
			Handler:  gui.handleInfoClick,
		},
		{
			ViewName: "status",
			Key:      gocui.MouseLeft,
			Handler:  gui.handleStatusClick,
		},
		{
			ViewName: "search",
			Key:      gocui.KeyEnter,
			Handler:  gui.handleSearch,
		},
		{
			ViewName: "search",
			Key:      gui.getKey("universal.return"),
			Handler:  gui.handleSearchEscape,
		},
		{
			ViewName: "confirmation",
			Key:      gui.getKey("universal.prevItem"),
			Handler:  gui.scrollUpConfirmationPanel,
		},
		{
			ViewName: "confirmation",
			Key:      gui.getKey("universal.nextItem"),
			Handler:  gui.scrollDownConfirmationPanel,
		},
		{
			ViewName: "confirmation",
			Key:      gui.getKey("universal.prevItem-alt"),
			Handler:  gui.scrollUpConfirmationPanel,
		},
		{
			ViewName: "confirmation",
			Key:      gui.getKey("universal.nextItem-alt"),
			Handler:  gui.scrollDownConfirmationPanel,
		},
		{
			ViewName: "packages",
			Key:      gui.getKey("universal.select"),
			Handler:  gui.wrappedPackageHandler(gui.handleCheckoutPackage),
		},
		{
			ViewName: "packages",
			Key:      gui.getKey("packages.publish"),
			Handler:  gui.wrappedPackageHandler(gui.handlePublishPackage),
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("universal.new"),
			Handler:     gui.wrappedHandler(gui.handleAddPackage),
			Description: "add package to list",
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("packages.pack"),
			Handler:     gui.wrappedPackageHandler(gui.handlePackPackage),
			Description: fmt.Sprintf("%s package", utils.ColoredString("`npm pack`", color.FgYellow)),
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("packages.link"),
			Handler:     gui.wrappedHandler(gui.handleLinkPackage),
			Description: fmt.Sprintf("%s (or unlink if already linked)", utils.ColoredString("`npm link <package>`", color.FgYellow)),
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("packages.globalLink"),
			Handler:     gui.wrappedPackageHandler(gui.handleGlobalLinkPackage),
			Description: fmt.Sprintf("%s (i.e. globally link) (or unlink if already linked)", utils.ColoredString("`npm link`", color.FgYellow)),
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("universal.remove"),
			Handler:     gui.wrappedPackageHandler(gui.handleRemovePackage),
			Description: "remove package from list",
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("universal.install"),
			Handler:     gui.wrappedPackageHandler(gui.handleInstall),
			Description: fmt.Sprintf("%s package", utils.ColoredString("`npm install`", color.FgYellow)),
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("packages.build"),
			Handler:     gui.wrappedPackageHandler(gui.handleBuild),
			Description: fmt.Sprintf("%s package", utils.ColoredString("`npm run build`", color.FgYellow)),
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("universal.openFile"),
			Handler:     gui.wrappedPackageHandler(gui.handleOpenPackageConfig),
			Description: "open package.json",
		},
		{
			ViewName:    "packages",
			Key:         gui.getKey("universal.update"),
			Handler:     gui.wrappedPackageHandler(gui.handlePackageUpdate),
			Description: fmt.Sprintf("%s package", utils.ColoredString("`npm update`", color.FgYellow)),
		},
		{
			ViewName:    "scripts",
			Key:         gui.getKey("universal.select"),
			Handler:     gui.wrappedScriptHandler(gui.handleRunScript),
			Description: fmt.Sprintf("%s script", utils.ColoredString("`npm run`", color.FgYellow)),
		},
		{
			ViewName:    "scripts",
			Key:         gui.getKey("universal.remove"),
			Handler:     gui.wrappedScriptHandler(gui.handleRemoveScript),
			Description: "remove script from package.json",
		},
		{
			ViewName:    "scripts",
			Key:         gui.getKey("universal.edit"),
			Handler:     gui.wrappedScriptHandler(gui.handleEditScript),
			Description: "edit script",
		},
		{
			ViewName:    "scripts",
			Key:         gui.getKey("universal.new"),
			Handler:     gui.wrappedHandler(gui.handleAddScript),
			Description: "add script",
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("universal.install"),
			Handler:     gui.wrappedDependencyHandler(gui.handleDepInstall),
			Description: fmt.Sprintf("%s dependency", utils.ColoredString("`npm install`", color.FgYellow)),
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("universal.openFile"),
			Handler:     gui.wrappedDependencyHandler(gui.handleOpenDepPackageConfig),
			Description: "open package.json",
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("universal.update"),
			Handler:     gui.wrappedDependencyHandler(gui.handleDepUpdate),
			Description: fmt.Sprintf("%s dependency", utils.ColoredString("`npm update`", color.FgYellow)),
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("universal.remove"),
			Handler:     gui.wrappedDependencyHandler(gui.handleDepUninstall),
			Description: fmt.Sprintf("%s dependency", utils.ColoredString("`npm uninstall`", color.FgYellow)),
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("dependencies.changeType"),
			Handler:     gui.wrappedDependencyHandler(gui.handleChangeDepType),
			Description: "change dependency type (prod/dev/optional)",
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("universal.new"),
			Handler:     gui.wrappedDependencyHandler(gui.handleAddDependency),
			Description: fmt.Sprintf("%s new dependency", utils.ColoredString("`npm install`", color.FgYellow)),
		},
		{
			ViewName:    "deps",
			Key:         gui.getKey("universal.edit"),
			Handler:     gui.wrappedDependencyHandler(gui.handleEditDepConstraint),
			Description: "edit dependency constraint",
		},
		{
			ViewName:    "tarballs",
			Key:         gui.getKey("universal.remove"),
			Handler:     gui.wrappedTarballHandler(gui.handleDeleteTarball),
			Description: "delete tarball",
		},
		{
			ViewName:    "tarballs",
			Key:         gui.getKey("universal.install"),
			Handler:     gui.wrappedTarballHandler(gui.handleInstallTarball),
			Description: fmt.Sprintf("%s tarball", utils.ColoredString("`npm install`", color.FgYellow)),
		},
		{
			ViewName:    "tarballs",
			Key:         gui.getKey("packages.publish"),
			Handler:     gui.wrappedTarballHandler(gui.handlePublishTarball),
			Description: fmt.Sprintf("%s tarball", utils.ColoredString("`npm publish`", color.FgYellow)),
		},
	}
	// Panel-cycling bindings shared by every side panel plus the menu.
	for _, viewName := range []string{"status", "packages", "deps", "scripts", "tarballs", "menu"} {
		bindings = append(bindings, []*Binding{
			{ViewName: viewName, Key: gui.getKey("universal.togglePanel"), Handler: gui.nextView},
			{ViewName: viewName, Key: gui.getKey("universal.prevBlock"), Handler: gui.previousView},
			{ViewName: viewName, Key: gui.getKey("universal.nextBlock"), Handler: gui.nextView},
			{ViewName: viewName, Key: gui.getKey("universal.prevBlock-alt"), Handler: gui.previousView},
			{ViewName: viewName, Key: gui.getKey("universal.nextBlock-alt"), Handler: gui.nextView},
		}...)
	}
	// Appends keybindings to jump to a particular sideView using numbers
	for i, viewName := range []string{"status", "packages", "deps", "scripts", "tarballs"} {
		bindings = append(bindings, &Binding{ViewName: "", Key: rune(i+1) + '0', Handler: gui.goToSideView(viewName)})
		bindings = append(bindings, &Binding{ViewName: viewName, Key: gui.getKey("universal.goInto"), Handler: gui.wrappedHandler(gui.enterMainView)})
	}
	// Generic navigation (line/page movement, click, search) for every list view.
	for _, listView := range gui.getListViews() {
		bindings = append(bindings, []*Binding{
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.prevItem-alt"), Handler: listView.handlePrevLine},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.prevItem"), Handler: listView.handlePrevLine},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gocui.MouseWheelUp, Handler: listView.handlePrevLine},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.nextItem-alt"), Handler: listView.handleNextLine},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.nextItem"), Handler: listView.handleNextLine},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.prevPage"), Handler: listView.handlePrevPage, Description: gui.Tr.SLocalize("prevPage")},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.nextPage"), Handler: listView.handleNextPage, Description: gui.Tr.SLocalize("nextPage")},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gui.getKey("universal.gotoTop"), Handler: listView.handleGotoTop, Description: gui.Tr.SLocalize("gotoTop")},
			{
				ViewName:    listView.viewName,
				Contexts:    []string{listView.context},
				Key:         gui.getKey("universal.gotoBottom"),
				Handler:     listView.handleGotoBottom,
				Description: gui.Tr.SLocalize("gotoBottom"),
			},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gocui.MouseWheelDown, Handler: listView.handleNextLine},
			{ViewName: listView.viewName, Contexts: []string{listView.context}, Key: gocui.MouseLeft, Handler: listView.handleClick},
			{
				ViewName:    listView.viewName,
				Contexts:    []string{listView.context},
				Key:         gui.getKey("universal.startSearch"),
				Handler:     gui.handleOpenSearch,
				Description: gui.Tr.SLocalize("startSearch"),
			},
		}...)
	}
	return bindings
}
// keybindings registers every initial keybinding (and any tab-click
// bindings) with the gocui instance.
func (gui *Gui) keybindings(g *gocui.Gui) error {
	for _, b := range gui.GetInitialKeybindings() {
		err := g.SetKeybinding(b.ViewName, b.Contexts, b.Key, b.Modifier, b.Handler)
		if err != nil {
			return err
		}
	}
	// none yet
	tabClickBindings := map[string]func(int) error{}
	for viewName, handler := range tabClickBindings {
		if err := g.SetTabClickBinding(viewName, handler); err != nil {
			return err
		}
	}
	return nil
}
|
package url
import (
"github.com/pkg/errors"
"github.com/spf13/afero"
)
// unpackOneAgentZip extracts the downloaded OneAgent zip archive from
// tmpFile into targetDir, logging progress along the way. The file size
// is logged as 0 if Stat fails.
func (installer Installer) unpackOneAgentZip(targetDir string, tmpFile afero.File) error {
	var fileSize int64
	stat, statErr := tmpFile.Stat()
	if statErr == nil {
		fileSize = stat.Size()
	}
	log.Info("saved OneAgent package", "dest", tmpFile.Name(), "size", fileSize)
	log.Info("unzipping OneAgent package")
	err := installer.extractor.ExtractZip(tmpFile, targetDir)
	if err != nil {
		log.Info("failed to unzip OneAgent package", "err", err)
		return errors.WithStack(err)
	}
	log.Info("unzipped OneAgent package")
	return nil
}
|
// Package transparent is a library that provides transparent operations for key-value stores.
// Transparent Layer is tearable on Stack. In addition to caching, it is also possible to
// transparently use a layer of synchronization between distributed systems.
// See subpackage for implementation.
package transparent
// Stack is stacked layer. The embedded Layer is the current top of the
// stack; operations on the Stack are delegated through it.
type Stack struct {
	Layer         // most recently stacked layer (top)
	all []Layer   // every stacked layer, in stacking order
}
// NewStack returns an empty Stack ready to have layers stacked onto it.
func NewStack() *Stack {
	s := new(Stack)
	s.all = make([]Layer, 0)
	return s
}
// Stack pushes the layer onto the Stack, wiring it in front of the
// current top layer (if any).
func (s *Stack) Stack(l Layer) error {
	top := s.Layer
	if top != nil {
		if err := l.setNext(top); err != nil {
			return err
		}
	}
	s.Layer = l
	s.all = append(s.all, l)
	return nil
}
// Start initializes every stacked layer, in stacking order, stopping at
// the first error.
func (s *Stack) Start() error {
	for _, layer := range s.all {
		if err := layer.start(); err != nil {
			return err
		}
	}
	return nil
}
// Stop cleans up every stacked layer, in stacking order, stopping at
// the first error.
func (s *Stack) Stop() error {
	for _, layer := range s.all {
		if err := layer.stop(); err != nil {
			return err
		}
	}
	return nil
}
// Layer is stackable function
type Layer interface {
	// Set stores the value under key.
	Set(key interface{}, value interface{}) error
	// Get retrieves the value stored under key.
	Get(key interface{}) (value interface{}, err error)
	// Remove deletes the value stored under key.
	Remove(key interface{}) error
	// Sync flushes pending operations through the layer.
	Sync() error
	setNext(Layer) error // wires the next (lower) layer; used by Stack.Stack
	start() error        // lifecycle hook called by Stack.Start
	stop() error         // lifecycle hook called by Stack.Stop
}
// MessageType of operation
type MessageType int

// MessageType of operation; one value per Layer method that can be
// propagated as a Message.
const (
	MessageSet MessageType = iota
	MessageGet
	MessageRemove
	MessageSync
)
// Message is layer operation
type Message struct {
	Key     interface{} // key the operation applies to
	Value   interface{} // value for set operations
	Message MessageType // which operation this message represents
	UUID    string      // identifier for correlating messages
}
// KeyNotFoundError means specified key is not found in the layer
type KeyNotFoundError struct {
	Key interface{} // the key that was looked up; note it is not included in Error()
}

// Error implements the error interface. The message intentionally omits
// e.Key even though it is carried on the struct.
func (e *KeyNotFoundError) Error() string { return "requested key is not found" }
|
package almanack
import (
"net/http"
"github.com/spotlightpa/almanack/internal/aws"
"github.com/spotlightpa/almanack/internal/db"
"github.com/spotlightpa/almanack/internal/github"
"github.com/spotlightpa/almanack/internal/google"
"github.com/spotlightpa/almanack/internal/index"
"github.com/spotlightpa/almanack/internal/mailchimp"
"github.com/spotlightpa/almanack/internal/plausible"
"github.com/spotlightpa/almanack/internal/slack"
)
// Services bundles every external dependency the almanack app uses:
// HTTP client, database, content/blob stores, Slack clients, search
// indexer, newsletter and email services, Google services, and analytics.
type Services struct {
	arcFeedURL           string
	MailchimpSignupURL   string
	NetlifyWebhookSecret string
	Client               *http.Client
	Queries              *db.Queries
	Tx                   *db.Txable
	github.ContentStore
	ImageStore  aws.BlobStore
	FileStore   aws.BlobStore
	SlackSocial slack.Client
	SlackTech   slack.Client
	Indexer     index.Indexer
	// NOTE(review): "NewletterService" looks like a typo for NewsletterService;
	// renaming would break existing callers — confirm before changing.
	NewletterService mailchimp.V3
	Gsvc             *google.Service
	mailchimp.EmailService
	Plausible plausible.API
}
|
package main
import (
"fmt"
"github.com/pulumi/pulumi-azure-native/sdk/go/azure/resources"
"github.com/pulumi/pulumi-azure-native/sdk/go/azure/web"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
)
// Names and settings for the Azure resources provisioned in main, plus the
// app-settings and Pulumi-config keys used to pass secrets to the web app.
const (
	resourceGroupName string = "funcy-app-rg"
	plan              string = "funcy-app-plan"
	planOS                   = "Linux"
	planSKUCode              = "B1"
	planSKU                  = "Basic"
	dockerImage              = "abhirockzz/funcy-go"
	appName                  = "funcy-api-backend"
	// App-settings keys set on the web app.
	storageConfigName        = "WEBSITES_ENABLE_APP_SERVICE_STORAGE"
	giphyAPIKeyAppConfigName = "GIPHY_API_KEY"
	slackSecretAppConfigName = "SLACK_SIGNING_SECRET"
	// Pulumi stack-config keys the secrets are read from.
	giphyAPIPulumiConfigName    = "giphyapikey"
	slackSecretPulumiConfigName = "slacksecret"
)
// main provisions an Azure resource group, a Linux App Service plan, and a
// container-based web app running the funcy Docker image, wiring in the
// Giphy/Slack secrets from Pulumi config, then exports the app URL.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that holds everything below.
		resourceGroup, err := resources.NewResourceGroup(ctx, resourceGroupName, nil)
		if err != nil {
			return err
		}
		// Linux Basic (B1) App Service plan. Reserved=true is required for Linux.
		appSvcPlan, err := web.NewAppServicePlan(ctx, plan, &web.AppServicePlanArgs{
			ResourceGroupName: resourceGroup.Name,
			Kind:              pulumi.String(planOS),
			Reserved:          pulumi.Bool(true),
			Sku: &web.SkuDescriptionArgs{
				Name: pulumi.String(planSKUCode),
				Tier: pulumi.String(planSKU),
			},
		})
		if err != nil {
			return err
		}
		// Secrets come from the stack configuration (set via `pulumi config set --secret`).
		cfg := config.New(ctx, "")
		giphyAPIKey := cfg.RequireSecret(giphyAPIPulumiConfigName)
		slackSecret := cfg.RequireSecret(slackSecretPulumiConfigName)
		helloApp, err := web.NewWebApp(ctx, appName, &web.WebAppArgs{
			ResourceGroupName: resourceGroup.Name,
			ServerFarmId:      appSvcPlan.ID(),
			SiteConfig: &web.SiteConfigArgs{
				AppSettings: web.NameValuePairArray{
					&web.NameValuePairArgs{
						Name:  pulumi.String(storageConfigName),
						Value: pulumi.String("false"),
					},
					&web.NameValuePairArgs{
						Name:  pulumi.String(giphyAPIKeyAppConfigName),
						Value: giphyAPIKey,
					},
					&web.NameValuePairArgs{
						Name:  pulumi.String(slackSecretAppConfigName),
						Value: slackSecret,
					},
				},
				AlwaysOn:       pulumi.Bool(true),
				LinuxFxVersion: pulumi.String(fmt.Sprintf("%v%v", "DOCKER|", dockerImage)),
			},
			HttpsOnly: pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// Export the public endpoint of the deployed function.
		ctx.Export("appurl", helloApp.DefaultHostName.ApplyT(func(defaultHostName string) (string, error) {
			return fmt.Sprintf("%v%v%v", "https://", defaultHostName, "/api/funcy"), nil
		}).(pulumi.StringOutput))
		return nil
	})
}
|
package main
import (
"flag"
"fmt" // пакет для форматированного ввода вывода
"log" // пакет для логирования
"net/http" // пакет для поддержки HTTP протокола
// пакет для работы с UTF-8 строками
)
// main starts a minimal HTTP server that answers every path with a
// greeting. The listen port is configurable via the -port flag.
func main() {
	// Fixed: the flag usage text previously read "an int", but the flag is
	// a string holding the TCP port to listen on.
	port := flag.String("port", "3000", "TCP port to listen on")
	flag.Parse()
	http.HandleFunc("/", HelloServer)
	// ListenAndServe blocks until the server fails or is shut down.
	if err := http.ListenAndServe(":"+*port, nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
// hello returns the greeting (including version) served by this application.
func hello() string {
	const greeting = "Hello world v0.5.3"
	return greeting
}
// HelloServer writes the greeting to every incoming request.
func HelloServer(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "%s", hello())
}
|
package private
import (
"github.com/google/go-querystring/query"
"github.com/pkg/errors"
"github.com/potix/gobitflyer/api/types"
"github.com/potix/gobitflyer/client"
)
const (
	// getBalanceHistoryPath is the bitFlyer REST endpoint for balance history.
	getBalanceHistoryPath string = "/v1/me/getbalancehistory"
)

// GetBalanceHistoryResponse is the JSON response: a list of balance events.
type GetBalanceHistoryResponse []*GetBalanceHistoryEvent

// GetBalanceHistoryEvent is one entry in the account's balance history.
type GetBalanceHistoryEvent struct {
	Id           int64              `json:"id"`
	TradeDate    string             `json:"trade_date"`
	ProductCode  types.ProductCode  `json:"product_code"`
	CurrencyCode types.CurrencyCode `json:"currency_code"`
	TradeType    types.TradeType    `json:"trade_type"`
	Price        float64            `json:"price"`
	Amount       float64            `json:"amount"`
	Quantity     float64            `json:"quantity"`
	Commission   float64            `json:"commission"`
	Balance      float64            `json:"balance"`
	OrderId      string             `json:"order_id"`
}

// GetBalanceHistoryRequest describes the query parameters of a balance
// history request; Path is excluded from the encoded query (`url:"-"`).
type GetBalanceHistoryRequest struct {
	Path         string             `url:"-"`
	CurrencyCode types.CurrencyCode `url:"currency_code"`
	types.Pagination
}
// CreateHTTPRequest encodes the request's url-tagged fields as a query
// string and builds the GET request against the given endpoint.
func (r *GetBalanceHistoryRequest) CreateHTTPRequest(endpoint string) (*client.HTTPRequest, error) {
	v, err := query.Values(r)
	if err != nil {
		return nil, errors.Wrapf(err, "can not create query of get balance history")
	}
	// Renamed from `query`: the original local shadowed the imported
	// go-querystring `query` package used just above.
	encoded := v.Encode()
	pathQuery := r.Path + "?" + encoded
	return &client.HTTPRequest{
		PathQuery: pathQuery,
		URL:       endpoint + pathQuery,
		Method:    "GET",
		Headers:   make(map[string]string),
		Body:      nil,
	}, nil
}
// NewGetBalanceHistoryRequest assembles a balance-history request for the
// given currency with the supplied pagination window.
func NewGetBalanceHistoryRequest(currencyCode types.CurrencyCode, count int64, before int64, after int64) (*GetBalanceHistoryRequest) {
	pagination := types.Pagination{
		Count:  count,
		Before: before,
		After:  after,
	}
	req := &GetBalanceHistoryRequest{
		Path:         getBalanceHistoryPath,
		CurrencyCode: currencyCode,
	}
	req.Pagination = pagination
	return req
}
|
// Package goutils contains a collection of useful Golang utility methods and libraries
package goutils
// SliceContains returns true if a slice of strings includes a specific string
func SliceContains(needle string, haystack []string) bool {
	found := false
	for i := 0; i < len(haystack) && !found; i++ {
		found = haystack[i] == needle
	}
	return found
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package appx
// Sign Windows Universal (UWP) .appx and .appxbundle
import (
"fmt"
"io"
"os"
"github.com/sassoftware/relic/v7/lib/certloader"
"github.com/sassoftware/relic/v7/lib/magic"
"github.com/sassoftware/relic/v7/lib/signappx"
"github.com/sassoftware/relic/v7/signers"
"github.com/sassoftware/relic/v7/signers/pecoff"
"github.com/sassoftware/relic/v7/signers/zipbased"
)
// AppxSigner signs and verifies Windows Universal (UWP) .appx and
// .appxbundle packages, detected via their zip-based magic.
var AppxSigner = &signers.Signer{
	Name:      "appx",
	Magic:     magic.FileTypeAPPX,
	CertTypes: signers.CertTypeX509,
	Transform: zipbased.Transform,
	Sign:      sign,
	Verify:    verify,
}
// init adds the PE/COFF opus flags to the appx signer and registers it
// with the global signer registry.
func init() {
	pecoff.AddOpusFlags(AppxSigner)
	signers.Register(AppxSigner)
}
// sign digests the transformed appx stream and produces a signed binary
// patch, recording the counter-signature in the audit log.
func sign(r io.Reader, cert *certloader.Certificate, opts signers.SignOpts) ([]byte, error) {
	digest, err := signappx.DigestAppxTar(r, opts.Hash, false)
	if err != nil {
		return nil, err
	}
	patch, priSig, _, signErr := digest.Sign(opts.Context(), cert, pecoff.OpusFlags(opts))
	if signErr != nil {
		return nil, signErr
	}
	opts.Audit.SetCounterSignature(priSig.CounterSignature)
	return opts.SetBinPatch(patch)
}
// verify checks the package's signature and returns its description.
// For bundles, the name/version reported comes from one contained appx.
func verify(f *os.File, opts signers.VerifyOpts) ([]*signers.Signature, error) {
	size, err := f.Seek(0, io.SeekEnd)
	if err != nil {
		return nil, err
	}
	sig, err := signappx.Verify(f, size, opts.NoDigests)
	if err != nil {
		return nil, err
	}
	appxSig := sig
	if sig.IsBundle {
		// Pick one of the bundled signatures for display purposes.
		for _, nested := range sig.Bundled {
			appxSig = nested
			break
		}
	}
	result := &signers.Signature{
		Package:       fmt.Sprintf("{%s} %s %s", appxSig.Name, appxSig.DisplayName, appxSig.Version),
		SigInfo:       pecoff.FormatOpus(sig.OpusInfo),
		Hash:          sig.Hash,
		X509Signature: sig.Signature,
	}
	return []*signers.Signature{result}, nil
}
|
package api
import (
backendProto "github.com/clintjedwards/comet/backend/proto"
"github.com/clintjedwards/comet/proto"
"github.com/rs/zerolog/log"
)
// spawnComet starts and tracks the creation of a comet: it re-reads the
// stored record, asks the backend plugin to create the machine, and then
// persists the resulting status (RUNNING on success, STOPPED on failure).
func (api *API) spawnComet(request *backendProto.CreateMachineRequest) {
	// It should never be possible to get into this state unless there is database corruption
	comet, err := api.storage.GetComet(request.Id)
	if err != nil {
		log.Error().Err(err).Msg("could not get newly created comet")
		return
	}
	// Copy the stored comet so status/instance fields can be updated below.
	updatedComet := &proto.Comet{
		Id:         comet.Id,
		InstanceId: comet.InstanceId,
		Name:       comet.Name,
		Notes:      comet.Notes,
		Size:       comet.Size,
		Address:    comet.Address,
		Created:    comet.Created,
		Modified:   comet.Modified,
		Deletion:   comet.Deletion,
	}
	response, err := api.backendPlugin.CreateMachine(request)
	if err != nil {
		log.Error().Err(err).Interface("comet", updatedComet).Msg("could not create comet")
		updatedComet.Status = proto.Comet_STOPPED
	} else {
		log.Info().Interface("comet", updatedComet).Msg("created new comet")
		updatedComet.Status = proto.Comet_RUNNING
		updatedComet.InstanceId = response.Machine.InstanceId
	}
	// Persist the outcome; removed the redundant bare `return` that
	// previously followed this block in a void function.
	if err := api.storage.UpdateComet(request.Id, updatedComet); err != nil {
		log.Error().Err(err).Msg("could not update comet info")
	}
}
|
package authentication
import "errors"
var (
	// ErrLogin is returned when the supplied CPF/secret pair is incorrect.
	ErrLogin = errors.New("given secret or CPF are incorrect")
	// ErrInvalidSecret is returned when the given secret is invalid.
	ErrInvalidSecret = errors.New("given secret is invalid")
)
|
package model
import (
"encoding/json"
es_models "github.com/caos/zitadel/internal/eventstore/models"
"github.com/caos/zitadel/internal/user/model"
"testing"
"time"
)
// TestAppendDeactivatedEvent verifies that appending a deactivate event
// moves the user into the inactive state.
func TestAppendDeactivatedEvent(t *testing.T) {
	cases := []struct {
		name string
		user *User
		want *User
	}{
		{
			name: "append deactivate event",
			user: &User{},
			want: &User{State: int32(model.UserStateInactive)},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.user.appendDeactivatedEvent()
			if tc.user.State != tc.want.State {
				t.Errorf("got wrong result: expected: %v, actual: %v ", tc.want, tc.user)
			}
		})
	}
}
// TestAppendReactivatedEvent verifies that appending a reactivate event
// moves the user back into the active state.
func TestAppendReactivatedEvent(t *testing.T) {
	cases := []struct {
		name string
		user *User
		want *User
	}{
		{
			name: "append reactivate event",
			user: &User{},
			want: &User{State: int32(model.UserStateActive)},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.user.appendReactivatedEvent()
			if tc.user.State != tc.want.State {
				t.Errorf("got wrong result: expected: %v, actual: %v ", tc.want, tc.user)
			}
		})
	}
}
// TestAppendLockEvent verifies that appending a lock event moves the user
// into the locked state.
func TestAppendLockEvent(t *testing.T) {
	cases := []struct {
		name string
		user *User
		want *User
	}{
		{
			name: "append lock event",
			user: &User{},
			want: &User{State: int32(model.UserStateLocked)},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.user.appendLockedEvent()
			if tc.user.State != tc.want.State {
				t.Errorf("got wrong result: expected: %v, actual: %v ", tc.want, tc.user)
			}
		})
	}
}
// TestAppendUnlockEvent verifies that appending an unlock event moves the
// user back into the active state.
func TestAppendUnlockEvent(t *testing.T) {
	cases := []struct {
		name string
		user *User
		want *User
	}{
		{
			name: "append unlock event",
			user: &User{},
			want: &User{State: int32(model.UserStateActive)},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.user.appendUnlockedEvent()
			if tc.user.State != tc.want.State {
				t.Errorf("got wrong result: expected: %v, actual: %v ", tc.want, tc.user)
			}
		})
	}
}
// TestAppendInitUserCodeEvent verifies that applying an init-user-code
// event populates the user's InitCode with the event's payload.
func TestAppendInitUserCodeEvent(t *testing.T) {
	cases := []struct {
		name  string
		user  *User
		code  *InitUserCode
		event *es_models.Event
		want  *User
	}{
		{
			name:  "append init user code event",
			user:  &User{},
			code:  &InitUserCode{Expiry: time.Hour * 30},
			event: &es_models.Event{},
			want:  &User{InitCode: &InitUserCode{Expiry: time.Hour * 30}},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.code != nil {
				// The event carries the code as its JSON-encoded payload.
				data, _ := json.Marshal(tc.code)
				tc.event.Data = data
			}
			tc.user.appendInitUsercodeCreatedEvent(tc.event)
			if tc.user.InitCode.Expiry != tc.want.InitCode.Expiry {
				t.Errorf("got wrong result: expected: %v, actual: %v ", tc.want, tc.user)
			}
		})
	}
}
|
package primitives
// World is a collection of hitable objects that can be ray-tested as one.
type World struct {
	elements []Hitable // all objects added to the scene
}
// Add appends a single hitable object to the world.
func (w *World) Add(h Hitable) {
	w.elements = append(w.elements, h)
}
// AddAll appends all given hitables to the world.
// Uses a single variadic append instead of the previous per-element loop;
// the resulting contents and ordering are identical.
func (w *World) AddAll(hitables ...Hitable) {
	w.elements = append(w.elements, hitables...)
}
func (w *World) Hit(r Ray, tMin float64, tMax float64) (bool, HitRecord) {
hitAnything := false
closest := tMax
record := HitRecord{}
for _, element := range w.elements {
hit, tempRecord := element.Hit(r, tMin, closest)
if hit {
hitAnything = true
closest = tempRecord.T
record = tempRecord
}
}
return hitAnything, record
} |
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package example
import (
"context"
"chromiumos/tast/testing"
)
// init registers the ReconnectToDUT example test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:     ReconnectToDUT,
		Desc:     "Demonstrates connecting to and disconnecting from DUT",
		Contacts: []string{"nya@chromium.org", "tast-owners@google.com"},
		Attr:     []string{"group:mainline", "informational"},
	})
}
// ReconnectToDUT exercises the DUT connection lifecycle: verify the initial
// connection, disconnect, reconnect, then deliberately leave the DUT
// disconnected for the framework to restore.
func ReconnectToDUT(ctx context.Context, s *testing.State) {
	d := s.DUT()
	if !d.Connected(ctx) {
		s.Error("Not initially connected to DUT")
	}
	s.Log("Disconnecting from DUT")
	if err := d.Disconnect(ctx); err != nil {
		s.Error("Failed to disconnect from DUT: ", err)
	}
	if d.Connected(ctx) {
		s.Error("Still connected after disconnecting")
	}
	s.Log("Connecting to DUT")
	if err := d.Connect(ctx); err != nil {
		// Fatal rather than Error: the remaining checks are meaningless
		// without a live connection.
		s.Fatal("Failed to connect to DUT: ", err)
	}
	if !d.Connected(ctx) {
		s.Error("Not connected after connecting")
	}
	// Leave the DUT in a disconnected state.
	// The connection should automatically be reestablished before the next test is run.
	s.Log("Disconnecting from DUT again")
	if err := d.Disconnect(ctx); err != nil {
		s.Error("Failed to disconnect from DUT: ", err)
	}
}
|
package session
import (
"errors"
uuid "github.com/satori/go.uuid"
)
// ErrStateNotFound is returned from Store.Get() when the requested session id was not found in the store.
var ErrStateNotFound = errors.New("no session state was found in the session store")
// Store represents a session data store.
// This is an abstract interface that can be implemented against several different types of data stores.
type Store interface {
	// Save saves the provided `sessionState` and associated SessionID to the store.
	// The `sessionState` parameter is typically a pointer to a struct containing all the data you want to be
	// associated with the given SessionID.
	Save(sid SessionID, suuid uuid.UUID, sessionState interface{}) error
	// Get populates `sessionState` with the data previously saved for the given SessionID.
	// Implementations should return ErrStateNotFound when the id is unknown.
	Get(sid SessionID, sessionState interface{}) error
	// GetSessionId retrieves the SessionID based on the session UUID.
	GetSessionId(suuid uuid.UUID) (SessionID, error)
	// Exists tests if the given key is set.
	Exists(sid SessionID) (bool, error)
	// Delete deletes all state data associated with the SessionID from the store.
	Delete(sid SessionID) error
}
|
package leetcode
// findTheDifference returns the one byte that appears in t but not in s,
// where t is s shuffled with exactly one extra character (LeetCode 389).
//
// Every byte common to s and t cancels out under XOR, leaving only the
// extra byte. This replaces the original two-maps-plus-letter-table scan:
// it is O(len(s)+len(t)) with no allocations, works for any byte values
// (not just 'a'..'z'), and cannot panic on equal inputs (it returns 0,
// where the original indexed an empty slice).
func findTheDifference(s string, t string) byte {
	var diff byte
	for i := 0; i < len(s); i++ {
		diff ^= s[i]
	}
	for i := 0; i < len(t); i++ {
		diff ^= t[i]
	}
	return diff
}
|
package quartz
import (
"errors"
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
// CronTrigger implements the quartz.Trigger interface.
// Used to fire a Job at given moments in time, defined with Unix 'cron-like' schedule definitions.
//
// Examples:
//
// Expression Meaning
// "0 0 12 * * ?" Fire at 12pm (noon) every day
// "0 15 10 ? * *" Fire at 10:15am every day
// "0 15 10 * * ?" Fire at 10:15am every day
// "0 15 10 * * ? *" Fire at 10:15am every day
// "0 * 14 * * ?" Fire every minute starting at 2pm and ending at 2:59pm, every day
// "0 0/5 14 * * ?" Fire every 5 minutes starting at 2pm and ending at 2:55pm, every day
// "0 0/5 14,18 * * ?" Fire every 5 minutes starting at 2pm and ending at 2:55pm,
// AND fire every 5 minutes starting at 6pm and ending at 6:55pm, every day
// "0 0-5 14 * * ?" Fire every minute starting at 2pm and ending at 2:05pm, every day
// "0 10,44 14 ? 3 WED" Fire at 2:10pm and at 2:44pm every Wednesday in the month of March.
// "0 15 10 ? * MON-FRI" Fire at 10:15am every Monday, Tuesday, Wednesday, Thursday and Friday
// "0 15 10 15 * ?" Fire at 10:15am on the 15th day of every month
type CronTrigger struct {
	// expression is the raw cron expression as supplied by the caller.
	expression string
	// fields are the seven parsed fields (second .. year); an empty field
	// means that position was a wildcard.
	fields []*CronField
	// lastDefined is the index of the last non-wildcard field, or -1 when
	// the whole expression is wildcards.
	lastDefined int
}
// NewCronTrigger returns a new CronTrigger for the given cron expression,
// or an error if the expression does not validate.
func NewCronTrigger(expr string) (*CronTrigger, error) {
	fields, err := validateCronExpression(expr)
	if err != nil {
		return nil, err
	}
	// Track the index of the last explicitly-defined (non-wildcard) field;
	// the parser uses it to decide when the search is complete.
	lastDefined := -1
	for i, field := range fields {
		if len(field.values) > 0 {
			lastDefined = i
		}
	}
	// full wildcard expression
	if lastDefined == -1 {
		// Pin the seconds field to every value so the trigger fires each second.
		fields[0].values, _ = fillRange(0, 59)
	}
	return &CronTrigger{expr, fields, lastDefined}, nil
}
// NextFireTime returns the next time at which the CronTrigger is scheduled to fire.
// prev is nanoseconds since the Unix epoch (as is the returned value).
func (ct *CronTrigger) NextFireTime(prev int64) (int64, error) {
	parser := NewCronExpressionParser(ct.lastDefined)
	return parser.nextTime(prev, ct.fields)
}
// Description returns a human-readable CronTrigger description.
func (ct *CronTrigger) Description() string {
	// Plain concatenation produces the same "CronTrigger <expr>" string
	// without the formatting machinery.
	return "CronTrigger " + ct.expression
}
// getOneField returns the parsed values of the field at idx,
// or nil when that field was never populated.
func (ct *CronTrigger) getOneField(idx int) []int {
	field := ct.fields[idx]
	if field == nil {
		return nil
	}
	return field.values
}
// The accessors below expose the parsed values of each cron field;
// an empty slice means the field was a wildcard.
func (ct *CronTrigger) Second() []int {
	return ct.getOneField(secondIndex)
}
func (ct *CronTrigger) Minute() []int {
	return ct.getOneField(minuteIndex)
}
func (ct *CronTrigger) Hour() []int {
	return ct.getOneField(hourIndex)
}
func (ct *CronTrigger) Day() []int {
	return ct.getOneField(dayOfMonthIndex)
}
func (ct *CronTrigger) Month() []int {
	return ct.getOneField(monthIndex)
}
func (ct *CronTrigger) DayOfWeek() []int {
	return ct.getOneField(dayOfWeekIndex)
}
func (ct *CronTrigger) Year() []int {
	return ct.getOneField(yearIndex)
}
// ExpressionForHuman renders the defined (non-wildcard) fields as a
// comma-separated "name=[values]" list, from year down to second.
func (ct *CronTrigger) ExpressionForHuman() string {
	// Table-drive the seven fields instead of seven copy-pasted if blocks;
	// order matches the original output: year first, second last.
	parts := []struct {
		label  string
		values []int
	}{
		{"year", ct.Year()},
		{"dayOfWeek", ct.DayOfWeek()},
		{"month", ct.Month()},
		{"day", ct.Day()},
		{"hour", ct.Hour()},
		{"minute", ct.Minute()},
		{"second", ct.Second()},
	}
	var described []string
	for _, p := range parts {
		if len(p.values) > 0 {
			described = append(described, fmt.Sprintf("%s=%v", p.label, p.values))
		}
	}
	return strings.Join(described, ", ")
}
// CronExpressionParser parses cron expressions.
// It is single-use: the bump flags accumulate carry-over state as the
// fields are evaluated from seconds upward during one nextTime call.
type CronExpressionParser struct {
	minuteBump bool // seconds rolled over; advance the minute
	hourBump   bool // minutes rolled over; advance the hour
	dayBump    bool // hours rolled over; advance the day
	monthBump  bool // day rolled over; advance the month
	yearBump   bool // month rolled over; advance the year
	done       bool // the last explicitly-defined field has been resolved
	lastDefined int
	maxDays     int // number of days in the month under consideration
}
// NewCronExpressionParser returns a new CronExpressionParser for an
// expression whose last explicitly-defined field index is lastDefined.
// All bump flags start false and maxDays starts at 0; nextTime fills them in.
func NewCronExpressionParser(lastDefined int) *CronExpressionParser {
	// A named-field literal replaces the original eight-value positional
	// literal: the zero values are implied and the literal cannot silently
	// mis-assign fields if the struct layout changes.
	return &CronExpressionParser{lastDefined: lastDefined}
}
// CronField represents a parsed cron expression field as a sorted array
// of allowed values; empty means the field was a wildcard.
type CronField struct {
	values []int
}
// isEmpty checks if the CronField values array is empty.
func (cf *CronField) isEmpty() bool {
	return len(cf.values) == 0
}
// String is the CronField fmt.Stringer implementation. It renders the
// values as a comma-separated list, e.g. []int{1, 2, 3} -> "1,2,3",
// and the empty field as "".
func (cf *CronField) String() string {
	// Convert each value directly rather than round-tripping through
	// fmt.Sprint + strings.Fields + strings.Trim as before: same output,
	// one pass, no re-parsing of fmt's "[1 2 3]" rendering.
	parts := make([]string, len(cf.values))
	for i, v := range cf.values {
		parts[i] = strconv.Itoa(v)
	}
	return strings.Join(parts, ",")
}
var (
	// months and daysInMonth are 1-indexed (index 0 is a placeholder) so
	// month numbers map directly to entries.
	months = []string{"0", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}
	days = []string{"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}
	// February is listed as 28 days; leap years are presumably handled by
	// maxDays (defined elsewhere) — TODO confirm.
	daysInMonth = []int{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
	// the pre-defined cron expressions
	special = map[string]string{
		"@yearly": "0 0 0 1 1 *",
		"@monthly": "0 0 0 1 * *",
		"@weekly": "0 0 0 * * 0",
		"@daily": "0 0 0 * * *",
		"@hourly": "0 0 * * * *",
	}
	// Layouts used by nextTime to round-trip timestamps through strings.
	readDateLayout = "Mon Jan 2 15:04:05 2006"
	writeDateLayout = "Jan 2 15:04:05 2006"
)
// <second> <minute> <hour> <day-of-month> <month> <day-of-week> <year>
// <year> field is optional
// These constants index into the seven-element field slice built by
// buildCronField.
const (
	secondIndex = iota
	minuteIndex
	hourIndex
	dayOfMonthIndex
	monthIndex
	dayOfWeekIndex
	yearIndex
)
// nextTime computes the next fire time (nanoseconds since the Unix epoch,
// per the UnixNano at the end) strictly by formatting prev as a string,
// advancing each field textually, and re-parsing. Panics raised by the
// field helpers are converted to errors via recover.
func (parser *CronExpressionParser) nextTime(prev int64, fields []*CronField) (nextTime int64, err error) {
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case string:
				err = errors.New(x)
			case error:
				err = x
			default:
				err = errors.New("Unknown cron expression error")
			}
		}
	}()
	// prev is nanoseconds; divide down to seconds for time.Unix.
	tfmt := time.Unix(prev/int64(time.Second), 0).Local().Format(readDateLayout)
	// Collapse one double space — presumably guards against a padded
	// single-digit day producing "Jan  2" — TODO confirm.
	ttok := strings.Split(strings.Replace(tfmt, "  ", " ", 1), " ")
	hms := strings.Split(ttok[3], ":")
	parser.maxDays = maxDays(intVal(months, ttok[1]), atoi(ttok[4]))
	// Resolve fields from least to most significant so carries propagate
	// upward through the parser's bump flags.
	second := parser.nextSeconds(atoi(hms[2]), fields[0])
	minute := parser.nextMinutes(atoi(hms[1]), fields[1])
	hour := parser.nextHours(atoi(hms[0]), fields[2])
	dayOfMonth := parser.nextDay(intVal(days, ttok[0]), fields[5], atoi(ttok[2]), fields[3])
	month := parser.nextMonth(ttok[1], fields[4])
	year := parser.nextYear(ttok[4], fields[6])
	nstr := fmt.Sprintf("%s %s %s:%s:%s %s", month, strconv.Itoa(dayOfMonth),
		hour, minute, second, year)
	ntime, err := time.ParseInLocation(writeDateLayout, nstr, time.Local)
	nextTime = ntime.UnixNano()
	return
}
var blanks = regexp.MustCompile(`[ ]+`)
// validateCronExpression expands @-shortcuts, normalizes whitespace, and
// validates the token layout before delegating to buildCronField.
// The ? wildcard is only used in the day-of-month and day-of-week fields.
func validateCronExpression(expression string) ([]*CronField, error) {
	var tokens []string
	if predefined, ok := special[expression]; ok {
		tokens = strings.Split(predefined, " ")
	} else {
		// Collapse runs of spaces so splitting on a single space is safe.
		tokens = strings.Split(blanks.ReplaceAllString(expression, " "), " ")
	}
	if n := len(tokens); n < 6 || n > 7 {
		return nil, cronError("Invalid expression length")
	}
	// The year field is optional; default it to a wildcard.
	if len(tokens) == 6 {
		tokens = append(tokens, "*")
	}
	// Only one of day-of-month / day-of-week may be explicitly set.
	if (tokens[3] != "?" && tokens[3] != "*") && (tokens[5] != "?" && tokens[5] != "*") {
		return nil, cronError("Day field was set twice")
	}
	if tokens[6] != "*" {
		return nil, cronError("Year field is not supported, use asterisk") // TODO: support year field
	}
	return buildCronField(tokens)
}
// buildCronField parses the seven cron tokens into CronFields, applying
// each position's numeric bounds and, for month and day-of-week, the
// symbolic-name dictionary.
func buildCronField(tokens []string) ([]*CronField, error) {
	bounds := []struct {
		min, max int
		names    []string
	}{
		{0, 59, nil},          // second
		{0, 59, nil},          // minute
		{0, 23, nil},          // hour
		{1, 31, nil},          // day of month
		{1, 12, months},       // month
		{0, 6, days},          // day of week
		{1970, 1970 * 2, nil}, // year
	}
	fields := make([]*CronField, len(bounds))
	for i, b := range bounds {
		// Passing a nil names slice is equivalent to omitting the
		// variadic translate argument in parseField.
		field, err := parseField(tokens[i], b.min, b.max, b.names)
		if err != nil {
			return nil, err
		}
		fields[i] = field
	}
	return fields, nil
}
// parseField parses one cron field into a CronField, validating values
// against [min, max]. The optional translate dictionary maps symbolic
// names (e.g. "Mon", "Jan") to their numeric values.
func parseField(field string, min int, max int, translate ...[]string) (*CronField, error) {
	var dict []string
	if len(translate) > 0 {
		dict = translate[0]
	}
	// any value
	if field == "*" || field == "?" {
		return &CronField{[]int{}}, nil
	}
	// single value
	i, err := strconv.Atoi(field)
	if err == nil {
		if inScope(i, min, max) {
			return &CronField{[]int{i}}, nil
		}
		return nil, cronError("Single min/max validation error")
	}
	// list values
	if strings.Contains(field, ",") {
		return parseListField(field, dict)
	}
	// range values
	if strings.Contains(field, "-") {
		return parseRangeField(field, min, max, dict)
	}
	// step values
	if strings.Contains(field, "/") {
		return parseStepField(field, min, max, dict)
	}
	// literal single value (symbolic name such as "Wed" or "Mar")
	if dict != nil {
		i := intVal(dict, field)
		if i >= 0 {
			if inScope(i, min, max) {
				return &CronField{[]int{i}}, nil
			}
			return nil, cronError("Cron literal min/max validation error")
		}
	}
	return nil, cronError("Cron parse error")
}
// parseListField parses a comma-separated list field, accepting either
// all-numeric items or all symbolic names, and returns the sorted values.
func parseListField(field string, translate []string) (*CronField, error) {
	items := strings.Split(field, ",")
	values, err := sliceAtoi(items)
	if err != nil {
		// Not all numeric — fall back to resolving symbolic names.
		if values, err = indexes(items, translate); err != nil {
			return nil, err
		}
	}
	sort.Ints(values)
	return &CronField{values}, nil
}
// parseRangeField parses an "a-b" range field into the inclusive set of
// values from a to b, validating both endpoints against [min, max].
// Endpoints may be numeric or symbolic (via translate).
func parseRangeField(field string, min int, max int, translate []string) (*CronField, error) {
	bounds := strings.Split(field, "-")
	if len(bounds) != 2 {
		return nil, cronError("Parse cron range error")
	}
	from := normalize(bounds[0], translate)
	to := normalize(bounds[1], translate)
	if !inScope(from, min, max) || !inScope(to, min, max) {
		return nil, cronError("Cron range min/max validation error")
	}
	// Declare at first use instead of the original's pre-declared
	// underscore-named `_range` (underscore identifiers are unidiomatic Go).
	values, err := fillRange(from, to)
	if err != nil {
		return nil, err
	}
	return &CronField{values}, nil
}
// parseStepField parses an "a/n" step field into the values a, a+n, a+2n,
// ... up to max, validating the start against [min, max].
func parseStepField(field string, min int, max int, translate []string) (*CronField, error) {
	parts := strings.Split(field, "/")
	if len(parts) != 2 {
		return nil, cronError("Parse cron step error")
	}
	from := normalize(parts[0], translate)
	step := atoi(parts[1])
	if !inScope(from, min, max) {
		return nil, cronError("Cron step min/max validation error")
	}
	// Declare at first use instead of the original's pre-declared
	// underscore-named `_step` (underscore identifiers are unidiomatic Go).
	values, err := fillStep(from, step, max)
	if err != nil {
		return nil, err
	}
	return &CronField{values}, nil
}
// setDone marks the search complete once the last explicitly-defined
// field has been resolved.
func (parser *CronExpressionParser) setDone(index int) {
	if parser.lastDefined == index {
		parser.done = true
	}
}
// lastSet reports whether index is at or beyond the last explicitly-defined
// field, i.e. every remaining field is a wildcard.
func (parser *CronExpressionParser) lastSet(index int) bool {
	// Return the condition directly instead of the original
	// `if cond { return true }; return false`.
	return parser.lastDefined <= index
}
// nextSeconds resolves the seconds field, recording in minuteBump whether
// the value wrapped and the minute must advance. Returns a zero-padded string.
func (parser *CronExpressionParser) nextSeconds(prev int, field *CronField) string {
	var next int
	next, parser.minuteBump = parser.findNextValue(prev, field.values)
	parser.setDone(secondIndex)
	return alignDigit(next, "0")
}
// nextMinutes resolves the minutes field, recording in hourBump whether the
// hour must advance. A wildcard past the last defined field only carries
// the bump from the seconds field.
func (parser *CronExpressionParser) nextMinutes(prev int, field *CronField) string {
	var next int
	if field.isEmpty() && parser.lastSet(minuteIndex) {
		if parser.minuteBump {
			next, parser.hourBump = bumpValue(prev, 59, 1)
			return alignDigit(next, "0")
		}
		return alignDigit(prev, "0")
	}
	next, parser.hourBump = parser.findNextValue(prev, field.values)
	parser.setDone(minuteIndex)
	return alignDigit(next, "0")
}
// nextHours resolves the hours field, recording in dayBump whether the day
// must advance. Mirrors the carry logic of nextMinutes one level up.
func (parser *CronExpressionParser) nextHours(prev int, field *CronField) string {
	var next int
	if field.isEmpty() && parser.lastSet(hourIndex) {
		if parser.hourBump {
			next, parser.dayBump = bumpValue(prev, 23, 1)
			return alignDigit(next, "0")
		}
		return alignDigit(prev, "0")
	}
	next, parser.dayBump = parser.findNextValue(prev, field.values)
	parser.setDone(hourIndex)
	return alignDigit(next, "0")
}
// nextDay resolves the day, honoring either the day-of-month field or the
// day-of-week field (validateCronExpression guarantees at most one is set).
// Day-of-week matches are converted to a day-of-month by stepping forward
// the appropriate number of days within parser.maxDays.
func (parser *CronExpressionParser) nextDay(prevWeek int, weekField *CronField,
	prevMonth int, monthField *CronField) int {
	var nextMonth int
	// Both wildcards past the last defined field: only carry the hour bump.
	if weekField.isEmpty() && monthField.isEmpty() && parser.lastSet(dayOfWeekIndex) {
		if parser.dayBump {
			nextMonth, parser.monthBump = bumpValue(prevMonth, parser.maxDays, 1)
			return nextMonth
		}
		return prevMonth
	}
	if len(monthField.values) > 0 {
		nextMonth, parser.monthBump = parser.findNextValue(prevMonth, monthField.values)
		parser.setDone(dayOfMonthIndex)
		return nextMonth
	} else if len(weekField.values) > 0 {
		nextWeek, bumpDayOfMonth := parser.findNextValue(prevWeek, weekField.values)
		parser.setDone(dayOfWeekIndex)
		var _step int
		// A single weekday earlier in the week than prev wraps within the
		// same 7-day window rather than bumping a full cycle.
		if len(weekField.values) == 1 && weekField.values[0] < prevWeek {
			bumpDayOfMonth = false
		}
		if bumpDayOfMonth && len(weekField.values) == 1 {
			_step = 7
		} else {
			_step = step(prevWeek, nextWeek, 7)
		}
		nextMonth, parser.monthBump = bumpValue(prevMonth, parser.maxDays, _step)
		return nextMonth
	}
	return prevMonth
}
// nextMonth resolves the month field (as a three-letter name, via the
// 1-indexed months table), recording in yearBump whether the year advances.
func (parser *CronExpressionParser) nextMonth(prev string, field *CronField) string {
	var next int
	if field.isEmpty() && parser.lastSet(dayOfWeekIndex) {
		if parser.monthBump {
			next, parser.yearBump = bumpLiteral(intVal(months, prev), 12, 1)
			return months[next]
		}
		return prev
	}
	next, parser.yearBump = parser.findNextValue(intVal(months, prev), field.values)
	parser.setDone(monthIndex)
	return months[next]
}
// nextYear resolves the year field. There is no field above year to carry
// into, so an exhausted explicit year list panics (converted to an error
// by nextTime's recover).
func (parser *CronExpressionParser) nextYear(prev string, field *CronField) string {
	var next int
	if field.isEmpty() && parser.lastSet(yearIndex) {
		if parser.yearBump {
			// Effectively unbounded maximum: the largest int.
			next, _ = bumpValue(prev, int(^uint(0)>>1), 1)
			return strconv.Itoa(next)
		}
		return prev
	}
	next, halt := parser.findNextValue(prev, field.values)
	// `if halt` replaces the original `if halt != false` (redundant
	// comparison against a boolean literal).
	if halt {
		panic("Out of expression range error")
	}
	return strconv.Itoa(next)
}
// bumpLiteral advances a 1-indexed literal value (e.g. a month number) by
// step, wrapping past max. Returns the new value and whether it wrapped.
func bumpLiteral(iprev int, max int, step int) (int, bool) {
	bumped := iprev + step
	if bumped > max {
		// NOTE(review): when bumped is an exact multiple of max the previous
		// value is returned unchanged (bumped%max would be 0, which is
		// invalid for a 1-indexed table) — confirm this edge is intended.
		if bumped%max == 0 {
			return iprev, true
		}
		return (bumped % max), true
	}
	return bumped, false
}
// bumpValue adds step to prev — an int, or a string holding an int — and
// returns the bumped value plus whether it wrapped past max (modulo max).
// Unknown types panic (converted to an error by nextTime's recover).
func bumpValue(prev interface{}, max int, step int) (int, bool) {
	var iprev int
	// Bind the switch variable so each case receives the concrete value
	// directly instead of repeating a type assertion (idiomatic Go type switch).
	switch v := prev.(type) {
	case string:
		iprev, _ = strconv.Atoi(v)
	case int:
		iprev = v
	default:
		panic("Unknown type at bumpValue")
	}
	bumped := iprev + step
	if bumped > max {
		return bumped % max, true
	}
	return bumped, false
}
// findNextValue returns the next allowed value at or after prev (an int,
// or a string holding an int), plus whether the search wrapped past the end
// of values (so the next-higher field must bump). Before parser.done is
// set, only strictly greater values match and the first match sets done;
// afterwards, equal values are accepted as well.
func (parser *CronExpressionParser) findNextValue(prev interface{}, values []int) (int, bool) {
	var iprev int
	// Bind the switch variable instead of repeating type assertions.
	switch v := prev.(type) {
	case string:
		iprev, _ = strconv.Atoi(v)
	case int:
		iprev = v
	default:
		panic("Unknown type at findNextValue")
	}
	if len(values) == 0 {
		return iprev, false
	}
	for _, element := range values {
		if parser.done {
			if element >= iprev {
				return element, false
			}
		} else if element > iprev {
			parser.done = true
			return element, false
		}
	}
	// Exhausted: wrap to the first allowed value and signal a carry.
	return values[0], true
}
|
package utils
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
)
// InitGlobalVar resets all package-level HTTP state (shared client, cookie
// jar, last request/response/URL) back to its zero values.
func InitGlobalVar() {
	// The original assigned globalClient = nil twice in a row; the duplicate
	// line is removed.
	globalClient = nil
	globalCookieJar = nil
	request = nil
	response = nil
	Url = nil
	jarBind = false
}
// GetUrlHtml performs a GET against _url using the shared package-level
// client (so cookies persist across calls) and returns the response body
// as a string. Failures are classified into the sentinel errors
// ErrorUrl / ErrorRequest / ErrorResponse.
//
// NOTE(review): the ReadAll error is ignored, as in the original flow —
// a truncated body is returned silently; confirm callers accept that.
func GetUrlHtml(_url string) (string, error) {
	if len(_url) == 0 {
		fmt.Println(_url, "....")
		return "", ErrorUrl
	}
	// Lazily initialize the shared client and cookie jar, binding the jar once.
	if globalClient == nil {
		globalClient = new(http.Client)
	}
	if globalCookieJar == nil {
		globalCookieJar, _ = cookiejar.New(nil)
	}
	if !jarBind {
		globalClient.Jar = globalCookieJar
		jarBind = true
	}
	request, err = http.NewRequest("GET", _url, nil)
	if err != nil {
		// "requset" typo in the original log message fixed.
		fmt.Println("request ...")
		return "", ErrorRequest
	}
	request.Header.Add("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36")
	response, err = globalClient.Do(request)
	if err != nil {
		fmt.Println("response ...")
		return "", ErrorResponse
	}
	defer response.Body.Close()
	bodyBytes, _ := ioutil.ReadAll(response.Body)
	Url = request.URL
	// string(bodyBytes) replaces fmt.Sprintf("%s", bodyBytes): same result,
	// no reflection-based formatting.
	return string(bodyBytes), nil
}
// EncodeDatas URL-encodes the given key/value pairs as an
// application/x-www-form-urlencoded body (keys sorted by url.Values.Encode).
func EncodeDatas(datas map[string]string) []byte {
	form := url.Values{}
	for key, value := range datas {
		form.Set(key, value)
	}
	// Convert directly at the return instead of via intermediate variables.
	return []byte(form.Encode())
}
// PostUrlHtml performs a form-encoded POST of datas to _url using the
// shared package-level client and returns the response body as a string.
// Failures are classified into ErrorUrl / ErrorRequest / ErrorResponse.
//
// NOTE(review): setting Accept-Encoding manually disables the transport's
// transparent gzip decompression, so the returned body may still be
// compressed — confirm callers expect that.
func PostUrlHtml(_url string, datas map[string]string) (string, error) {
	if len(_url) == 0 {
		fmt.Println(_url, "....")
		return "", ErrorUrl
	}
	// Lazily initialize the shared client and cookie jar, binding the jar once.
	if globalClient == nil {
		globalClient = new(http.Client)
	}
	if globalCookieJar == nil {
		globalCookieJar, _ = cookiejar.New(nil)
	}
	if !jarBind {
		globalClient.Jar = globalCookieJar
		jarBind = true
	}
	postBuffer := bytes.NewReader(EncodeDatas(datas))
	request, err = http.NewRequest("POST", _url, postBuffer)
	if err != nil {
		// "requset" typo in the original log message fixed.
		fmt.Println("request error...")
		return "", ErrorRequest
	}
	request.Header.Add("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36")
	request.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
	request.Header.Add("Accept", "application/json, text/javascript, */*; q=0.01")
	request.Header.Add("Accept-Encoding", "gzip, deflate")
	response, err = globalClient.Do(request)
	if err != nil {
		fmt.Println("response error...")
		return "", ErrorResponse
	}
	defer response.Body.Close()
	bodyBytes, _ := ioutil.ReadAll(response.Body)
	Url = request.URL
	// string(bodyBytes) replaces fmt.Sprintf("%s", bodyBytes).
	return string(bodyBytes), nil
}
|
package main
// main spins while x is positive; x is never assigned, so the loop body
// never executes and main returns immediately.
func main() {
	var x int
	// The original split the condition as "for x" / "> 0 {", which Go's
	// automatic semicolon insertion turns into a syntax error; the whole
	// condition must stay on the `for` line.
	for x > 0 {
	}
}
|
package pretend
import (
"time"
"github.com/spf13/viper"
)
// PayPeriodDuration returns the configured pay period length, defaulting
// to 5 hours when PayPeriodDuration is absent from the config file.
func PayPeriodDuration() time.Duration {
	if viper.InConfig("PayPeriodDuration") {
		return viper.GetDuration("PayPeriodDuration")
	}
	// A constant duration replaces ParseDuration("5h") with its silently
	// ignored error; the value is identical.
	return 5 * time.Hour // default to 5h
}
// VotingPeriodDuration returns the configured voting period length,
// defaulting to 30 seconds when VotingPeriodDuration is absent from the
// config file.
func VotingPeriodDuration() time.Duration {
	if viper.InConfig("VotingPeriodDuration") {
		return viper.GetDuration("VotingPeriodDuration")
	}
	// A constant duration replaces ParseDuration("30s") with its silently
	// ignored error; the value is identical.
	return 30 * time.Second // default to 30s
}
|
package main
import (
"errors"
"fmt"
"io"
"net/url"
rtmp "github.com/junli1026/gortmp"
)
// validateRTMPStreamURL checks that uri parses and that its path is one of
// the accepted live-stream mount points ("/live" or "/LIVE").
func validateRTMPStreamURL(uri string) error {
	// Named `u` so the local no longer shadows the imported net/url package
	// (the original declared a variable called `url`).
	u, err := url.Parse(uri)
	if err != nil {
		return err
	}
	if u.Path != "/live" && u.Path != "/LIVE" {
		return errors.New("invalid string url " + uri)
	}
	return nil
}
// newRTMPService builds an RTMP server whose callbacks validate incoming
// streams, lazily create a relay per stream key, forward stream data to
// that relay, and tear the relay down when the stream closes.
func newRTMPService(rtmpAddr string, accessor relayAccessor, registrar streamRegistrar) *rtmp.RtmpServer {
	s := rtmp.NewServer()
	s.OnStreamData(func(meta *rtmp.StreamMeta, data *rtmp.StreamData) error {
		streamKey := meta.StreamName()
		if data.Type == rtmp.FlvHeader { //stream begin
			// Validate URL and stream key only once, at stream start.
			if err := validateRTMPStreamURL(meta.URL()); err != nil {
				return err
			}
			if _, ok := registrar.checkStream(streamKey); !ok {
				return fmt.Errorf("unauthenticated stream key %v", streamKey)
			}
			r := accessor.getRelay(streamKey)
			if r == nil {
				logger.Infof("relay runner for %s not exist, create one", streamKey)
				r = accessor.createRelay(streamKey)
				if r == nil {
					return fmt.Errorf("failed to create relay for stream %v", streamKey)
				}
				r.setStreamInfo(meta)
				go r.run()
			}
		}
		// NOTE(review): if data arrives before any FlvHeader, getRelay can
		// return nil here and receiveUpStreamData would dereference it —
		// confirm the rtmp library guarantees header-first delivery.
		relay := accessor.getRelay(streamKey)
		return relay.receiveUpStreamData(data.Data)
	})
	s.OnStreamClose(func(meta *rtmp.StreamMeta, err error) {
		streamKey := meta.StreamName()
		msg := fmt.Sprintf("stream key %v stopped for reason: %v\n", streamKey, err)
		// EOF is a normal end-of-stream, not an error condition.
		if err == io.EOF {
			logger.Info(msg)
		} else {
			logger.Error(msg)
		}
		relay := accessor.getRelay(streamKey)
		if relay == nil {
			logger.Errorf("relay with stream key %v not found.\n", streamKey)
			return
		}
		relay.stop()
		accessor.deleteRelay(streamKey)
	})
	return s
}
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// RedirectInformation describes traffic-redirect instructions in an
// SM Policy decision (3GPP Npcf_SMPolicyControl).
type RedirectInformation struct {
	// Indicates whether the redirect is enabled.
	RedirectEnabled bool `json:"redirectEnabled,omitempty"`
	RedirectAddressType RedirectAddressType `json:"redirectAddressType,omitempty"`
	// Indicates the address of the redirect server.
	RedirectServerAddress string `json:"redirectServerAddress,omitempty"`
}
|
package utils
import (
"os"
"github.com/ozonva/ova-food-api/internal/logger"
"gopkg.in/yaml.v3"
)
// GRPC holds the gRPC listener settings.
type GRPC struct {
	GRPCPort string `yaml:"grpc_port"`
}
// DATABASE holds the database connection settings.
type DATABASE struct {
	DBHost string `yaml:"db_host"`
	DBPort string `yaml:"db_port"`
	DBUser string `yaml:"db_user"`
	DBPassword string `yaml:"db_password"`
	DBName string `yaml:"db_name"`
	DBSslMode string `yaml:"db_ssl_mode"`
	DBDriver string `yaml:"db_driver"`
}
// KAFKA holds the Kafka broker and topic settings.
type KAFKA struct {
	KafkaBroker string `yaml:"broker_kafka"`
	KafkaTopic string `yaml:"topic"`
}
// APP holds application-level settings.
type APP struct {
	AppChunkSize int `yaml:"chunk_size"`
	Logfile string `yaml:"logfile"`
}
// Config is the root of the YAML configuration file loaded by LoadConfig.
type Config struct {
	Grpc GRPC `yaml:"grpc"`
	Database DATABASE `yaml:"database"`
	Kafka KAFKA `yaml:"kafka"`
	App APP `yaml:"app"`
}
// LoadConfig reads the YAML config file at path and decodes it into a
// Config. The file is closed before returning; a failed close is only logged.
func LoadConfig(path string) (*Config, error) {
	file, err := os.Open(path)
	if err != nil {
		// Check the open error BEFORE registering the deferred Close:
		// the original deferred Close on an invalid handle when Open
		// failed, logging a spurious "Cant close file" warning.
		return nil, err
	}
	defer func() {
		if err := file.Close(); err != nil {
			logger.GlobalLogger.Warn().Msg("Cant close file")
		}
	}()
	config := &Config{}
	// Decode into the *Config directly (the original passed a **Config,
	// which yaml tolerates but is needlessly indirect).
	if err := yaml.NewDecoder(file).Decode(config); err != nil {
		return nil, err
	}
	return config, nil
}
|
package main
import "testing"
// TestGns3License verifies the license generated for a known
// serial/hostname pair.
func TestGns3License(t *testing.T) {
	const want = "73635fd3b0a13ad0"
	license, err := getLicense("00000000", "gns3vm")
	if err != nil {
		t.Errorf("License had an error: %s", err.Error())
	}
	if license != want {
		t.Errorf("License was incorrect got: %s, want: %s.", license, want)
	}
}
// TestRandLicense verifies the license for an arbitrary hostname.
func TestRandLicense(t *testing.T) {
	const want = "c839f272e688e0f0"
	license, err := getLicense("12345678", "asdgsdgsgsasdf")
	if err != nil {
		t.Errorf("License had an error: %s", err.Error())
	}
	if license != want {
		t.Errorf("License was incorrect got: %s, want: %s.", license, want)
	}
}
// TestShortNameLicense verifies the license for a short serial number.
func TestShortNameLicense(t *testing.T) {
	const want = "6d9850f05d783319"
	license, err := getLicense("789", "hosting")
	if err != nil {
		t.Errorf("License had an error: %s", err.Error())
	}
	if license != want {
		t.Errorf("License was incorrect got: %s, want: %s.", license, want)
	}
}
// TestLongNameLicense verifies the license for a longer hostname.
func TestLongNameLicense(t *testing.T) {
	const want = "ab5fc4bda4077261"
	license, err := getLicense("15325325", "aLongerHostname")
	if err != nil {
		t.Errorf("License had an error: %s", err.Error())
	}
	if license != want {
		t.Errorf("License was incorrect got: %s, want: %s.", license, want)
	}
}
|
package weibo
// Following models a follow relationship between two users.
type Following struct {
	// ID of the follower (the user who follows).
	FromUserID int64 `json:"from_user_id" db:"from_user_id"`
	// ID of the user being followed.
	ToUserID int64 `json:"to_user_id" db:"to_user_id"`
	// Time the follow was created — presumably a Unix timestamp; confirm
	// against how the column is written.
	CreatedAt int64 `json:"created_at" db:"created_at"`
}
|
package nginx
import (
"fmt"
"github.com/layer5io/gokit/errors"
)
var (
	// ErrOpInvalid is returned when an unrecognized operation is requested.
	ErrOpInvalid = errors.New(errors.ErrOpInvalid, "Invalid operation")
)
// ErrInstallMesh is the error for install mesh
func ErrInstallMesh(err error) error {
	return errors.New(errors.ErrInstallMesh, fmt.Sprintf("Error installing mesh: %s", err.Error()))
}
// ErrMeshConfig is the error for mesh config
func ErrMeshConfig(err error) error {
	return errors.New(errors.ErrMeshConfig, fmt.Sprintf("Error configuration mesh: %s", err.Error()))
}
// ErrPortForward is the error for mesh port forward
func ErrPortForward(err error) error {
	return errors.New(errors.ErrPortForward, fmt.Sprintf("Error portforwarding mesh gui: %s", err.Error()))
}
// ErrClientConfig is the error for setting client config
func ErrClientConfig(err error) error {
	return errors.New(errors.ErrClientConfig, fmt.Sprintf("Error setting client config: %s", err.Error()))
}
// ErrClientSet is the error for setting clientset
// (the original comment wrongly named this ErrPortForward)
func ErrClientSet(err error) error {
	return errors.New(errors.ErrClientSet, fmt.Sprintf("Error setting clientset: %s", err.Error()))
}
// ErrStreamEvent is the error for streaming event
func ErrStreamEvent(err error) error {
	return errors.New(errors.ErrStreamEvent, fmt.Sprintf("Error streaming event: %s", err.Error()))
}
// ErrApplyOperation is the error for applying operation event
func ErrApplyOperation(err error) error {
	return errors.New(errors.ErrApplyOperation, fmt.Sprintf("Error applying operation: %s", err.Error()))
}
// ErrListOperations is the error for listing operations event
func ErrListOperations(err error) error {
	return errors.New(errors.ErrListOperations, fmt.Sprintf("Error listing operations: %s", err.Error()))
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"github.com/cockroachdb/cockroach/pkg/sql/colexec/execgen"
"github.com/cockroachdb/cockroach/pkg/sql/colexecerror"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/gostdlib/x/tools/imports"
)
// main runs the execgen tool against the command-line arguments and exits
// with status 2 on any failure.
func main() {
	gen := execgenTool{stdErr: os.Stderr}
	if !gen.run(os.Args[1:]...) {
		os.Exit(2)
	}
}
// execgenTool holds the execgen command's configuration and output sinks.
type execgenTool struct {
	// fmtSources runs the go fmt tool on code generated by execgenTool, if this
	// setting is true.
	fmtSources bool
	// stdErr is the writer to which all standard error output will be redirected.
	stdErr io.Writer
	// cmdLine stores the set of flags used to invoke the Execgen tool.
	cmdLine *flag.FlagSet
	// verbose enables debug output of intermediate generated code.
	verbose bool
}
// generator is a func that, given an input file's contents as a string,
// outputs the result of execgen to the outputFile.
type generator func(inputFileContents string, outputFile io.Writer) error
// generators maps an output filename to its registered generator entry.
var generators = make(map[string]entry)
// entry pairs a generator with its optional template input file.
type entry struct {
	fn generator
	inputFile string
}
// registerGenerator records g as the generator for outputFile, reading from
// inputFile; registering the same output twice is a programmer error.
func registerGenerator(g generator, outputFile, inputFile string) {
	if _, ok := generators[outputFile]; ok {
		colexecerror.InternalError(errors.AssertionFailedf("%s generator already registered", outputFile))
	}
	generators[outputFile] = entry{fn: g, inputFile: inputFile}
}
// run parses flags and the single output-path argument, looks up the
// registered generator for that filename, and invokes it. Returns false on
// any failure (errors are reported to stdErr).
func (g *execgenTool) run(args ...string) bool {
	// Parse command line.
	var printDeps bool
	var template string
	g.cmdLine = flag.NewFlagSet("execgen", flag.ContinueOnError)
	g.cmdLine.SetOutput(g.stdErr)
	g.cmdLine.Usage = g.usage
	g.cmdLine.BoolVar(&g.fmtSources, "fmt", true, "format and imports-process generated code")
	g.cmdLine.BoolVar(&g.verbose, "verbose", false, "print out debug information to stderr")
	g.cmdLine.BoolVar(&printDeps, "M", false, "print the dependency list")
	g.cmdLine.StringVar(&template, "template", "", "path")
	err := g.cmdLine.Parse(args)
	if err != nil {
		return false
	}
	// Get remaining args after any flags have been parsed.
	args = g.cmdLine.Args()
	if len(args) != 1 {
		g.cmdLine.Usage()
		g.reportError(errors.New("invalid number of arguments"))
		return false
	}
	// The generator is selected by the output file's base name alone.
	outPath := args[0]
	_, file := filepath.Split(outPath)
	e := generators[file]
	if e.fn == nil {
		g.reportError(errors.Errorf("unrecognized filename: %s", file))
		return false
	}
	// An explicit -template overrides the registered input file.
	if template != "" {
		if e.inputFile == "" {
			g.reportError(errors.Errorf("file %s expected no input template, found %s", file, template))
			return false
		}
		e.inputFile = template
	}
	if err := g.generate(outPath, e); err != nil {
		g.reportError(err)
		return false
	}
	return true
}
// Regexes matching empty line comments (// ) and empty block comments
// (/* */) that templating tends to leave behind.
var emptyCommentRegex = regexp.MustCompile(`[ \t]*//[ \t]*\n`)
var emptyBlockCommentRegex = regexp.MustCompile(`[ \t]*/\*[ \t]*\*/[ \t]*\n`)
// generate runs entry's generator (optionally preprocessing its input
// template through execgen.Generate), cleans up templating artifacts,
// optionally formats the result, and writes it to stdout. path is used
// only to resolve imports during formatting.
func (g *execgenTool) generate(path string, entry entry) error {
	var buf bytes.Buffer
	buf.WriteString("// Code generated by execgen; DO NOT EDIT.\n")
	var inputFileContents string
	var err error
	if entry.inputFile != "" {
		// := here shadows the outer err inside this block; both are checked
		// before use, so no error is lost.
		inputFileBytes, err := ioutil.ReadFile(entry.inputFile)
		if err != nil {
			return err
		}
		inputFileContents, err = execgen.Generate(string(inputFileBytes))
		if err != nil {
			return err
		}
		if g.verbose {
			fmt.Fprintln(os.Stderr, "generated code before text/template runs")
			fmt.Fprintln(os.Stderr, "-----------------------------------")
			fmt.Fprintln(os.Stderr, inputFileContents)
			fmt.Fprintln(os.Stderr, "-----------------------------------")
		}
	}
	err = entry.fn(inputFileContents, &buf)
	if err != nil {
		return err
	}
	b := buf.Bytes()
	// Delete empty comments ( // or /* */) that tend to get generated by templating.
	b = emptyCommentRegex.ReplaceAll(b, []byte{})
	b = emptyBlockCommentRegex.ReplaceAll(b, []byte{})
	// Delete execgen_template build tag.
	b = bytes.ReplaceAll(b, []byte("// +build execgen_template"), []byte{})
	if g.fmtSources {
		oldB := b
		b, err = imports.Process(path, b,
			&imports.Options{Comments: true, TabIndent: true, TabWidth: 2})
		if err != nil {
			// Write out incorrect source for easier debugging.
			b = oldB
			err = errors.Wrap(err, "Code formatting failed with Go parse error")
		}
	}
	// Ignore any write error if another error already occurred.
	_, writeErr := os.Stdout.Write(b)
	if err != nil {
		return err
	}
	return writeErr
}
// usage is a replacement usage function for the flags package.
// It lists every registered output filename along with the flag defaults.
func (g *execgenTool) usage() {
	fmt.Fprintf(g.stdErr, "Execgen is a tool for generating templated code related to ")
	fmt.Fprintf(g.stdErr, "columnarized execution.\n\n")
	fmt.Fprintf(g.stdErr, "Usage:\n")
	fmt.Fprintf(g.stdErr, "\texecgen [path]...\n\n")
	fmt.Fprintf(g.stdErr, "Supported filenames are:\n")
	for filename := range generators {
		fmt.Fprintf(g.stdErr, "\t%s\n", filename)
	}
	fmt.Fprintf(g.stdErr, "\n")
	fmt.Fprintf(g.stdErr, "Flags:\n")
	g.cmdLine.PrintDefaults()
	fmt.Fprintf(g.stdErr, "\n")
}
// reportError writes err to the tool's error stream.
func (g *execgenTool) reportError(err error) {
	fmt.Fprintf(g.stdErr, "ERROR: %v\n", err)
}
|
package balancer
import (
"testing"
"time"
)
////////////////////// balancer.HTTPMonitor Tests //////////////////////
// Test the construction of HTTPMonitor: the constructor must set the
// health-check path and delay and leave the client nil.
func TestHTTPMonitorConstruction(t *testing.T) {
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	// things should be initialized correctly by constructor
	// ("contructed"/"construct" typos in the original failure messages fixed)
	if m.Path != DefaultHealthCheckRoute {
		t.Errorf("monitor constructed with Path: %s, want %s", m.Path,
			DefaultHealthCheckRoute)
	}
	if m.Client != nil {
		t.Errorf("monitor constructed with unknown client, want nil")
	}
	if m.Delay != DefaultHealthCheckTimeout {
		t.Errorf("monitor constructed with Delay: %s, want %s", m.Delay,
			DefaultHealthCheckTimeout)
	}
}
// Test the Check method for correct Health route
func TestHTTPMonitorCheckRoute(t *testing.T) {
	transportMock := NewTransportMock()
	transportMock.ExpectedRoute = "/unexpected/route"
	clientMock := NewClientMock(transportMock)
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	m.Client = clientMock
	// transport expects "/unexpected/route", but monitor sends default
	// NOTE(review): %b is fmt's binary-integer verb; unless healthy/unhealthy
	// are integers this prints oddly — consider %v. Confirm their type.
	if check := m.Check(host1); check != unhealthy {
		t.Errorf("m.check(host1): %b, want %b", check, unhealthy)
	}
	transportMock.ExpectedRoute = DefaultHealthCheckRoute
	// transport expects default, and monitor sends default
	if check := m.Check(host1); check != healthy {
		t.Errorf("m.check(host1): %b, want %b", check, healthy)
	}
}
// Test the Check method for 200 status
func TestHTTPMonitorCheck200(t *testing.T) {
	transportMock := NewTransportMock()
	clientMock := NewClientMock(transportMock)
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	m.Client = clientMock
	// hosts are assumed "healthy" by transportMock unless specified otherwise.
	// %v replaces %b, which is the binary verb and garbles these values.
	if check := m.Check(host1); check != healthy {
		t.Errorf("m.Check(host1): %v, want %v", check, healthy)
	}
}
// Test the Check method for 503 status
func TestHTTPMonitorCheck503(t *testing.T) {
	transportMock := NewTransportMock()
	transportMock.Unhealthy[host1] = true
	clientMock := NewClientMock(transportMock)
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	m.Client = clientMock
	// hosts are assumed "healthy" by transportMock unless specified otherwise.
	// %v replaces %b, which is the binary verb and garbles these values.
	if check := m.Check(host1); check != unhealthy {
		t.Errorf("m.Check(host1): %v, want %v", check, unhealthy)
	}
}
/*// Test the Check method for timeout
func TestHTTPMonitorCheckTimeout(t *testing.T) {
transportMock := NewTransportMock()
transportMock.Timeout = time.Duration(5 * time.Second) // larger than default
clientMock := NewClientMock(transportMock)
m := NewHTTPMonitor(DefaultHealthCheckRoute)
m.Client = clientMock
m.Client.Timeout = DefaultHealthCheckTimeout
// request should be timed out by client
if check := m.Check(host1); check != unhealthy {
t.Errorf("m.Check(host1): %b, want %b", check, unhealthy)
}
}*/
// Watch tests:
// Test the Watch method for empty Balancer
func TestHTTPMonitorWatchEmpty(t *testing.T) {
	transportMock := NewTransportMock()
	clientMock := NewClientMock(transportMock)
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	m.Client = clientMock
	// remove delay so we can measure iterations as they happen
	m.Delay = time.Duration(0)
	balancerMock := &BalancerMock{
		BalanceSuccess:   true,
		RegisterSuccess:  true,
		DeregisterSignal: make(chan string),
		Pool:             make(chan []string),
	}
	emptyPool := make([]string, 0)
	// balancerMock will block until a pool is sent on the channel
	go m.Watch(balancerMock)
	balancerMock.Pool <- emptyPool
	// renamed from "testing", which shadowed the testing package
	waiting := true
	for waiting {
		select {
		// non-blocking receive: Deregister shouldn't fire since
		// there are no hosts to check
		case <-balancerMock.DeregisterSignal:
			t.Fatalf("HTTPMonitor deregistered on empty pool")
		// non-blocking send succeeds when monitor asks for pool again
		case balancerMock.Pool <- emptyPool:
			// expected behavior
			waiting = false
		default:
			t.Log("Looping...")
		}
	}
}
// Test the Watch method for All 200 status Balancer
func TestHTTPMonitorWatchStatusOK(t *testing.T) {
	transportMock := NewTransportMock()
	clientMock := NewClientMock(transportMock)
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	m.Client = clientMock
	// remove delay so we can measure iterations as they happen
	m.Delay = time.Duration(0)
	balancerMock := &BalancerMock{
		BalanceSuccess:   true,
		RegisterSuccess:  true,
		DeregisterSignal: make(chan string),
		Pool:             make(chan []string),
	}
	healthyPool := []string{host1, host2, host3}
	go m.Watch(balancerMock)
	balancerMock.Pool <- healthyPool
	// renamed from "testing", which shadowed the testing package
	waiting := true
	for waiting {
		select {
		case <-balancerMock.DeregisterSignal:
			t.Fatalf("HTTPMonitor deregistered on healthy pool")
		case balancerMock.Pool <- healthyPool:
			waiting = false
		default:
			t.Log("Looping...")
		}
	}
}
// Test the Watch method for Failing hosts Balancer
func TestHTTPMonitorWatchFailingHosts(t *testing.T) {
	transportMock := NewTransportMock()
	transportMock.Unhealthy[host3] = true
	clientMock := NewClientMock(transportMock)
	m := NewHTTPMonitor(DefaultHealthCheckRoute)
	m.Client = clientMock
	// remove delay so we can measure iterations as they happen
	m.Delay = time.Duration(0)
	balancerMock := &BalancerMock{
		BalanceSuccess:   true,
		RegisterSuccess:  true,
		DeregisterSignal: make(chan string),
		Pool:             make(chan []string),
	}
	halfHealthyPool := []string{host1, host2, host3}
	go m.Watch(balancerMock)
	balancerMock.Pool <- halfHealthyPool
	// renamed from "testing", which shadowed the testing package
	waiting := true
	for waiting {
		select {
		case deregister := <-balancerMock.DeregisterSignal:
			if deregister != host3 {
				t.Fatalf("Deregistered %s, want %s", deregister, host3)
			}
			waiting = false
		case balancerMock.Pool <- halfHealthyPool:
			t.Fatalf("Failed to deregister %s", host3)
		default:
			t.Log("Looping...")
		}
	}
}
/*// Test the Watch method for Timeout Hosts Balancer
func TestHTTPMonitorWatchTimeout(t *testing.T) {
transportMock := NewTransportMock()
transportMock.Timeout = time.Duration(5 * time.Second)
clientMock := NewClientMock(transportMock)
m := NewHTTPMonitor(DefaultHealthCheckRoute)
m.Client = clientMock
balancerMock := &BalancerMock{
BalanceSuccess: true,
RegisterSuccess: true,
DeregisterSignal: make(chan string),
Pool: make(chan []string),
}
timeoutPool := []string{host1}
go m.Watch(balancerMock)
balancerMock.Pool <- timeoutPool
testing := true
iter := 0
for testing {
if iter == MAX_ITERATIONS {
t.Fatalf("MAX_ITERATIONS EXCEEDED")
}
iter++
select {
case deregister := <-balancerMock.DeregisterSignal:
if deregister != host1 {
t.Fatalf("Deregistered %s, want %s", deregister, host1)
}
testing = false
case balancerMock.Pool <- timeoutPool:
t.Fatalf("Failed to deregister %s", host1)
default:
t.Log("Looping...")
}
}
}*/
|
package mondohttp
import "testing"
func TestNewAccountsRequest(t *testing.T) {
req := NewAccountsRequest("token")
assertReqEquals(t, req, `GET /accounts HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Authorization: token
`)
}
func TestBalanceRequest(t *testing.T) {
req := NewBalanceRequest("token", "acc_123")
assertReqEquals(t, req, `GET /balance?account_id=acc_123 HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Authorization: token
`)
}
func TestTransactionRequest(t *testing.T) {
req := NewTransactionRequest("token", "trans_456", false)
assertReqEquals(t, req, `GET /transactions/trans_456 HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Authorization: token
`)
}
func TestTransactionRequest_Merchants(t *testing.T) {
req := NewTransactionRequest("token", "trans_456", true)
assertReqEquals(t, req, `GET /transactions/trans_456?expand%5B%5D=merchant HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Authorization: token
`)
}
func TestTransactionsRequest_NoPage(t *testing.T) {
req := NewTransactionsRequest("token", "acc_123", true, "", "", 0)
assertReqEquals(t, req, `GET /transactions?account_id=acc_123&expand%5B%5D=merchant HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Authorization: token
`)
}
func TestTransactionsRequest_Page(t *testing.T) {
req := NewTransactionsRequest("token", "acc_123", true, "start", "end", 50)
assertReqEquals(t, req, `GET /transactions?account_id=acc_123&before=end&expand%5B%5D=merchant&limit=50&since=start HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Authorization: token
`)
}
func TestAnnotateTransactionRequest(t *testing.T) {
req := NewAnnotateTransactionRequest("token", "trans_456", map[string]string{
"test_a": "abc",
"test_b": "",
})
assertReqEquals(t, req, `PATCH /transactions/trans_456 HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Content-Length: 46
Authorization: token
Content-Type: application/x-www-form-urlencoded
metadata%5Btest_a%5D=abc&metadata%5Btest_b%5D=`)
}
func TestCreateURLFeedItemRequest(t *testing.T) {
req := NewCreateURLFeedItemRequest("token", "acc_123", "https://www.google.com", "My feed item", "http://test.com/image.png")
assertReqEquals(t, req, `POST /feed HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Content-Length: 149
Authorization: token
Content-Type: application/x-www-form-urlencoded
account_id=acc_123¶ms%5Bimage_url%5D=http%3A%2F%2Ftest.com%2Fimage.png¶ms%5Btitle%5D=My+feed+item&type=basic&url=https%3A%2F%2Fwww.google.com`)
}
func TestCreateBasicFeedItemRequest_Minimum(t *testing.T) {
req := NewCreateBasicFeedItemRequest("token", "acc_123", "", "My feed item", "http://test.com/image.png", "You've created a feed item!", "", "", "")
assertReqEquals(t, req, `POST /feed HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Content-Length: 165
Authorization: token
Content-Type: application/x-www-form-urlencoded
account_id=acc_123¶ms%5Bbody%5D=You%27ve+created+a+feed+item%21¶ms%5Bimage_url%5D=http%3A%2F%2Ftest.com%2Fimage.png¶ms%5Btitle%5D=My+feed+item&type=basic`)
}
func TestCreateBasicFeedItemRequest_Maximum(t *testing.T) {
req := NewCreateBasicFeedItemRequest("token", "acc_123", "https://override.com/", "My feed item", "http://test.com/image.png", "You've created a feed item!", "bg-color", "h1-color", "p-color")
assertReqEquals(t, req, `POST /feed HTTP/1.1
Host: api.getmondo.co.uk
User-Agent: Go-http-client/1.1
Content-Length: 301
Authorization: token
Content-Type: application/x-www-form-urlencoded
account_id=acc_123¶ms%5Bbackground_color%5D=bg-color¶ms%5Bbody%5D=You%27ve+created+a+feed+item%21¶ms%5Bbody_color%5D=p-color¶ms%5Bimage_url%5D=http%3A%2F%2Ftest.com%2Fimage.png¶ms%5Btitle%5D=My+feed+item¶ms%5Btitle_color%5D=h1-color&type=basic&url=https%3A%2F%2Foverride.com%2F`)
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"bytes"
"flag"
"io"
"path/filepath"
"strings"
"testing"
"github.com/cockroachdb/datadriven"
"github.com/cockroachdb/errors"
)
// testDataGlob selects which testdata files TestOptgen runs; it can be
// overridden on the command line with -d.
var (
	testDataGlob = flag.String("d", "testdata/[^.]*", "test data glob")
)
// TestOptgen runs the data-driven test files matched by testDataGlob,
// feeding each case's input through the optgen tool with stubbed file
// and glob resolution, and returning the tool's output for comparison.
func TestOptgen(t *testing.T) {
	paths, err := filepath.Glob(*testDataGlob)
	if err != nil {
		t.Fatal(err)
	}
	if len(paths) == 0 {
		t.Fatalf("no testfiles found matching: %s", *testDataGlob)
	}
	for _, path := range paths {
		t.Run(filepath.Base(path), func(t *testing.T) {
			datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
				// Captures the tool's stderr for inclusion in the result.
				var buf bytes.Buffer
				gen := optgen{useGoFmt: true, maxErrors: 2, stdErr: &buf}
				// Map the glob patterns used by the test cases onto fixed
				// file lists so no real filesystem globbing happens.
				gen.globResolver = func(pattern string) ([]string, error) {
					switch pattern {
					case "test.opt":
						return []string{"test.opt"}, nil
					case "all":
						return []string{"test.opt", "test2.opt"}, nil
					case "not-found.opt":
						return []string{"notfound.opt"}, nil
					default:
						return nil, errors.New("invalid source")
					}
				}
				// Resolve input file to the data-driven input text.
				gen.fileResolver = func(name string) (io.Reader, error) {
					switch name {
					case "test.opt":
						return strings.NewReader(d.Input), nil
					case "test2.opt":
						return strings.NewReader(""), nil
					default:
						return nil, errors.New("invalid filename")
					}
				}
				args := make([]string, len(d.CmdArgs))
				for i := range args {
					args[i] = d.CmdArgs[i].String()
				}
				gen.run(args...)
				// Suppress DO NOT EDIT so that reviewable will still show the
				// file by default.
				return strings.Replace(buf.String(), "DO NOT EDIT.", "[omitted]", -1)
			})
		})
	}
}
|
package notification
import "time"
// Sender delivers a notification message to some destination.
type Sender interface {
	Send(msg string)
}

// Notification decides, for a given time, whether a message should be
// sent through the supplied Sender.
type Notification interface {
	SendIfNeed(t time.Time, s Sender)
}

// TimeChecker reports whether the given time satisfies some condition.
type TimeChecker interface {
	Check(time time.Time) bool
}

// MessageProvider supplies the text of a notification message.
type MessageProvider interface {
	Message() string
}
|
package handlers
import (
"fmt"
log "log"
"strings"
iris "github.com/kataras/iris/v12"
minio "github.com/minio/minio-go/v6"
cnf "github.com/rzrbld/adminio-api/config"
resph "github.com/rzrbld/adminio-api/response"
)
// BuckList responds with the list of all buckets.
var BuckList = func(ctx iris.Context) {
	buckets, err := minioClnt.ListBuckets()
	ctx.JSON(resph.BodyResHandler(ctx, err, buckets))
}
// BuckListExtended responds with every bucket plus its notification
// configuration.
var BuckListExtended = func(ctx iris.Context) {
	lb, err := minioClnt.ListBuckets()
	allBuckets := []interface{}{}
	for _, bucket := range lb {
		// Use a distinct variable so the outer ListBuckets error is not
		// shadowed; log the bucket name and cause (the old message had
		// neither).
		bn, nErr := minioClnt.GetBucketNotification(bucket.Name)
		if nErr != nil {
			log.Print("Error while getting bucket notification for ", bucket.Name, ": ", nErr)
		}
		br := iris.Map{"name": bucket.Name, "info": bucket, "events": bn}
		allBuckets = append(allBuckets, br)
	}
	ctx.JSON(resph.BodyResHandler(ctx, err, allBuckets))
}
// BuckMake creates a new bucket, defaulting the region from config when
// none is supplied.
var BuckMake = func(ctx iris.Context) {
	newBucket := ctx.FormValue("newBucket")
	newBucketRegion := ctx.FormValue("newBucketRegion")
	if newBucketRegion == "" {
		newBucketRegion = cnf.Region
	}
	// Idiomatic boolean test (was `!= false`).
	if resph.CheckAuthBeforeRequest(ctx) {
		err := minioClnt.MakeBucket(newBucket, newBucketRegion)
		ctx.JSON(resph.DefaultResHandler(ctx, err))
	} else {
		ctx.JSON(resph.DefaultAuthError())
	}
}
// BuckDelete removes the named bucket.
var BuckDelete = func(ctx iris.Context) {
	bucketName := ctx.FormValue("bucketName")
	// Idiomatic boolean test (was `!= false`).
	if resph.CheckAuthBeforeRequest(ctx) {
		err := minioClnt.RemoveBucket(bucketName)
		ctx.JSON(resph.DefaultResHandler(ctx, err))
	} else {
		ctx.JSON(resph.DefaultAuthError())
	}
}
// BuckGetLifecycle responds with the lifecycle configuration of the
// named bucket.
var BuckGetLifecycle = func(ctx iris.Context) {
	lifecycle, err := minioClnt.GetBucketLifecycle(ctx.FormValue("bucketName"))
	ctx.JSON(resph.BodyResHandler(ctx, err, lifecycle))
}
// BuckSetLifecycle sets the lifecycle configuration of the named bucket.
var BuckSetLifecycle = func(ctx iris.Context) {
	bucketName := ctx.FormValue("bucketName")
	lifecycle := ctx.FormValue("lifecycle")
	// Idiomatic boolean test (was `!= false`).
	if resph.CheckAuthBeforeRequest(ctx) {
		err := minioClnt.SetBucketLifecycle(bucketName, lifecycle)
		ctx.JSON(resph.DefaultResHandler(ctx, err))
	} else {
		ctx.JSON(resph.DefaultAuthError())
	}
}
// BuckGetEvents responds with the notification configuration of the
// named bucket.
var BuckGetEvents = func(ctx iris.Context) {
	events, err := minioClnt.GetBucketNotification(ctx.FormValue("bucket"))
	ctx.JSON(resph.BodyResHandler(ctx, err, events))
}
// BuckSetEvents adds a notification configuration (SNS, SQS or Lambda,
// chosen by the ARN's service field) for the given bucket.
var BuckSetEvents = func(ctx iris.Context) {
	if !resph.CheckAuthBeforeRequest(ctx) {
		ctx.JSON(resph.DefaultAuthError())
		return
	}
	// Validate the ARN before indexing into it: the previous code panicked
	// on input with fewer than six colon-separated fields.
	arrARN := strings.Split(ctx.FormValue("stsARN"), ":")
	if len(arrARN) < 6 {
		ctx.JSON(resph.DefaultResHandler(ctx, fmt.Errorf("invalid stsARN: want at least 6 colon-separated fields")))
		return
	}
	stsARN := minio.NewArn(arrARN[1], arrARN[2], arrARN[3], arrARN[4], arrARN[5])
	bucket := ctx.FormValue("bucket")
	eventTypes := strings.Split(ctx.FormValue("eventTypes"), ",")
	filterPrefix := ctx.FormValue("filterPrefix")
	filterSuffix := ctx.FormValue("filterSuffix")
	// The previous code silently discarded this error.
	bucketNotify, err := minioClnt.GetBucketNotification(bucket)
	if err != nil {
		ctx.JSON(resph.DefaultResHandler(ctx, err))
		return
	}
	newNotification := minio.NewNotificationConfig(stsARN)
	for _, event := range eventTypes {
		switch event {
		case "put":
			newNotification.AddEvents(minio.ObjectCreatedAll)
		case "delete":
			newNotification.AddEvents(minio.ObjectRemovedAll)
		case "get":
			newNotification.AddEvents(minio.ObjectAccessedAll)
		}
	}
	if filterPrefix != "" {
		newNotification.AddFilterPrefix(filterPrefix)
	}
	if filterSuffix != "" {
		newNotification.AddFilterSuffix(filterSuffix)
	}
	// minio-go's Add{Topic,Queue,Lambda} return false when the config
	// overlaps an existing one (and true when it was added), so the
	// conditions must be negated — the previous code flagged successful
	// additions as overlaps.
	switch arrARN[2] {
	case "sns":
		if !bucketNotify.AddTopic(newNotification) {
			err = fmt.Errorf("Overlapping Topic configs")
		}
	case "sqs":
		if !bucketNotify.AddQueue(newNotification) {
			err = fmt.Errorf("Overlapping Queue configs")
		}
	case "lambda":
		if !bucketNotify.AddLambda(newNotification) {
			err = fmt.Errorf("Overlapping lambda configs")
		}
	}
	// Report the overlap instead of unconditionally overwriting err with
	// the Set call result (the previous code lost this error).
	if err != nil {
		ctx.JSON(resph.DefaultResHandler(ctx, err))
		return
	}
	err = minioClnt.SetBucketNotification(bucket, bucketNotify)
	ctx.JSON(resph.DefaultResHandler(ctx, err))
}
// BuckRemoveEvents removes every notification configuration from the
// named bucket.
var BuckRemoveEvents = func(ctx iris.Context) {
	bucket := ctx.FormValue("bucket")
	// Idiomatic boolean test (was `!= false`).
	if resph.CheckAuthBeforeRequest(ctx) {
		err := minioClnt.RemoveAllBucketNotification(bucket)
		ctx.JSON(resph.DefaultResHandler(ctx, err))
	} else {
		ctx.JSON(resph.DefaultAuthError())
	}
}
|
package binance
import (
"encoding/json"
)
// clientGetExchangeInfo fetches /api/v3/exchangeInfo and decodes it into
// an ExchangeInfo. Errors are logged and returned to the caller.
func (b *Binance) clientGetExchangeInfo() (*ExchangeInfo, error) {
	body, err := b.clientGet("/api/v3/exchangeInfo")
	if err != nil {
		log.WithError(err).Error("can't get ExchangeInfo")
		return nil, err
	}
	exchangeInfo := &ExchangeInfo{}
	// Pass the pointer itself: the previous code passed &exchangeInfo
	// (a **ExchangeInfo), which json.Unmarshal tolerates but is
	// needlessly indirect.
	if err := json.Unmarshal(body, exchangeInfo); err != nil {
		log.WithError(err).WithField("body", string(body)).Error("can't unmarshal ExchangeInfo")
		return nil, err
	}
	return exchangeInfo, nil
}
// ExchangeInfo mirrors the /api/v3/exchangeInfo response payload.
type ExchangeInfo struct {
	Timezone        string        `json:"timezone"`
	ServerTime      int64         `json:"serverTime"`
	RateLimits      []interface{} `json:"rateLimits"`
	ExchangeFilters []interface{} `json:"exchangeFilters"`
	Symbols         []Symbols     `json:"symbols"`
}
// Symbols describes one trading-symbol entry of an ExchangeInfo response.
type Symbols struct {
	Symbol             string `json:"symbol"`
	Status             string `json:"status"`
	BaseAsset          string `json:"baseAsset"`
	BaseAssetPrecision int    `json:"baseAssetPrecision"`
	QuoteAsset         string `json:"quoteAsset"`
	QuotePrecision     int    `json:"quotePrecision"`
	// NOTE(review): the field name "OrderTypes" does not match its json
	// tag "isSpotTradingAllowed" — confirm which was intended (renaming
	// the exported field would break callers, so it is left as-is).
	OrderTypes             bool          `json:"isSpotTradingAllowed"`
	IsMarginTradingAllowed bool          `json:"isMarginTradingAllowed"`
	Filters                []interface{} `json:"filters"`
}
|
package main
import "fmt"
// main reads a height n from stdin and prints a right-aligned staircase
// of '#' characters: row i (1-based) is n-i spaces followed by i hashes.
func main() {
	var n int
	// NOTE(review): the Scan error is ignored, matching the original;
	// on bad input n stays 0 and nothing is printed.
	fmt.Scan(&n)
	for i := 1; i <= n; i++ {
		for j := 1; j <= n; j++ {
			// if/else instead of two back-to-back ifs with complementary
			// conditions; spaces derived directly from the row number.
			if j <= n-i {
				fmt.Print(" ")
			} else {
				fmt.Print("#")
			}
		}
		fmt.Println()
	}
}
|
package main
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test_isHappy exercises isHappy with known happy and unhappy numbers.
func Test_isHappy(t *testing.T) {
	type args struct {
		n int
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			// Previously unnamed; an empty subtest name makes failures
			// hard to locate in test output.
			name: "with 19",
			args: args{
				n: 19,
			},
			want: true,
		},
		{
			name: "for single digit",
			args: args{
				n: 2,
			},
			want: false,
		},
		{
			name: "with 1111111",
			args: args{n: 1111111},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, isHappy(tt.args.n), "isHappy(%v)", tt.args.n)
		})
	}
}
|
package basket
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/dvdalilue/invopop/db"
"github.com/dvdalilue/invopop/api/common"
)
// Mapper function to translate a basket model into a friendlier
// DTO. Get the basket/product relations and creates a 'summary'
// object with the list of items and total to pay (with discounts)
// toBasketDto translates a basket model into a friendlier DTO: it loads
// the basket's products, tallies per-product quantities and unit prices,
// and computes the total to pay (with discounts applied).
func toBasketDto(
	c *gin.Context,
	s db.Store,
	b *db.Basket,
) (*Basket, *db.Error) {
	products, err := s.GetBasketProducts(c, b.ID)
	if err != nil {
		return nil, err
	}
	prices := make(map[int64]float64)
	quantities := make(map[int64]int)
	items := []string{}
	subTotal := 0.0
	for _, p := range products {
		// Record the unit price the first time a product is seen, then
		// just bump its quantity.
		if _, seen := quantities[p.ID]; !seen {
			prices[p.ID] = p.Price
		}
		quantities[p.ID]++
		items = append(items, p.Name)
		subTotal += p.Price
	}
	dm := NewDiscountManager(subTotal, quantities, prices)
	return &Basket{b.ID, items, dm.getTotal()}, nil
}
// Handler function to create a basket. It's assumed that this is
// always successful. The basket model object is mapped to a DTO
// hidding the Store model
// createBasket returns a handler that creates a basket (assumed to
// always succeed) and replies with its DTO representation.
func createBasket(s db.Store) func(*gin.Context) {
	return func(c *gin.Context) {
		res, err := toBasketDto(c, s, s.CreateBasket(c))
		if err != nil {
			c.JSON(err.Code, err.ToAPIResponse())
			return
		}
		c.JSON(http.StatusOK, res)
	}
}
// Handler function to get all baskets. The basket model object is
// mapped to a DTO as before
// getBaskets returns a handler that lists every basket, each mapped to
// its DTO representation.
func getBaskets(s db.Store) func(*gin.Context) {
	return func(c *gin.Context) {
		res := []*Basket{}
		for _, bsk := range s.GetBaskets(c) {
			dto, err := toBasketDto(c, s, bsk)
			if err != nil {
				c.JSON(err.Code, err.ToAPIResponse())
				return
			}
			res = append(res, dto)
		}
		c.JSON(http.StatusOK, &Baskets{res})
	}
}
// Handler function to get a single basket, the 'id' is a
// path parameter extracted in the 'PathIDMiddleware'
// getBasket returns a handler for a single basket; the 'id' path
// parameter is extracted upstream by PathIDMiddleware.
func getBasket(s db.Store) func(*gin.Context) {
	return func(c *gin.Context) {
		objID := c.MustGet("id").(int64)
		// A negative id means the middleware already wrote a response.
		if objID < 0 {
			return
		}
		basket, err := s.GetBasket(c, objID)
		if err != nil {
			c.JSON(err.Code, err.ToAPIResponse())
			return
		}
		dto, err := toBasketDto(c, s, basket)
		if err != nil {
			c.JSON(err.Code, err.ToAPIResponse())
			return
		}
		c.JSON(http.StatusOK, dto)
	}
}
// Handler function to add a product to a basket, the 'id' is a
// path parameter extracted in the 'PathIDMiddleware' and the
// product 'id' is received in the request's body which is checked
// with the 'AddBasketProduct' DTO
// addBasketProduct returns a handler that adds a product to a basket.
// The basket 'id' comes from PathIDMiddleware; the product id arrives in
// the JSON body, validated via the AddBasketProduct DTO.
func addBasketProduct(s db.Store) func(*gin.Context) {
	return func(c *gin.Context) {
		var req AddBasketProduct
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, common.APIResponse{
				Code:    http.StatusBadRequest,
				Message: err.Error(),
			})
			return
		}
		objID := c.MustGet("id").(int64)
		// A negative id means the middleware already wrote a response.
		if objID < 0 {
			return
		}
		basket, err := s.AddBasketProduct(c, objID, req.ProductID)
		if err != nil {
			c.JSON(err.Code, err.ToAPIResponse())
			return
		}
		dto, err := toBasketDto(c, s, basket)
		if err != nil {
			c.JSON(err.Code, err.ToAPIResponse())
			return
		}
		c.JSON(http.StatusOK, dto)
	}
}
// Handler function to remove a basket, the 'id' is a path
// parameter extracted in the 'PathIDMiddleware'
// deleteBasket returns a handler that removes a basket; the 'id' path
// parameter is extracted upstream by PathIDMiddleware.
func deleteBasket(s db.Store) func(*gin.Context) {
	return func(c *gin.Context) {
		objID := c.MustGet("id").(int64)
		// A negative id means the middleware already wrote a response.
		if objID < 0 {
			return
		}
		if err := s.DeleteBasket(c, objID); err != nil {
			c.JSON(err.Code, err.ToAPIResponse())
			return
		}
		c.JSON(http.StatusNoContent, nil)
	}
}
// Includes basket operations in a router based on the prefix
// parameter and pass the store to the handlers
func IncludeOperations(r *gin.Engine, s db.Store, prefix string) {
basketAPI := r.Group(prefix)
{
basketAPI.POST("/", createBasket(s))
basketAPI.GET("/", getBaskets(s))
basketAPI.GET("/:id", getBasket(s))
basketAPI.DELETE("/:id", deleteBasket(s))
basketAPI.POST("/:id/product", addBasketProduct(s))
}
} |
package jsondiff
import (
"encoding/json"
"errors"
"reflect"
)
// Diff compares oldValue with newValue and returns a json tree of
// the changed values, rendering each change with DefaultFormat
// (i.e. the new value).
func Diff(oldValue interface{}, newValue interface{}) (json.RawMessage, error) {
	return DiffFormat(oldValue, newValue, DefaultFormat)
}
// DefaultFormat is the Formatter used by Diff; it represents a change by
// its new value.
func DefaultFormat(oldValue interface{}, newValue interface{}) (outputValue interface{}) {
	outputValue = newValue
	return
}
// NewValueFormat represents a change by its new value.
func NewValueFormat(oldValue interface{}, newValue interface{}) (outputValue interface{}) {
	outputValue = newValue
	return
}
// OldValueFormat represents a change by its old value.
func OldValueFormat(oldValue interface{}, newValue interface{}) (outputValue interface{}) {
	outputValue = oldValue
	return
}
// BothValuesAsMapFormat represents a change as {"Old": ..., "New": ...}.
func BothValuesAsMapFormat(oldValue interface{}, newValue interface{}) (outputValue interface{}) {
	out := make(map[string]interface{}, 2)
	out["Old"] = oldValue
	out["New"] = newValue
	return out
}
// DiffOldNew is like Diff, but renders each changed value as a
// {"Old": ..., "New": ...} pair via BothValuesAsMapFormat.
func DiffOldNew(oldValue interface{}, newValue interface{}) (json.RawMessage, error) {
	return DiffFormat(oldValue, newValue, BothValuesAsMapFormat)
}
// Formatter controls how to represent the diff in the output json message
// e.g. to show only the newValue, this func would return newValue
// e.g. to show only the oldValue, this func would return oldValue
// e.g. to show a {old: <v>, new: v}, this func would return map[string]interface{}{ "old": oldValue, "new": newValue }
//
// oldValue & newValue will always be non-struct types (they come from
// json-unmarshaled maps, slices and primitives).
type Formatter func(oldValue interface{}, newValue interface{}) (outputValue interface{})
// getIgnoredKeys walks typ's fields (up to maxDepth levels of nested
// structs) and returns the addresses of every field tagged `jsondiff:"-"`.
// baseAddr is the address prefix of typ within the root type.
func getIgnoredKeys(typ reflect.Type, baseAddr []string, maxDepth int) [][]string {
	addrs := [][]string{}
	if maxDepth == 0 {
		return addrs
	}
	// Maps have no statically-known fields to inspect.
	if typ.Kind() == reflect.Map {
		return addrs
	}
	for i := 0; i < typ.NumField(); i++ {
		field := typ.Field(i)
		// Copy baseAddr before extending it. The previous code used
		// append(baseAddr, ...) twice per field while retaining the
		// results: both appends can share baseAddr's backing array, so a
		// later append could overwrite an address already stored in addrs.
		fieldAddr := make([]string, 0, len(baseAddr)+1)
		fieldAddr = append(fieldAddr, baseAddr...)
		fieldAddr = append(fieldAddr, field.Name)
		if field.Tag.Get("jsondiff") == "-" {
			addrs = append(addrs, fieldAddr)
		}
		if child := baseType(field.Type); child.Kind() == reflect.Struct {
			addrs = append(addrs, getIgnoredKeys(child, fieldAddr, maxDepth-1)...)
		}
	}
	return addrs
}
func baseType(typ reflect.Type) reflect.Type {
for typ.Kind() == reflect.Ptr || typ.Kind() == reflect.Slice || typ.Kind() == reflect.Array {
typ = typ.Elem()
}
return typ
}
// DiffFormat compares oldValue with newValue and returns a json tree of
// the changed values, rendering each change with formatter. Fields tagged
// `jsondiff:"-"` on oldValue's type are excluded from the diff.
func DiffFormat(oldValue interface{}, newValue interface{}, formatter Formatter) (json.RawMessage, error) {
	// Guard nil input: reflect.TypeOf(nil) returns nil and the Kind call
	// below would panic.
	if oldValue == nil {
		return nil, errors.New("jsondiff only supports structs (and pointers to structs)")
	}
	typ := reflect.TypeOf(oldValue)
	for typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	if typ.Kind() != reflect.Struct && typ.Kind() != reflect.Map {
		return nil, errors.New("jsondiff only supports structs (and pointers to structs)")
	}
	// Ignored-field addresses are derived from the old value's type only.
	ignoreAddrs := getIgnoredKeys(typ, nil, 10)
	jsonOld, err := json.Marshal(oldValue)
	if err != nil {
		return nil, err
	}
	jsonNew, err := json.Marshal(newValue)
	if err != nil {
		return nil, err
	}
	// Round-trip both values through JSON so the comparison operates on
	// plain maps/slices/primitives.
	oldMap := map[string]interface{}{}
	if err = json.Unmarshal(jsonOld, &oldMap); err != nil {
		return nil, err
	}
	newMap := map[string]interface{}{}
	if err = json.Unmarshal(jsonNew, &newMap); err != nil {
		return nil, err
	}
	diff := map[string]interface{}{}
	calculateDiff(diff, oldMap, newMap, formatter, nil, ignoreAddrs)
	return json.Marshal(diff)
}
// calculateDiff calculates the difference between the old and new maps
// and fills diffResult with the result. addr is the key path of the maps
// being compared; any path listed in ignoreAddrs is skipped.
func calculateDiff(
	diffResult map[string]interface{},
	oldMap map[string]interface{},
	newMap map[string]interface{},
	formatter Formatter,
	addr []string,
	ignoreAddrs [][]string,
) {
	// iterate over keys present in either map
	for _, k := range allKeys(oldMap, newMap) {
		if containsAddr(ignoreAddrs, append(addr, k)) {
			delete(diffResult, k)
			continue
		}
		newProp := newMap[k]
		oldProp := oldMap[k]
		// check if the values are maps themselves
		mpOld, oldIsMap := oldProp.(map[string]interface{})
		mpNew, newIsMap := newProp.(map[string]interface{})
		// one is a map, the other is not, must be a change
		if oldIsMap != newIsMap {
			diffResult[k] = formatter(wrapJson(oldProp, append(addr, k), ignoreAddrs), wrapJson(newProp, append(addr, k), ignoreAddrs))
			continue
		}
		// both are maps, check subkeys for changes (recursively)
		if oldIsMap && newIsMap {
			subResult := map[string]interface{}{}
			calculateDiff(subResult, mpOld, mpNew, formatter, append(addr, k), ignoreAddrs)
			// has subkey differences, add to diff
			if len(subResult) > 0 {
				diffResult[k] = subResult
			}
			continue
		}
		// use deepEquals to determine equality b/c we don't dive into array-diffing
		// we just show the entire array as changed
		if !deepEquals(oldProp, newProp) {
			diffResult[k] = formatter(wrapJson(oldProp, append(addr, k), ignoreAddrs), wrapJson(newProp, append(addr, k), ignoreAddrs))
		}
	}
}
// wrapJson prepares value for inclusion in the diff output: it recurses
// into maps and slices, stripping any keys whose address appears in
// ignoreAddrs. Note that maps are modified in place (the keys are deleted
// from the map that was passed in, which here always originates from a
// throwaway json.Unmarshal of the input values).
func wrapJson(value interface{}, currentAddr []string, ignoreAddrs [][]string) interface{} {
	if value == nil {
		return value
	}
	mp, ok := value.(map[string]interface{})
	if !ok {
		switch reflect.TypeOf(value).Kind() {
		case reflect.Slice, reflect.Array:
			// Elements inherit the address of the containing slice —
			// indices are not part of the address path.
			list := []interface{}{}
			arr := reflect.ValueOf(value)
			for i := 0; i < arr.Len(); i++ {
				list = append(list, wrapJson(arr.Index(i).Interface(), currentAddr, ignoreAddrs))
			}
			return list
		}
		// Primitive value: nothing to strip.
		return value
	}
	// Collect first, then delete, to avoid mutating mp while ranging.
	keysToRemove := []string{}
	for k := range mp {
		if containsAddr(ignoreAddrs, append(currentAddr, k)) {
			keysToRemove = append(keysToRemove, k)
		}
	}
	for _, k := range keysToRemove {
		delete(mp, k)
	}
	for k, v := range mp {
		mp[k] = wrapJson(v, append(currentAddr, k), ignoreAddrs)
	}
	return mp
}
// containsAddr reports whether test exactly matches one of addrs.
func containsAddr(addrs [][]string, test []string) bool {
	for _, candidate := range addrs {
		if equalsAddr(test, candidate) {
			return true
		}
	}
	return false
}
// equalsAddr reports whether the two address paths are identical
// (same length, same elements in order).
func equalsAddr(test []string, prefix []string) bool {
	if len(test) != len(prefix) {
		return false
	}
	for i, p := range prefix {
		if test[i] != p {
			return false
		}
	}
	return true
}
// deepEquals compares two json-decoded values (maps, slices, primitives).
// Unlike reflect.DeepEqual on maps, a key missing from one map compares
// equal to an explicit nil in the other.
func deepEquals(a interface{}, b interface{}) bool {
	if mpA, ok := a.(map[string]interface{}); ok {
		mpB, ok := b.(map[string]interface{})
		if !ok {
			// map vs non-map: changed
			return false
		}
		// both maps: every key present in either must match
		for _, k := range allKeys(mpA, mpB) {
			if !deepEquals(mpA[k], mpB[k]) {
				return false
			}
		}
		return true
	}
	if _, ok := b.(map[string]interface{}); ok {
		// non-map vs map: changed
		return false
	}
	if arrA, ok := a.([]interface{}); ok {
		arrB, ok := b.([]interface{})
		if !ok || len(arrA) != len(arrB) {
			return false
		}
		// both arrays of equal length: compare element-wise
		for i := range arrA {
			if !deepEquals(arrA[i], arrB[i]) {
				return false
			}
		}
		return true
	}
	if _, ok := b.([]interface{}); ok {
		// non-array vs array: changed
		return false
	}
	// primitive comparison
	return a == b
}
// allKeys returns the combined keys of a & b (without duplicates).
// Order is unspecified, as with map iteration.
func allKeys(a map[string]interface{}, b map[string]interface{}) []string {
	seen := make(map[string]bool, len(a)+len(b))
	var keys []string
	for _, m := range []map[string]interface{}{a, b} {
		for k := range m {
			if !seen[k] {
				seen[k] = true
				keys = append(keys, k)
			}
		}
	}
	return keys
}
|
package rpc
import (
"github.com/jrapoport/gothic/api/grpc/rpc"
"github.com/jrapoport/gothic/core/tokens"
"github.com/jrapoport/gothic/models/user"
"github.com/jrapoport/gothic/utils"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/timestamppb"
)
// UserResponse maps the protobuf rpc UserResponse so local methods
// (e.g. MaskEmail) can be attached to it.
type UserResponse api.UserResponse
// NewUserResponse returns a UserResponse for the supplied user. It fails
// only if the user's free-form data cannot be converted to a proto struct.
func NewUserResponse(u *user.User) (*UserResponse, error) {
	data, err := structpb.NewStruct(u.Data)
	if err != nil {
		return nil, err
	}
	res := &UserResponse{
		UserId:   u.ID.String(),
		Role:     u.Role.String(),
		Email:    u.Email,
		Username: u.Username,
		Data:     data,
	}
	return res, nil
}
// MaskEmail masks Personally Identifiable Information (PII) from the user
// response by replacing the email with its masked form in place.
func (r *UserResponse) MaskEmail() {
	r.Email = utils.MaskEmail(r.Email)
}
// NewBearerResponse returns a BearerResponse from a BearerToken; the
// expiry timestamp is only set when the token carries one.
func NewBearerResponse(bt *tokens.BearerToken) *api.BearerResponse {
	res := &api.BearerResponse{
		Type:    bt.Class().String(),
		Access:  bt.String(),
		Refresh: bt.RefreshToken.String(),
	}
	if exp := bt.ExpiredAt; exp != nil {
		res.ExpiresAt = timestamppb.New(*exp)
	}
	return res
}
|
package main
import (
"app"
"app/configSettting"
log "github.com/Sirupsen/logrus"
"github.com/garyburd/redigo/redis"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"os"
)
// main wires up the broadscaler admin service: three gorm/MySQL
// connections (adscoops, adscoops realtime, broadvid ad server) plus a
// redis pool, publishes them via configSettting, then starts the admin app.
func main() {
	gin.SetMode(gin.ReleaseMode)
	// broadscaler admin
	//
	// adscoopsDB := fmt.Sprintf(os.Getenv("GO_DATABASE_CONN_ADSCOOPS"), "adscoops")
	// adscoopsRealtimeDB := fmt.Sprintf(os.Getenv("GO_DATABASE_CONN_ADSCOOPS_RT"), "adscoops")
	//
	// broadvidadsDB := fmt.Sprintf(os.Getenv("GO_DATABASE_CONN"), "broadvidadserver")
	// NOTE(review): hard-coded local DSNs (root user, no password); the
	// env-driven versions above are commented out — confirm this is not
	// meant for production.
	adscoopsDB := "root:@/adscoops?parseTime=true"
	adscoopsRealtimeDB := "root:@/adscoops?parseTime=true"
	broadvidadsDB := "root:@/broadvidadserver?parseTime=true"
	//Setup Adscoops DB connection
	db, err := gorm.Open("mysql", adscoopsDB)
	if err != nil {
		log.Panicf("Error connecting to the DB: %s", err)
	}
	db.LogMode(true)
	log.Info("Connected to the first DB connection")
	//Setup Adscoops RealTime DB connection
	dbrt, err := gorm.Open("mysql", adscoopsRealtimeDB)
	if err != nil {
		log.Panicf("Error connecting to the DB: %s", err)
	}
	dbrt.LogMode(true)
	log.Info("Connected to the second DB connection")
	//Setup Broadvid DB connection
	dbbv, err := gorm.Open("mysql", broadvidadsDB)
	if err != nil {
		log.Panicf("Error connecting to the DB: %s", err)
	}
	log.Info("Connected to the first broadvid DB connection")
	// Setup Redis (host overridable via REDIS_HOST)
	redisHost := "localhost:6379"
	if os.Getenv("REDIS_HOST") != "" {
		redisHost = os.Getenv("REDIS_HOST")
	}
	redisPool := redis.NewPool(func() (redis.Conn, error) {
		var c redis.Conn
		var err error
		c, err = redis.Dial("tcp", redisHost)
		if err != nil {
			log.Panic("REDIS err", err)
			return nil, err
		}
		// NOTE(review): AUTH is issued whenever REDIS_HOST is set (not
		// when a password is configured) and its error is ignored —
		// verify this is intended.
		if os.Getenv("REDIS_HOST") != "" {
			c.Do("AUTH", configSettting.RedisAuthPassword)
		}
		return c, err
	}, 10)
	log.Info("Connected to redis")
	log.Println(redisPool)
	log.Println(dbbv)
	// Pool sizing is applied only to the first adscoops connection.
	db.DB().SetMaxIdleConns(10)
	db.DB().SetMaxOpenConns(100)
	// Publish the connections through the shared configSettting package.
	configSettting.AdscoopsDB = db
	configSettting.AdscoopsRealtimeDB = dbrt
	configSettting.BroadvidDB = dbbv
	configSettting.RedisPool = redisPool
	app.InitAdmin()
}
|
package main
import "go/ast"
// main appears to be scratch code exploring go/ast types.
func main() {
	// NOTE(review): the original ended with a dangling selector ("v.")
	// and did not compile; completed minimally so the file builds. gd is
	// nil here, so the Specs access would panic at runtime — this looks
	// like abandoned scratch code rather than a real program.
	var gd *ast.GenDecl
	v := gd.Specs[0].(*ast.ValueSpec)
	_ = v
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
)
// countAggregator is a simple processor that counts the number of rows it
// receives. It's a specialized aggregator that can be used for COUNT(*).
type countAggregator struct {
	execinfra.ProcessorBase

	// input is the upstream row source whose rows are counted.
	input execinfra.RowSource
	// count accumulates the number of input rows seen so far.
	count int
}
var _ execinfra.Processor = &countAggregator{}
var _ execinfra.RowSource = &countAggregator{}
const countRowsProcName = "count rows"
// newCountAggregator constructs a countAggregator that consumes input and
// emits a single Int column (the row count) through output.
func newCountAggregator(
	flowCtx *execinfra.FlowCtx,
	processorID int32,
	input execinfra.RowSource,
	post *execinfrapb.PostProcessSpec,
	output execinfra.RowReceiver,
) (*countAggregator, error) {
	ag := &countAggregator{}
	ag.input = input
	// When stats collection is enabled, wrap the input so per-input stats
	// are recorded and later surfaced via execStatsForTrace.
	if execinfra.ShouldCollectStats(flowCtx.EvalCtx.Ctx(), flowCtx) {
		ag.input = newInputStatCollector(input)
		ag.ExecStatsForTrace = ag.execStatsForTrace
	}
	if err := ag.Init(
		ag,
		post,
		[]*types.T{types.Int},
		flowCtx,
		processorID,
		output,
		nil, /* memMonitor */
		execinfra.ProcStateOpts{
			InputsToDrain: []execinfra.RowSource{ag.input},
		},
	); err != nil {
		return nil, err
	}
	return ag, nil
}
// Start is part of the RowSource interface: it marks the processor as
// started (opening its tracing span) and starts the input.
func (ag *countAggregator) Start(ctx context.Context) {
	ctx = ag.StartInternal(ctx, countRowsProcName)
	ag.input.Start(ctx)
}
// Next is part of the RowSource interface. It consumes the entire input,
// incrementing count per row; when the input is exhausted it emits exactly
// one output row containing the count and moves to draining.
func (ag *countAggregator) Next() (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
	for ag.State == execinfra.StateRunning {
		row, meta := ag.input.Next()
		if meta != nil {
			if meta.Err != nil {
				ag.MoveToDraining(meta.Err)
				break
			}
			// Pass non-error metadata through to the consumer unchanged.
			return nil, meta
		}
		if row == nil {
			// Input exhausted: render the single COUNT(*) result row.
			ret := make(rowenc.EncDatumRow, 1)
			ret[0] = rowenc.EncDatum{Datum: tree.NewDInt(tree.DInt(ag.count))}
			rendered, _, err := ag.Out.ProcessRow(ag.Ctx, ret)
			// We're done as soon as we process our one output row, so we
			// transition into draining state. We will, however, return non-nil
			// error (if such occurs during rendering) separately below.
			ag.MoveToDraining(nil /* err */)
			if err != nil {
				return nil, &execinfrapb.ProducerMetadata{Err: err}
			}
			return rendered, nil
		}
		ag.count++
	}
	return nil, ag.DrainHelper()
}
// execStatsForTrace implements ProcessorBase.ExecStatsForTrace.
// It returns nil when the input was not wrapped with a stat collector
// (i.e. stats collection was disabled at construction time).
func (ag *countAggregator) execStatsForTrace() *execinfrapb.ComponentStats {
	is, ok := getInputStats(ag.input)
	if !ok {
		return nil
	}
	return &execinfrapb.ComponentStats{
		Inputs: []execinfrapb.InputStats{is},
		Output: ag.Out.Stats(),
	}
}
|
package logic
import (
"fmt"
"io"
"bufio"
"testing"
)
// BasicReader is an in-memory io.Reader over a byte slice; it keeps a
// cursor so successive Reads walk through the data.
type BasicReader struct {
	data []byte
	pos  int
}

// ConsoleWriter is an io.Writer that echoes everything to stdout.
type ConsoleWriter struct{}

// Read copies as many remaining bytes as fit into p and advances the
// cursor. When nothing can be copied (data exhausted, or p is empty) it
// reports io.EOF, mirroring the original contract.
func (br *BasicReader) Read(p []byte) (n int, err error) {
	n = copy(p, br.data[br.pos:])
	if n == 0 {
		return 0, io.EOF
	}
	br.pos += n
	return n, nil
}

// StringReader wraps src in a fresh BasicReader positioned at the start.
func StringReader(src string) io.Reader {
	return &BasicReader{data: []byte(src)}
}

// Write prints p to standard output and reports fmt.Print's result.
func (cw *ConsoleWriter) Write(p []byte) (n int, err error) {
	return fmt.Print(string(p))
}
// TestIO round-trips a quantified predicate: it serializes the predicate
// with the standard writer, then parses the expected string form back with
// the standard reader and re-serializes the result.
// NOTE(review): the written output is only printed, never compared against
// `expected` — consider capturing it and asserting equality.
func TestIO(t *testing.T) {
	source := CreateBasicParticleSource()
	var varX NamedParticle
	varX = source.GetVariableNamed("x")
	varY := source.GetVariableNamed("y")
	var qEx Name
	qEx = source.GetQuantifier("A")
	predEq := source.GetPredicateName("=")
	opImpl := source.GetOperator("->")
	predFoo := source.GetPredicateName("Foo")
	// Build the predicate whose expected serialization is the `expected`
	// string below.
	pred := source.Get(QUANTIFIED_PREDICATE, qEx, varX,
		source.GetPredicateExpression(opImpl,
			source.GetAtomicPredicate(predFoo,varX),
			source.GetAtomicPredicate(predEq,varX,varY)))
	out := &ConsoleWriter{}
	wout := GetStandardWriter()
	wout.Write(pred, out)
	fmt.Println()
	expected := "A$x:{->:Foo[$x],=[$x,$y]}"
	var rin LogicReader
	rin = GetStandardReader()
	in := bufio.NewReader(StringReader(expected))
	p, err := rin.ReadPredicate(source,in)
	if err != nil {
		t.Error(err)
	} else {
		wout.Write(p, out)
		fmt.Println()
	}
}
package api
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/dolittle/platform-api/pkg/git"
gitStorage "github.com/dolittle/platform-api/pkg/platform/storage/git"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// gitTestCMD is a development helper: it writes a dummy microservice JSON
// file into the git storage working tree and commits/pushes it, verifying
// the git integration works end to end.
var gitTestCMD = &cobra.Command{
	Use:   "git-test",
	Short: "Test git",
	Run: func(cmd *cobra.Command, args []string) {
		logrus.SetFormatter(&logrus.JSONFormatter{})
		logrus.SetOutput(os.Stdout)
		logContext := logrus.StandardLogger()
		platformEnvironment := viper.GetString("tools.server.platformEnvironment")
		gitRepoConfig := git.InitGit(logContext, platformEnvironment)
		gitRepo := gitStorage.NewGitStorage(
			logrus.WithField("context", "git-repo"),
			gitRepoConfig,
		)
		// Hard-coded environment/customer path — presumably a fixed dev
		// tenant; confirm before reusing outside development.
		dir := filepath.Join(gitRepo.Directory, "dev", "453e04a7-4f9d-42f2-b36c-d51fa2c83fa3")
		err := os.MkdirAll(dir, 0755)
		if err != nil {
			fmt.Println(err)
			return
		}
		microserviceID := "test"
		data := []byte(`hi 1`)
		filename := filepath.Join(dir, fmt.Sprintf("ms_%s.json", microserviceID))
		err = ioutil.WriteFile(filename, data, 0644)
		if err != nil {
			fmt.Println("writeFile")
			fmt.Println(err)
			return
		}
		err = gitRepo.CommitPathAndPush(filename, "upsert microservice")
		if err != nil {
			fmt.Println("CommitPathAndPush")
			fmt.Println(err)
			return
		}
	},
}
|
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"os/signal"
redis "xj_web_server/cache"
"xj_web_server/config"
"xj_web_server/db"
"xj_web_server/httpserver"
"xj_web_server/tcp"
"xj_web_server/util"
"time"
)
// main wires up the web server: configuration, redis, four database
// connections, a TCP listener, and an HTTP server that shuts down
// gracefully on interrupt.
func main() {
	// NOTE(review): the leading "/.." in this path looks suspicious —
	// confirm how InitConfig resolves it relative to the process.
	config.InitConfig("/../config/config.yml")
	err := redis.InitRedis(config.GetRedis())
	if err != nil {
		log.Fatalf("redis init err %v", err)
		return
	}
	initDB, err := db.InitDB(config.GetDb())
	if err != nil {
		log.Fatalf("db init err %v", err)
		return
	}
	defer func() {
		err := initDB.Close()
		if err != nil {
			log.Fatalf("db close err %v", err)
		}
	}()
	initDBTreasure, err := db.InitDBTreasure(config.GetDBTreasure())
	if err != nil {
		log.Fatalf("db init Treasure err %v", err)
		return
	}
	defer func() {
		err := initDBTreasure.Close()
		if err != nil {
			log.Fatalf("db close Treasure err %v", err)
		}
	}()
	initDBPlatform, err := db.InitDBPlatform(config.GetDBPlatform())
	if err != nil {
		log.Fatalf("db init Platform err %v", err)
		return
	}
	defer func() {
		err := initDBPlatform.Close()
		if err != nil {
			log.Fatalf("db close Platform err %v", err)
		}
	}()
	initDBRecord, err := db.InitDBRecord(config.GetDBRecord())
	if err != nil {
		log.Fatalf("db init Record err %v", err)
		return
	}
	defer func() {
		err := initDBRecord.Close()
		if err != nil {
			log.Fatalf("db close Record err %v", err)
		}
	}()
	router := httpserver.SetupRouter()
	go tcp.Run(config.GetService().TCPPort)
	server := &http.Server{
		Addr:         config.GetService().Port,
		Handler:      router,
		ReadTimeout:  config.GetService().AppReadTimeout * time.Second,
		WriteTimeout: config.GetService().AppWriteTimeout * time.Second,
	}
	util.Logger.Info("|-----------------------------------|")
	util.Logger.Info("| qp-web-server |")
	util.Logger.Info("|-----------------------------------|")
	util.Logger.Info("| Go Http Server Start Successful |")
	util.Logger.Info("| HttpPort" + config.GetService().Port + " Pid:" + fmt.Sprintf("%d", os.Getpid()) + " |")
	util.Logger.Info("| TcpPort" + config.GetService().TCPPort + " Pid:" + fmt.Sprintf("%d", os.Getpid()) + " |")
	util.Logger.Info("|-----------------------------------|")
	log.Println("|-----------------------------------|")
	log.Println("| qp-web-server |")
	log.Println("|-----------------------------------|")
	log.Println("| Go Http Server Start Successful |")
	log.Println("| HttpPort" + config.GetService().Port + " Pid:" + fmt.Sprintf("%d", os.Getpid()) + " |")
	log.Println("| TcpPort" + config.GetService().TCPPort + " Pid:" + fmt.Sprintf("%d", os.Getpid()) + " |")
	log.Println("|-----------------------------------|")
	go func() {
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalf("HTTP server listen: %s\n", err)
		}
	}()
	// 等待中断信号以优雅地关闭服务器(设置 5 秒的超时时间)
	// FIX: signal.Notify requires a buffered channel; an unbuffered one
	// can miss a signal delivered while main is not ready to receive.
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, os.Interrupt)
	sig := <-signalChan
	log.Println("Get Signal:", sig)
	log.Println("Shutdown Server ...")
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Fatal("Server Shutdown:", err)
	}
	log.Println("Server exiting")
	//err = router.Run(config.GetService().Port)
	//if err != nil {
	//	log.Fatalf("http server run err %v", err)
	//}
}
|
package goods
import (
"flea-market/common/tools"
"flea-market/model/goodsModel"
"github.com/gin-gonic/gin"
"net/http"
"strconv"
)
func Delete(c *gin.Context) {
claims := tools.CheckToken(c)
goodsIdStr := c.Query("goods_id")
if goodsId,err := strconv.Atoi(goodsIdStr);err != nil {
c.JSON(http.StatusBadRequest,gin.H{"msg":err.Error()})
return
} else {
if _,err := goodsModel.UpdateStatus(goodsId,claims.UserId,2);err != nil {
c.JSON(http.StatusBadRequest,gin.H{"msg":err.Error()})
return
} else {
c.JSON(http.StatusOK,gin.H{"msg":"删除成功!"})
}
}
} |
package main
import "fmt"
// Student is a simple record carrying an id, a name and a gender flag.
type Student struct {
	Id     int
	Name   string
	Gender bool
}

// Beaner exposes read and write access to a bean's name.
type Beaner interface {
	GetName() string
	SetName(string)
}

// GetName reports the student's current name. A value receiver suffices
// because nothing is mutated.
func (st Student) GetName() string {
	return st.Name
}

// SetName replaces the student's name. The pointer receiver is required
// so the mutation is visible to the caller (and so *Student, not Student,
// satisfies Beaner).
func (st *Student) SetName(name string) {
	st.Name = name
}
func main() {
	// A *Student (not a Student value) is needed to satisfy Beaner,
	// because SetName has a pointer receiver.
	var stu Beaner = &Student{Id: 1, Name: "lisi", Gender: true}
	stu.SetName("zhangsan")
	fmt.Println(stu.GetName(), stu)
}
|
package config
import (
"encoding/json"
"io/ioutil"
)
// Lower bounds for the video resolution; anything smaller is clamped.
const (
	minWidth  = 320
	minHeight = 240
)

// Project configuration properties.
// Engine needs to know where to locate its game data.
type Config struct {
	GameDirectory string
	Video         struct {
		Width  int
		Height int
	}
}

// @TODO Implement something nicer than this scoped variable
var config Config

// Get returns (kind-of) static config object.
func Get() *Config {
	return &config
}

// Load reads the JSON configuration at path into the package-level config,
// clamps out-of-range video settings, and returns a pointer to it. On
// error the partially-populated config pointer is returned alongside err.
func Load(path string) (*Config, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return &config, err
	}
	if err := json.Unmarshal(data, &config); err != nil {
		return &config, err
	}
	validate()
	return &config, nil
}

// validate clamps video dimensions to their known lower bounds.
func validate() {
	if w := config.Video.Width; w < minWidth {
		config.Video.Width = minWidth
	}
	if h := config.Video.Height; h < minHeight {
		config.Video.Height = minHeight
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"bytes"
"context"
"strconv"
"strings"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/arc/optin"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
)
// init registers the MiniVM test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         MiniVM,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Ensures mini-ARCVM is functional and can be upgraded successfully",
		Contacts: []string{
			"wvk@chromium.org",
			"arc-performance@google.com",
		},
		SoftwareDeps: []string{"android_vm", "chrome"},
		Timeout:      4 * time.Minute,
		VarDeps:      []string{"ui.gaiaPoolDefault"},
	})
}
// MiniVM checks that the mini-ARCVM instance started at login is healthy
// (init running, boot-notification client up, /data unmounted), then
// performs the ARC opt-in upgrade and verifies the post-upgrade state
// (client stopped, /data mounted, upgrade props set).
func MiniVM(ctx context.Context, s *testing.State) {
	// Setup Chrome and login as an opt-out user. mini-ARCVM should
	// automatically start.
	cr, err := chrome.New(ctx,
		chrome.GAIALoginPool(s.RequiredVar("ui.gaiaPoolDefault")),
		chrome.ARCSupported(),
		chrome.ExtraArgs(arc.DisableSyncFlags()...))
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	defer cr.Close(ctx)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create test API connection: ", err)
	}
	s.Log("Waiting for crosvm to start")
	if err := waitForCrosvmProcess(ctx); err != nil {
		s.Fatal("Failed to wait for crosvm process to start: ", err)
	}
	// Get mini-VM CID from Concierge.
	cid, err := getMiniVMCID(ctx)
	if err != nil {
		s.Fatal("Failed to get mini-VM CID: ", err)
	}
	// Check for init process inside the guest.
	s.Log("Checking for init process in ARCVM")
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		return vmCommand(ctx, cid, "pidof", "-s", "init").Run()
	}, nil); err != nil {
		s.Fatal("Failed to find init process in guest")
	}
	s.Log("Checking that arcvm-boot-notification-client is running")
	const serviceProp = "init.svc.arcvm-boot-notification-client"
	if err := waitForProp(ctx, cid, serviceProp, "running"); err != nil {
		s.Fatal("Failed to check status of arcvm-boot-notification-client: ", err)
	}
	// Before the upgrade, the user data partition must not be mounted yet.
	s.Log("Checking that /data is not mounted")
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if err := vmCommand(ctx, cid, "mountpoint", "-q", "/data").Run(); err == nil {
			return errors.New("/data is mounted")
		}
		return nil
	}, nil); err != nil {
		s.Fatal("Failed to check that /data is not mounted: ", err)
	}
	s.Log("Upgrading the mini-ARCVM instance")
	if err := optin.Perform(ctx, cr, tconn); err != nil {
		s.Fatal("Unable to perform ARC optin: ", err)
	}
	a, err := arc.New(ctx, s.OutDir())
	if err != nil {
		s.Fatal("Failed to wait for ARC to finish booting: ", err)
	}
	defer a.Close(ctx)
	s.Log("Checking that arcvm-boot-notification-client is stopped")
	if err := waitForProp(ctx, cid, serviceProp, "stopped"); err != nil {
		s.Fatal("Failed to check status of arcvm-boot-notification-client: ", err)
	}
	s.Log("Checking that /data is mounted")
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		return vmCommand(ctx, cid, "mountpoint", "-q", "/data").Run()
	}, nil); err != nil {
		s.Fatal("Failed to check /data mount: ", err)
	}
	s.Log("Checking that upgrade props are set")
	// Check a subset of upgrade props. Most of them are not readable from shell
	// due to SELinux policy.
	for _, prop := range []string{
		"ro.boot.arc_demo_mode",
		"ro.boot.enable_adb_sideloading",
	} {
		if err := waitForPropToExist(ctx, cid, prop); err != nil {
			s.Fatalf("Failed to check upgrade prop %s: %v", prop, err)
		}
	}
}
// vmCommand creates a command to be run in the VM over vsh. We cannot use
// android-sh while mini-VM is running since it depends on having the
// cryptohome-ID set for the VM (the ID is not set until the VM is upgraded).
func vmCommand(ctx context.Context, cid int, command string, args ...string) *testexec.Cmd {
	params := append([]string{"--user=root", "--cid=" + strconv.Itoa(cid), "--", command}, args...)
	cmd := testexec.CommandContext(ctx, "vsh", params...)
	// Give vsh an empty stdin so it does not block waiting for input.
	cmd.Stdin = &bytes.Buffer{}
	return cmd
}
// waitForCrosvmProcess waits for the crosvm process to start. We cannot use
// arc.WaitAndroidInit() here since that uses android-sh.
// It polls arc.InitPID until it succeeds or the context deadline passes.
func waitForCrosvmProcess(ctx context.Context) error {
	return testing.Poll(ctx, func(ctx context.Context) error {
		_, err := arc.InitPID()
		return err
	}, nil)
}
// waitForProp polls until the Android property prop (read via getprop
// inside the VM) equals expected, or the context deadline is hit.
func waitForProp(ctx context.Context, cid int, prop, expected string) error {
	return testing.Poll(ctx, func(ctx context.Context) error {
		out, err := vmCommand(ctx, cid, "getprop", prop).Output()
		if err != nil {
			return err
		}
		if got := strings.TrimSpace(string(out)); got != expected {
			return errors.Errorf("unexpected %s, got: %q; want: %q", prop, got, expected)
		}
		return nil
	}, nil)
}
// waitForPropToExist polls until the Android property prop has any
// non-empty value inside the VM.
func waitForPropToExist(ctx context.Context, cid int, prop string) error {
	return testing.Poll(ctx, func(ctx context.Context) error {
		out, err := vmCommand(ctx, cid, "getprop", prop).Output()
		if err != nil {
			return err
		}
		if strings.TrimSpace(string(out)) == "" {
			return errors.Errorf("%s prop is not set", prop)
		}
		return nil
	}, nil)
}
// getMiniVMCID returns the context identifier (CID) of the currently
// running mini-ARCVM instance, if any, by querying Concierge.
func getMiniVMCID(ctx context.Context) (int, error) {
	cmd := testexec.CommandContext(
		ctx, "concierge_client", "--get_vm_cid", "--name=arcvm",
		"--cryptohome_id=ARCVM_DEFAULT_OWNER")
	out, err := cmd.Output(testexec.DumpLogOnError)
	if err != nil {
		return 0, err
	}
	// Atoi already returns (0, err) on malformed output.
	return strconv.Atoi(strings.TrimSpace(string(out)))
}
|
package gwfunc
import (
//"sync"
"time"
)
// init is intentionally empty; presumably kept as a placeholder for
// package setup (see the commented-out pool below) — confirm before
// removing.
func init() {
}
//var pool = sync.Pool {
// New: func() interface{}{
// var executor = &execution {
// hasTimeout: false,
// hasDone: make(chan bool, 1),
// }
// return executor
// },
//}
// execution tracks a single timed function call: the function itself, a
// completion channel, and bookkeeping about whether it timed out.
type execution struct {
	f          func()
	state      uint8
	hasDone    chan bool
	hasTimeout bool
	timeout    time.Duration
}

// exec runs e.f in a goroutine and returns a channel yielding true if the
// call did not finish within e.timeout, false if it completed in time.
func (e *execution) exec() <-chan bool {
	var isTimeout = make(chan bool, 1)
	go func() {
		defer e.release()
		e.f()
		// FIX: the previous version read e.hasTimeout here while the
		// selecting goroutine wrote it — a data race. hasDone is buffered
		// (capacity 1), so sending unconditionally never blocks, even
		// when the caller has already given up after a timeout.
		e.hasDone <- true
	}()
	select {
	case <-time.After(e.timeout):
		e.hasTimeout = true
		isTimeout <- true
	case <-e.hasDone:
		isTimeout <- false
	}
	return isTimeout
}

// release closes the completion channel once the worker goroutine is done.
func (e *execution) release() {
	close(e.hasDone)
}

// Timeout runs f under a timeout and returns true if f did not complete
// within the given duration, false if it finished in time.
func Timeout(f func(), timeout time.Duration) bool {
	var executor = execution{
		f:       f,
		timeout: timeout,
		hasDone: make(chan bool, 1),
	}
	return <-executor.exec()
}
//func WaitAll(timeout time.Duration, funcList ...func()) bool {
//
//}
//
//func WaitOne(timeout time.Duration, funcList ...func()) bool {
//}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
// Filesystem types used in statfs(2).
//
// See linux/magic.h.
const (
	// Note: ANON_INODE_FS_MAGIC and RAMFS_MAGIC share the same value,
	// matching the upstream Linux definitions.
	ANON_INODE_FS_MAGIC   = 0x09041934
	CGROUP_SUPER_MAGIC    = 0x27e0eb
	DEVPTS_SUPER_MAGIC    = 0x00001cd1
	EXT_SUPER_MAGIC       = 0xef53
	FUSE_SUPER_MAGIC      = 0x65735546
	MQUEUE_MAGIC          = 0x19800202
	NSFS_MAGIC            = 0x6e736673
	OVERLAYFS_SUPER_MAGIC = 0x794c7630
	PIPEFS_MAGIC          = 0x50495045
	PROC_SUPER_MAGIC      = 0x9fa0
	RAMFS_MAGIC           = 0x09041934
	SOCKFS_MAGIC          = 0x534F434B
	SYSFS_MAGIC           = 0x62656572
	TMPFS_MAGIC           = 0x01021994
	V9FS_MAGIC            = 0x01021997
)
// Filesystem path limits, from uapi/linux/limits.h.
const (
	NAME_MAX = 255
	PATH_MAX = 4096
)
// Statfs is struct statfs, from uapi/asm-generic/statfs.h.
//
// +marshal
type Statfs struct {
	// Type is one of the filesystem magic values, defined above.
	Type uint64
	// BlockSize is the optimal transfer block size in bytes.
	BlockSize int64
	// Blocks is the maximum number of data blocks the filesystem may store, in
	// units of BlockSize.
	Blocks uint64
	// BlocksFree is the number of free data blocks, in units of BlockSize.
	BlocksFree uint64
	// BlocksAvailable is the number of data blocks free for use by
	// unprivileged users, in units of BlockSize.
	BlocksAvailable uint64
	// Files is the number of used file nodes on the filesystem.
	Files uint64
	// FilesFree is the number of free file nodes on the filesystem.
	FilesFree uint64
	// FSID is the filesystem ID.
	FSID [2]int32
	// NameLength is the maximum file name length.
	NameLength uint64
	// FragmentSize is equivalent to BlockSize.
	FragmentSize int64
	// Flags is the set of filesystem mount flags.
	Flags uint64
	// Spare is unused.
	Spare [4]uint64
}
// Whence argument to lseek(2), from include/uapi/linux/fs.h.
const (
	SEEK_SET  = 0
	SEEK_CUR  = 1
	SEEK_END  = 2
	// SEEK_DATA/SEEK_HOLE locate the next data/hole region in sparse files.
	SEEK_DATA = 3
	SEEK_HOLE = 4
)
// Sync_file_range flags, from include/uapi/linux/fs.h
const (
	SYNC_FILE_RANGE_WAIT_BEFORE = 1
	SYNC_FILE_RANGE_WRITE       = 2
	SYNC_FILE_RANGE_WAIT_AFTER  = 4
)
// Flag argument to renameat2(2), from include/uapi/linux/fs.h.
const (
	RENAME_NOREPLACE = (1 << 0) // Don't overwrite target.
	RENAME_EXCHANGE  = (1 << 1) // Exchange src and dst.
	RENAME_WHITEOUT  = (1 << 2) // Whiteout src.
)
|
// Attempted the following name for package:
// - authenticator: this sounds more like a verb
// - authentication: too long
// - userlogin: is too specific, since user can also register
// - loginUser: breaks the convention, since package name is preferable a noun.
// - authz and authn is better.
package authnsvc
import (
"context"
"github.com/pkg/errors"
"github.com/alextanhongpin/go-microservice/database"
"github.com/alextanhongpin/go-microservice/pkg/govalidator"
"github.com/alextanhongpin/passwd"
)
type (
	// RegisterRequest carries the credentials for a new registration;
	// the username must be an email and the password at least 8 chars.
	RegisterRequest struct {
		Username string `json:"username" validate:"required,email"`
		Password string `json:"password" validate:"required,min=8"`
	}
	// RegisterResponse wraps the newly created user.
	RegisterResponse struct {
		User User `json:"user"`
	}
	// registerRepository is the persistence seam the use case needs:
	// create a user from a username and an already-hashed password.
	registerRepository interface {
		Create(username, password string) (User, error)
	}
	// RegisterUseCase implements user registration on top of a repository.
	RegisterUseCase struct {
		users registerRepository
	}
)
// NewRegisterUseCase returns a new use case to register user, backed by
// the given repository.
func NewRegisterUseCase(users registerRepository) *RegisterUseCase {
	uc := RegisterUseCase{users: users}
	return &uc
}
// Register creates a new account for new users. It validates the request,
// hashes the password, and persists the user; a duplicate username is
// reported as "user already exists".
func (r *RegisterUseCase) Register(ctx context.Context, req RegisterRequest) (*RegisterResponse, error) {
	if err := govalidator.Validate.Struct(req); err != nil {
		return nil, errors.Wrap(err, "validate register request failed")
	}
	// NOTE: There's no checking if the user exists, because there should
	// be a constraint in the database that the username/email is unique.
	hashedPassword, err := passwd.Hash(req.Password)
	if err != nil {
		return nil, errors.Wrap(err, "hash password failed")
	}
	user, err := r.users.Create(req.Username, hashedPassword)
	if err != nil {
		if database.IsDuplicateEntry(err) {
			return nil, errors.New("user already exists")
		}
		return nil, errors.Wrap(err, "create user failed")
	}
	// FIX: err is necessarily nil at this point; the previous version
	// returned errors.Wrap(err, ...) which is nil anyway but obscured the
	// success path. Return an explicit nil error.
	return &RegisterResponse{User: user}, nil
}
|
package config
import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
)
const (
	configDirectoryName = "wundercli"
	configFileName      = "config.json"
)

// Config is the in-memory application configuration.
var Config struct {
	AccessToken string
}

// ConfigDoesNotExist signals a missing configuration file.
type ConfigDoesNotExist error

// getConfigPath resolves the config file location, preferring
// $XDG_CONFIG_HOME, then $HOME/.config, and finally falling back to the
// directory containing the executable.
func getConfigPath() string {
	if xdg := os.Getenv("XDG_CONFIG_HOME"); xdg != "" {
		// $XDG_CONFIG_HOME/wundercli/config.json
		return filepath.Join(xdg, configDirectoryName, configFileName)
	}
	if home := os.Getenv("HOME"); home != "" {
		// $HOME/.config/wundercli/config.json
		return filepath.Join(home, ".config", configDirectoryName, configFileName)
	}
	// config.json next to the executable as a last resort.
	exeDir, _ := filepath.Abs(filepath.Dir(os.Args[0]))
	return filepath.Join(exeDir, configFileName)
}
// SaveConfig writes the current Config to the config file as indented
// JSON, creating the config directory first if needed.
func SaveConfig() (err error) {
	configPath := getConfigPath()
	data, err := json.MarshalIndent(Config, "", " ")
	if err != nil {
		// FIX: the previous errors.New messages discarded the underlying
		// cause; wrap it so callers can see (and unwrap) the real error.
		return fmt.Errorf("encoding config as JSON: %w", err)
	}
	if err = os.MkdirAll(path.Dir(configPath), 0744); err != nil {
		return fmt.Errorf("creating config directory: %w", err)
	}
	// 0600: the config holds an access token, so keep it user-readable only.
	if err = ioutil.WriteFile(configPath, append(data, byte('\n')), 0600); err != nil {
		return fmt.Errorf("writing config file: %w", err)
	}
	return nil
}
// OpenConfig does all config-related work: gets the config path, opens the
// file and decodes it into the package-level Config variable.
// It returns (true, nil) when the config was loaded, (false, nil) when the
// file does not exist, and (false, err) on any other failure.
func OpenConfig() (exists bool, err error) {
	configPath := getConfigPath()
	// Early-return style replaces the original nested if/else ladder;
	// behavior is unchanged.
	if _, err := os.Stat(configPath); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, errors.New("config reading error")
	}
	// Read config from file.
	data, err := ioutil.ReadFile(configPath)
	if err != nil {
		return false, err
	}
	// Decode it.
	if err = json.Unmarshal(data, &Config); err != nil {
		return false, err
	}
	return true, nil
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// ByteCounter accumulates the total number of bytes written to it; it
// satisfies io.Writer and never fails.
type ByteCounter int

// Write adds len(p) to the counter and reports the full length as written.
func (c *ByteCounter) Write(p []byte) (int, error) {
	n := len(p)
	*c += ByteCounter(n)
	return n, nil
}
func main() {
	// Report word and line counts of the fixed input file, in this order.
	words := countWords()
	lines := countLines()
	fmt.Printf("Words count: %d\n", words)
	fmt.Printf("Lines count: %d\n", lines)
}
// countLines returns the number of lines in /tmp/lines.txt, or 0 if the
// file cannot be opened.
func countLines() int {
	return countTokens("/tmp/lines.txt", bufio.ScanLines)
}
// countWords returns the number of whitespace-separated words in
// /tmp/lines.txt, or 0 if the file cannot be opened.
func countWords() int {
	return countTokens("/tmp/lines.txt", bufio.ScanWords)
}
// countTokens opens path and counts the tokens produced by split.
// It returns 0 when the file cannot be opened — matching the previous
// behavior, which silently ignored the os.Open error — but now the error
// is handled explicitly instead of scanning a nil file.
func countTokens(path string, split bufio.SplitFunc) int {
	file, err := os.Open(path)
	if err != nil {
		return 0
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Split(split)
	count := 0
	for scanner.Scan() {
		count++
	}
	return count
}
|
package usecase
// CreateProductInputPort is the inbound boundary of the create-product
// use case; presumably the argument and result are request/response DTOs —
// confirm against the interactor implementation.
type CreateProductInputPort interface {
	CreateProduct(interface{}) (interface{}, error)
}
// CreateProductOutputPort is the outbound boundary that presents the
// result of creating a product.
type CreateProductOutputPort interface {
	CreateProductResponse(interface{}) (interface{}, error)
}
package smoothfs
import (
"log"
"os"
"path/filepath"
"syscall"
"bazil.org/fuse"
"bazil.org/fuse/fs"
)
// SmoothFS implements an IO smoothing virtual filesystem.
type SmoothFS struct {
	SrcDir    string // The directory we are mirroring
	CacheDir  string // A location locally our cache entries are stored.
	NumSlaves int    // Number of IO worker goroutines to spawn.
	// io_queue feeds read work to the IO workers; clean_queue feeds a
	// dedicated worker (see Setup).
	io_queue    chan WorkEntry
	clean_queue chan WorkEntry
}
// Root is called to get the root directory node of this filesystem.
// It maps the FUSE root to the mirrored source directory.
func (fs *SmoothFS) Root() (fs.Node, fuse.Error) {
	log.Printf("Asked for root\n")
	return &Dir{FS: fs, RelPath: "", AbsPath: fs.SrcDir}, nil
}
// Setup performs setup of SmoothFS's internal fields.
// It is idempotent: queues and workers are only created if absent.
func (fs *SmoothFS) Setup() {
	if fs.io_queue == nil {
		fs.io_queue = make(chan WorkEntry)
		// One io_slave per configured slave services the shared IO queue.
		for i := 0; i < fs.NumSlaves; i++ {
			go io_slave(fs, i, fs.io_queue)
		}
	}
	if fs.clean_queue == nil {
		fs.clean_queue = make(chan WorkEntry)
		// A single extra worker (index NumSlaves) handles the clean queue.
		go io_slave(fs, fs.NumSlaves, fs.clean_queue)
	}
}
// Destroy is called when the SmoothFS is shutting down, and cleans up internal structs.
// Closing the queues lets the worker goroutines drain and exit.
func (fs *SmoothFS) Destroy() {
	close(fs.io_queue)
	close(fs.clean_queue)
}
// Init is called to initialize the FUSE filesystem.
// It lazily performs Setup and negotiates async reads and the write size.
func (fs *SmoothFS) Init(req *fuse.InitRequest, resp *fuse.InitResponse, intr fs.Intr) fuse.Error {
	log.Printf("In init")
	fs.Setup()
	resp.Flags |= fuse.InitAsyncRead
	resp.MaxWrite = BLOCK_SIZE
	return nil
}
// Dir implements both Node and Handle for the root directory.
type Dir struct {
	FS      *SmoothFS
	RelPath string // Path relative to the FUSE mount root.
	AbsPath string // Absolute path of the mirrored source directory.
}
// Attr gets the attributes for this directory.
// NOTE(review): a stat failure is silently mapped to a zero Attr —
// confirm whether an error should be surfaced instead.
func (d *Dir) Attr() fuse.Attr {
	info, err := os.Stat(d.AbsPath)
	if err != nil {
		return fuse.Attr{}
	}
	return fuseAttrFromStat(info).Attr
}
// Lookup a sub-path within this directory, returning a node or error.
// Directories become Dir nodes, everything else becomes File nodes;
// a failed stat maps to ENOENT.
func (d *Dir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
	log.Printf("In lookup\n")
	absPath := filepath.Join(d.AbsPath, name)
	relPath := filepath.Join(d.RelPath, name)
	info, err := os.Stat(absPath)
	if err != nil {
		return nil, fuse.ENOENT
	} else if info.IsDir() {
		return &Dir{FS: d.FS, RelPath: relPath, AbsPath: absPath}, nil
	} else {
		return &File{FS: d.FS, RelPath: relPath, AbsPath: absPath}, nil
	}
}
// ReadDir is called by FUSE to list the entries within this directory.
// It stats every entry of the backing directory and converts each into a
// fuse.Dirent; open failures map to ENOENT, read failures to EIO.
func (d *Dir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
	log.Printf("In readdir\n")
	fp, err := os.Open(d.AbsPath)
	if err != nil {
		log.Printf("error %s\n", err.Error())
		return nil, fuse.ENOENT
	}
	// Readdir(0) reads all remaining directory entries at once.
	infos, rd_err := fp.Readdir(0)
	if rd_err != nil {
		log.Printf("Read error %s\n", rd_err.Error())
		return nil, fuse.EIO
	}
	dirs := make([]fuse.Dirent, 0, len(infos))
	for _, info := range infos {
		attr := fuseAttrFromStat(info)
		ent := fuse.Dirent{
			Name:  attr.Name,
			Inode: attr.Inode,
			Type:  modeDT(info.Mode()),
		}
		dirs = append(dirs, ent)
	}
	log.Printf("numdirs: %d\n", len(dirs))
	return dirs, nil
}
// File implements both Node and Handle.
type File struct {
	AbsPath string // Absolute path of this file's backing file
	RelPath string // Relative path of this file within the FUSE filesystem
	FS      *SmoothFS
	fp      *os.File    // Lazily opened backing file handle (see getFP).
	cf      *CachedFile // Lazily created cache wrapper (see getCachedFile).
}
// Attr is called by FUSE to get attributes of this file.
// NOTE(review): like Dir.Attr, a stat failure silently yields a zero Attr.
func (f *File) Attr() fuse.Attr {
	info, err := os.Stat(f.AbsPath)
	if err != nil {
		return fuse.Attr{}
	}
	return fuseAttrFromStat(info).Attr
}
// getFP lazily opens and memoizes the backing file handle.
// NOTE(review): the os.Open error is discarded, so callers may receive a
// nil *os.File — confirm whether that is intended.
func (f *File) getFP() *os.File {
	if f.fp == nil {
		f.fp, _ = os.Open(f.AbsPath)
	}
	return f.fp
}
// getCachedFile lazily creates and memoizes the CachedFile wrapper used
// to serve smoothed reads for this file.
func (f *File) getCachedFile() *CachedFile {
	if f.cf == nil {
		f.cf = NewCachedFile(f)
	}
	return f.cf
}
// Read is called by FUSE to read a specific range of bytes from this file.
// The request is queued on the cached file; we then wait for either the
// data or an interrupt from the kernel.
func (f *File) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fs.Intr) fuse.Error {
	log.Println("In File.Read")
	reqgetter := make(chan []byte)
	cf := f.getCachedFile()
	cf.ReadRequest(req.Offset, req.Size, reqgetter)
	// FIX: the trailing "return nil" after this select was unreachable
	// (both cases return) and has been removed.
	select {
	case dbytes := <-reqgetter:
		resp.Data = dbytes
		return nil
	case <-intr:
		log.Printf("Got INTR for some reason.")
		return fuse.Errno(syscall.EINTR)
	}
}
|
package main
import (
"fmt"
_ "github.com/brewlin/net-protocol/pkg/logging"
"github.com/brewlin/net-protocol/protocol/transport/udp/client"
)
// main exercises the UDP client: connect, send one message, and print the
// echoed response.
func main() {
	con := client.NewClient("10.0.2.15", 9000)
	defer con.Close()
	// FIX: previously a failed Connect only printed the error and then
	// proceeded to Write/Read on the unconnected client; return instead.
	if err := con.Connect(); err != nil {
		fmt.Println(err)
		return
	}
	con.Write([]byte("send msg"))
	res, err := con.Read()
	if err != nil {
		fmt.Println(err)
		// FIX: the explicit con.Close() here duplicated the deferred
		// Close (double close); the defer alone is sufficient.
		return
	}
	fmt.Println(string(res))
}
|
// This file is part of CycloneDX GoMod
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an “AS IS” BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) OWASP Foundation. All Rights Reserved.
package gomod
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestParseVendoredModules exercises parseVendoredModules against the
// various "# module [version] [=> replacement]" header forms that
// `go mod vendor` emits, including deduplication of repeated headers.
func TestParseVendoredModules(t *testing.T) {
	cwd, err := os.Getwd()
	require.NoError(t, err)
	// Plain module with a version and no replacement.
	t.Run("Simple", func(t *testing.T) {
		goModVendorOutput := "# github.com/CycloneDX/cyclonedx-go1 v0.1.0"
		modules, err := parseVendoredModules(cwd, strings.NewReader(goModVendorOutput))
		require.NoError(t, err)
		require.Len(t, modules, 1)
		assert.Equal(t, "github.com/CycloneDX/cyclonedx-go1", modules[0].Path)
		assert.Equal(t, "v0.1.0", modules[0].Version)
		assert.Equal(t, filepath.Join(cwd, "vendor", "github.com/CycloneDX/cyclonedx-go1"), modules[0].Dir)
		assert.True(t, modules[0].Vendored)
	})
	// Replacement directives: only the replacement is vendored; the
	// replaced module keeps its identity but has no directory.
	t.Run("Replacement PathVersion to PathVersion", func(t *testing.T) {
		goModVendorOutput := "# github.com/CycloneDX/cyclonedx-go v0.1.0 => github.com/nscuro/cyclonedx-go v0.1.1"
		modules, err := parseVendoredModules(cwd, strings.NewReader(goModVendorOutput))
		require.NoError(t, err)
		require.Len(t, modules, 1)
		assert.Equal(t, "github.com/CycloneDX/cyclonedx-go", modules[0].Path)
		assert.Equal(t, "v0.1.0", modules[0].Version)
		assert.Empty(t, modules[0].Dir)
		assert.False(t, modules[0].Vendored)
		assert.NotNil(t, modules[0].Replace)
		assert.Equal(t, "github.com/nscuro/cyclonedx-go", modules[0].Replace.Path)
		assert.Equal(t, "v0.1.1", modules[0].Replace.Version)
		assert.Equal(t, filepath.Join(cwd, "vendor", "github.com/CycloneDX/cyclonedx-go"), modules[0].Replace.Dir)
		assert.True(t, modules[0].Replace.Vendored)
	})
	t.Run("Replacement Path to PathVersion", func(t *testing.T) {
		goModVendorOutput := "# github.com/CycloneDX/cyclonedx-go => github.com/nscuro/cyclonedx-go v0.1.1"
		modules, err := parseVendoredModules(cwd, strings.NewReader(goModVendorOutput))
		require.NoError(t, err)
		require.Len(t, modules, 1)
		assert.Equal(t, "github.com/CycloneDX/cyclonedx-go", modules[0].Path)
		assert.Empty(t, modules[0].Version)
		assert.Empty(t, modules[0].Dir)
		assert.False(t, modules[0].Vendored)
		assert.NotNil(t, modules[0].Replace)
		assert.Equal(t, "github.com/nscuro/cyclonedx-go", modules[0].Replace.Path)
		assert.Equal(t, "v0.1.1", modules[0].Replace.Version)
		assert.Equal(t, filepath.Join(cwd, "vendor", "github.com/CycloneDX/cyclonedx-go"), modules[0].Replace.Dir)
		assert.True(t, modules[0].Replace.Vendored)
	})
	// Local (filesystem) replacements have a path but no version.
	t.Run("Replacement PathVersion to Path", func(t *testing.T) {
		goModVendorOutput := "# github.com/CycloneDX/cyclonedx-go v0.1.0 => ../cyclonedx-go"
		modules, err := parseVendoredModules(cwd, strings.NewReader(goModVendorOutput))
		require.NoError(t, err)
		require.Len(t, modules, 1)
		assert.Equal(t, "github.com/CycloneDX/cyclonedx-go", modules[0].Path)
		assert.Equal(t, "v0.1.0", modules[0].Version)
		assert.Empty(t, modules[0].Dir)
		assert.False(t, modules[0].Vendored)
		assert.NotNil(t, modules[0].Replace)
		assert.Equal(t, "../cyclonedx-go", modules[0].Replace.Path)
		assert.Empty(t, modules[0].Replace.Version)
		assert.Equal(t, filepath.Join(cwd, "vendor", "github.com/CycloneDX/cyclonedx-go"), modules[0].Replace.Dir)
		assert.True(t, modules[0].Replace.Vendored)
	})
	t.Run("Replacement Path to Path", func(t *testing.T) {
		goModVendorOutput := "# github.com/CycloneDX/cyclonedx-go => ../cyclonedx-go"
		modules, err := parseVendoredModules(cwd, strings.NewReader(goModVendorOutput))
		require.NoError(t, err)
		require.Len(t, modules, 1)
		assert.Equal(t, "github.com/CycloneDX/cyclonedx-go", modules[0].Path)
		assert.Empty(t, modules[0].Version)
		assert.Empty(t, modules[0].Dir)
		assert.False(t, modules[0].Vendored)
		assert.NotNil(t, modules[0].Replace)
		assert.Equal(t, "../cyclonedx-go", modules[0].Replace.Path)
		assert.Empty(t, modules[0].Replace.Version)
		assert.Equal(t, filepath.Join(cwd, "vendor", "github.com/CycloneDX/cyclonedx-go"), modules[0].Replace.Dir)
		assert.True(t, modules[0].Replace.Vendored)
	})
	// Repeated headers for the same module must collapse to one entry;
	// the first occurrence appears to win — confirm against the parser.
	t.Run("Duplicates", func(t *testing.T) {
		goModVendorOutput := `
# github.com/CycloneDX/cyclonedx-go v0.1.0 => github.com/nscuro/cyclonedx-go v0.1.1
# github.com/CycloneDX/cyclonedx-go => github.com/nscuro/cyclonedx-go v0.1.1
# github.com/CycloneDX/cyclonedx-go v0.1.0 => ../cyclonedx-go
# github.com/CycloneDX/cyclonedx-go => ../cyclonedx-go`
		modules, err := parseVendoredModules(cwd, strings.NewReader(goModVendorOutput))
		require.NoError(t, err)
		require.Len(t, modules, 1)
		assert.Equal(t, "github.com/CycloneDX/cyclonedx-go", modules[0].Path)
		assert.Equal(t, "v0.1.0", modules[0].Version)
		assert.Empty(t, modules[0].Dir)
		assert.False(t, modules[0].Vendored)
		assert.NotNil(t, modules[0].Replace)
		assert.Equal(t, "github.com/nscuro/cyclonedx-go", modules[0].Replace.Path)
		assert.Equal(t, "v0.1.1", modules[0].Replace.Version)
		assert.Equal(t, filepath.Join(cwd, "vendor", "github.com/CycloneDX/cyclonedx-go"), modules[0].Replace.Dir)
		assert.True(t, modules[0].Replace.Vendored)
	})
}
|
package config
import (
"testing"
"github.com/stretchr/testify/require"
)
// Test_EchoTmpl renders the zero-value Base template and expects rendering
// to succeed; the rendered output is logged for inspection.
func Test_EchoTmpl(t *testing.T) {
	base := &Base{}
	out, err := EchoTmpl(base)
	t.Log(out)
	require.NoError(t, err, "tmpl")
}
|
package main
import (
"antalk-go/internal/common"
"antalk-go/internal/seq/protocol/http"
"flag"
"log"
"os"
"os/signal"
"syscall"
)
// Command-line flags selecting which configuration file to load
// (name, format, and directory).
var (
	configName = flag.String("config_name", "seq", "config name")
	configType = flag.String("config_type", "toml", "config type")
	configPath = flag.String("config_path", ".", "config path")
)
// server carries only a name.
// NOTE(review): nothing in this file references server — confirm whether it
// is used elsewhere in the package before removing.
type server struct {
	name string
}
// main loads the configuration selected by the command-line flags, starts
// the HTTP server, and then blocks handling OS signals: quit/terminate/
// interrupt shut the server down, SIGHUP is currently a no-op, and any
// other delivered signal exits.
func main() {
	flag.Parse()

	cfg := &common.Config{
		Name: *configName,
		Type: *configType,
		Path: *configPath,
	}
	cfg.Init()

	httpSrv := http.New(cfg)

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	for {
		sig := <-sigCh
		log.Printf("seq server get a signal %s\n", sig.String())
		switch sig {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			httpSrv.Close()
			return
		case syscall.SIGHUP:
			// Reload is not implemented; keep waiting for signals.
		default:
			return
		}
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crostini
import (
"context"
"os"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/crostini"
"chromiumos/tast/local/crostini/ui/settings"
"chromiumos/tast/local/disk"
"chromiumos/tast/local/vm"
"chromiumos/tast/testing"
)
// init registers the disk-resize test with the tast framework, with one
// stable and one informational (unstable-hardware) variant per Debian
// release.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ResizeSpaceConstrained,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test resizing disk of Crostini from the Settings with constrained host disk space",
		Contacts:     []string{"nverne@google.com", "clumptini+oncall@google.com"},
		Attr:         []string{"group:mainline"},
		SoftwareDeps: []string{"chrome", "vm_host"},
		Params: []testing.Param{
			// Parameters generated by params_test.go. DO NOT EDIT.
			{
				Name:              "buster_stable",
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
			}, {
				Name:              "buster_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
			}, {
				Name:              "bullseye_stable",
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
			}, {
				Name:              "bullseye_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
			},
		},
	})
}
// ResizeSpaceConstrained walks the Crostini disk through a sequence of
// target sizes (grow, shrink, shrink, grow) via the Settings UI while host
// disk space is constrained, and restores the original size on exit.
func ResizeSpaceConstrained(ctx context.Context, s *testing.State) {
	pre := s.FixtValue().(crostini.FixtureData)
	cr := pre.Chrome
	tconn := pre.Tconn
	cont := pre.Cont
	// Keep the unshortened context for cleanup so the deferred resize has
	// time to run even if the main body times out.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 30*time.Second)
	defer cancel()
	// Open the Linux settings.
	st, err := settings.OpenLinuxSettings(ctx, tconn, cr)
	if err != nil {
		s.Fatal("Failed to open Linux Settings: ", err)
	}
	const GB uint64 = 1 << 30
	targetDiskSizeBytes := []uint64{20 * GB, 10 * GB, 5 * GB, 10 * GB}
	// Record the current size so it can be restored afterwards.
	currSizeStr, err := st.GetDiskSize(ctx)
	if err != nil {
		s.Fatal("Failed to get current disk size: ", err)
	}
	currSizeBytes, err := settings.ParseDiskSize(currSizeStr)
	if err != nil {
		s.Fatalf("Failed to parse disk size string %s: %v", currSizeStr, err)
	}
	// Restore the original size on exit. The size is passed as an argument
	// so the deferred call sees the value captured here, not the value of
	// currSizeBytes after the loop below mutates it.
	defer func(ctx context.Context, targetSize uint64) {
		if _, _, err := st.Resize(ctx, targetSize); err != nil {
			s.Logf("Failed to resize to the original disk size: %d", targetSize)
		}
	}(cleanupCtx, currSizeBytes)
	for _, tBytes := range targetDiskSizeBytes {
		testResize(ctx, s, cont, st, currSizeBytes, tBytes)
		currSizeBytes = tBytes
	}
}
// testResize fills host disk space and then resizes the Crostini disk from
// currSizeBytes to targetSizeBytes via the Settings UI, verifying the
// result inside the container. When the host lacks enough free space the
// step is skipped and nil is returned.
//
// NOTE(review): disk.FillUntil(fillPath, targetSizeBytes) presumably fills
// until only targetSizeBytes remain free — confirm against its docs.
// All failures abort via s.Fatal, so the error result is always nil; it is
// kept for signature compatibility.
func testResize(ctx context.Context, s *testing.State, cont *vm.Container, st *settings.Settings, currSizeBytes, targetSizeBytes uint64) error {
	const fillPath = "/home/user/"
	freeSpace, err := disk.FreeSpace(fillPath)
	if err != nil {
		s.Fatalf("Failed to read free space in %s: %v", fillPath, err)
	}
	if freeSpace < targetSizeBytes {
		s.Logf("Not enough free space to run test. Have %v, need %v", freeSpace, targetSizeBytes)
		return nil
	}
	fillFile, err := disk.FillUntil(fillPath, targetSizeBytes)
	if err != nil {
		s.Fatal("Failed to fill disk space: ", err)
	}
	// Defer removing the files in case of errors.
	defer func() {
		// Scoped err: don't clobber the enclosing function's err.
		if err := os.Remove(fillFile); err != nil {
			s.Fatalf("Failed to remove fill file %s: %v", fillFile, err)
		}
	}()
	s.Logf("Resizing from %v to %v", currSizeBytes, targetSizeBytes)
	sizeOnSlider, sizeInCont, err := st.Resize(ctx, targetSizeBytes)
	if err != nil {
		// Fixed message: this resizes to the loop target, not "back to the
		// default value".
		s.Fatalf("Failed to resize disk to %d bytes: %v", targetSizeBytes, err)
	}
	if err := st.VerifyResizeResults(ctx, cont, sizeOnSlider, sizeInCont); err != nil {
		s.Fatal("Failed to verify resize results: ", err)
	}
	return nil
}
|
package array
import (
"encoding/json"
"fmt"
"github.com/project-flogo/core/data/coerce"
"github.com/stretchr/testify/assert"
"testing"
)
// TestFlatternFunc exercises fnFlatten.Eval with nested JSON arrays,
// already-flat input, and depth-limited flattening.
// NOTE(review): "Flattern" is a typo for "Flatten", but renaming a Test
// function changes name-based test filtering — confirm before renaming.
func TestFlatternFunc(t *testing.T) {
	fn := &fnFlatten{}
	str := `[
[
{
"id": 1
}
],
[
{
"id": 2
},
{
"id": 3
}
]
]`
	var d interface{}
	err := json.Unmarshal([]byte(str), &d)
	assert.Nil(t, err)
	// Depth -1 flattens fully: the two inner arrays collapse to 3 objects.
	final, err := fn.Eval(d, -1)
	assert.Nil(t, err)
	print(final)
	assert.Equal(t, 3, len(final.([]interface{})))
	obj := map[string]string{"key1": "value1", "key2": "value2"}
	var aa = []interface{}{[]interface{}{obj}, []interface{}{obj}, []interface{}{obj}}
	final, err = fn.Eval(aa, -1)
	assert.Nil(t, err)
	print(final)
	assert.Equal(t, 3, len(final.([]interface{})))
	// An already-flat array comes back unchanged.
	obj = map[string]string{"key1": "value1", "key2": "value2"}
	aa = []interface{}{obj}
	final, err = fn.Eval(aa, -1)
	assert.Nil(t, err)
	print(final)
	assert.Equal(t, aa, final)
	// Fully flattening 5 levels of nesting yields all 10 scalars.
	str = "[1, 2, [3, 4, [5, 6, [7, 8, [9, 10]]]]]"
	a, _ := coerce.ToArray(str)
	final, err = fn.Eval(a, -1)
	assert.Nil(t, err)
	print(final)
	assert.Equal(t, 10, len(final.([]interface{})))
	// Depth 2 unwraps only two levels; deeper nesting stays as one element.
	str = "[1, 2, [3, 4, [5, 6, [7, 8, [9, 10]]]]]"
	a, _ = coerce.ToArray(str)
	final, err = fn.Eval(a, 2)
	assert.Nil(t, err)
	print(final)
	assert.Equal(t, 7, len(final.([]interface{})))
}
// print dumps in as JSON to stdout; the marshal error is deliberately
// ignored since this is a test-only debugging helper.
// (Note: this shadows the predeclared built-in print within this package.)
func print(in interface{}) {
	data, _ := json.Marshal(in)
	fmt.Printf("%s\n", data)
}
|
package binding
package binding
// Kafka defines the operation bindings for the Kafka protocol
// Kafka defines the operation bindings for the Kafka protocol.
// (Fix: the original `type Kafka {` was missing the struct keyword and did
// not compile.)
type Kafka struct {
	// GroupID is the ID of the consumer group.
	GroupID string
	// ClientID is the ID of the consumer inside a consumer group.
	ClientID string
	// BindingVersion specifies the version of this binding. If omitted, "latest" MUST be assumed.
	BindingVersion string
}
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package inputs
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bundles/cros/inputs/emojipicker"
"chromiumos/tast/local/bundles/cros/inputs/fixture"
"chromiumos/tast/local/bundles/cros/inputs/pre"
"chromiumos/tast/local/bundles/cros/inputs/testserver"
"chromiumos/tast/local/bundles/cros/inputs/util"
"chromiumos/tast/local/chrome/ime"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/useractions"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the physical-keyboard emoji-picker test with a stable
// variant and an informational (unstable-hardware) variant; the lacros
// variant is disabled.
func init() {
	testing.AddTest(&testing.Test{
		Func:         PhysicalKeyboardEmoji,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks that right click input field and select emoji with physical keyboard",
		Contacts:     []string{"essential-inputs-gardener-oncall@google.com", "essential-inputs-team@google.com"},
		Attr:         []string{"group:mainline", "group:input-tools"},
		SoftwareDeps: []string{"chrome", "chrome_internal"},
		SearchFlags:  util.IMESearchFlags([]ime.InputMethod{ime.DefaultInputMethod}),
		Params: []testing.Param{
			{
				Fixture:           fixture.ClamshellNonVK,
				ExtraAttr:         []string{"informational"},
				ExtraHardwareDeps: hwdep.D(hwdep.Model(pre.StableModels...), hwdep.SkipOnModel("kefka")),
			},
			{
				Name:      "informational",
				Fixture:   fixture.ClamshellNonVK,
				ExtraAttr: []string{"informational"},
				// Skip on grunt & zork boards due to b/213400835.
				ExtraHardwareDeps: hwdep.D(pre.InputsUnstableModels, hwdep.SkipOnPlatform("grunt", "zork")),
			},
			/* Disabled due to <1% pass rate over 30 days. See b/246818430
			{
				Name: "lacros",
				Fixture: fixture.LacrosClamshellNonVK,
				ExtraSoftwareDeps: []string{"lacros_stable"},
				ExtraAttr: []string{"informational"},
				ExtraHardwareDeps: hwdep.D(hwdep.Model(pre.StableModels...), hwdep.SkipOnModel("kefka")),
			}
			*/
		},
	})
}
// PhysicalKeyboardEmoji drives the emoji picker end to end with a physical
// keyboard: it inserts an emoji via the input field's context menu,
// dismisses the picker with the ESC key and with a mouse click outside it,
// and clears the "recently used" emoji list.
func PhysicalKeyboardEmoji(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn
	uc := s.FixtValue().(fixture.FixtData).UserContext
	// Keep the unshortened context for cleanup so deferred dumps can run
	// even if the main body times out.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree")
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer kb.Close()
	its, err := testserver.LaunchBrowser(ctx, s.FixtValue().(fixture.FixtData).BrowserType, cr, tconn)
	if err != nil {
		s.Fatal("Failed to launch inputs test server: ", err)
	}
	defer its.CloseAll(cleanupCtx)
	inputField := testserver.TextAreaInputField
	inputEmoji := "😂"
	ui := emojipicker.NewUICtx(tconn)
	// Subtest 1: insert an emoji through the picker.
	s.Run(ctx, "emoji_input", func(ctx context.Context, s *testing.State) {
		defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_emoji_input")
		if err := its.InputEmojiWithEmojiPicker(uc, inputField, inputEmoji)(ctx); err != nil {
			s.Fatal("Failed to verify emoji picker: ", err)
		}
	})
	// Tap ESC key to dismiss emoji picker.
	// This test is also covered in browser test https://source.chromium.org/chromium/chromium/src/+/main:chrome/browser/ui/views/bubble/bubble_contents_wrapper_unittest.cc;drc=7059ce9510b276afe73ce0bc389a72b58f482420;l=154.
	// Keep this test since it is still required to complete the entire E2E test journey.
	dismissByKeyboardAction := uiauto.UserAction(
		"Dismiss emoji picker",
		uiauto.Combine("dismiss emoji picker by tapping ESC key",
			kb.AccelAction("ESC"),
			emojipicker.WaitUntilGone(tconn),
		),
		uc,
		&useractions.UserActionCfg{
			Attributes: map[string]string{
				useractions.AttributeTestScenario: "Dismiss emoji picker by tapping ESC key",
				useractions.AttributeFeature:      useractions.FeatureEmojiPicker,
			},
		},
	)
	// Mouse click to dismiss emoji picker.
	dismissByMouseAction := uiauto.UserAction(
		"Dismiss emoji picker",
		uiauto.Combine("dismiss emoji picker by mouse click",
			func(ctx context.Context) error {
				emojiPickerLoc, err := ui.Location(ctx, emojipicker.RootFinder)
				if err != nil {
					return errors.Wrap(err, "failed to get emoji picker location")
				}
				// Click anywhere outside emoji picker will dismiss it.
				// Using TopRight + 50 is safe in this case.
				clickPoint := coords.Point{
					X: emojiPickerLoc.TopRight().X + 50,
					Y: emojiPickerLoc.TopRight().Y,
				}
				return ui.MouseClickAtLocation(0, clickPoint)(ctx)
			},
			emojipicker.WaitUntilGone(tconn),
		),
		uc,
		&useractions.UserActionCfg{
			Attributes: map[string]string{
				useractions.AttributeTestScenario: "Dismiss emoji picker by mouse click",
				useractions.AttributeFeature:      useractions.FeatureEmojiPicker,
			},
		},
	)
	// Subtest 2: clear the recently-used list and verify it stays empty
	// across a picker relaunch (i.e. it was removed from storage, not just
	// from the UI).
	s.Run(ctx, "recently_used", func(ctx context.Context, s *testing.State) {
		defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_recently")
		if err := uiauto.Combine("validate recently used emojis",
			its.TriggerEmojiPickerFromContextMenu(inputField),
			// Clear recent used emojis.
			uiauto.UserAction(
				"Clear recently used emoji",
				uiauto.Combine("clear recently used emoji",
					ui.LeftClick(emojipicker.RecentUsedMenu),
					ui.LeftClick(emojipicker.ClearRecentlyUsedButtonFinder),
					ui.WaitUntilGone(emojipicker.RecentUsedMenu),
					dismissByKeyboardAction,
					// Launch emoji picker again to confirm it is not only removed from UI.
					its.TriggerEmojiPickerFromContextMenu(inputField),
					ui.WaitUntilGone(emojipicker.RecentUsedMenu),
					dismissByMouseAction,
				),
				uc,
				&useractions.UserActionCfg{
					Attributes: map[string]string{
						useractions.AttributeFeature: useractions.FeatureEmojiPicker,
					},
				},
			),
		)(ctx); err != nil {
			s.Fatal("Failed to clear recently used emoji: ", err)
		}
	})
}
|
package logs
import (
"os"
log "github.com/sirupsen/logrus"
)
// Setup set format, output, and level of logs
// Setup configures the global logrus logger: debug level, stdout output,
// plain-text formatting. The three settings are independent, so order is
// irrelevant.
func Setup() {
	log.SetLevel(log.DebugLevel)
	log.SetOutput(os.Stdout)
	log.SetFormatter(new(log.TextFormatter))
}
|
package p_00101_00200
// 146. LRU Cache, https://leetcode.com/problems/lru-cache/
// ListNode is an element of the doubly linked recency list; it also stores
// the cache key so the map entry can be found during eviction.
type ListNode struct {
	Key  int
	Val  int
	Prev *ListNode
	Next *ListNode
}

// LRUCache is a fixed-capacity key/value cache that evicts the least
// recently used entry. Lookups go through the map; recency order is kept
// in a doubly linked list between two sentinel nodes (head = most recent
// side, tail = least recent side).
type LRUCache struct {
	storage  map[int]*ListNode
	capacity int
	head     *ListNode
	tail     *ListNode
}

// Constructor builds an empty LRUCache bounded to the given capacity.
func Constructor(capacity int) LRUCache {
	front, back := new(ListNode), new(ListNode)
	front.Next, back.Prev = back, front
	return LRUCache{
		storage:  make(map[int]*ListNode),
		capacity: capacity,
		head:     front,
		tail:     back,
	}
}

// Get returns the value stored under key, or -1 if absent. A hit is
// promoted to most recently used.
func (l *LRUCache) Get(key int) int {
	node, ok := l.storage[key]
	if !ok {
		return -1
	}
	l.unlink(node)
	l.pushFront(node)
	return node.Val
}

// Put stores value under key, updating and promoting an existing entry.
// When inserting into a full cache, the least recently used entry is
// evicted first.
func (l *LRUCache) Put(key int, value int) {
	if node, ok := l.storage[key]; ok {
		node.Val = value
		l.unlink(node)
		l.pushFront(node)
		return
	}
	if len(l.storage) == l.capacity {
		victim := l.tail.Prev
		delete(l.storage, victim.Key)
		l.unlink(victim)
	}
	node := &ListNode{Key: key, Val: value}
	l.pushFront(node)
	l.storage[key] = node
}

// unlink detaches node from the recency list without touching the map.
func (l *LRUCache) unlink(node *ListNode) {
	node.Prev.Next = node.Next
	node.Next.Prev = node.Prev
}

// pushFront inserts node directly after the head sentinel, marking it most
// recently used.
func (l *LRUCache) pushFront(node *ListNode) {
	node.Prev = l.head
	node.Next = l.head.Next
	l.head.Next.Prev = node
	l.head.Next = node
}
/**
* Your LRUCache object will be instantiated and called as such:
* obj := Constructor(capacity);
* param_1 := obj.Get(key);
* obj.Put(key,value);
*/
|
package s3
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/textproto"
"os"
"path/filepath"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
// S3 is a storage backend for JSON documents and raw data objects kept in
// a single AWS S3 (or S3-compatible) bucket.
type S3 struct {
	c      *s3.S3        // underlying AWS SDK client
	bucket string        // bucket all keys live in
	expire time.Duration // lifetime of presigned GET URLs
	proxy  bool          // when true, GET data is proxied instead of redirected
}
// NewS3 builds an S3 storage backend from static credentials and bucket
// settings. s3PresignExpire bounds the lifetime of presigned GET URLs;
// s3ProxyData selects between proxying object data through this process
// and redirecting clients to a presigned URL.
func NewS3(s3AccessKeyId string, s3SecretAccessKey string, s3Endpoint string, s3Region string, s3Bucket string, s3PresignExpire time.Duration, s3ProxyData bool) *S3 {
	conf := &aws.Config{
		Credentials: credentials.NewStaticCredentials(s3AccessKeyId, s3SecretAccessKey, ""),
		Endpoint:    aws.String(s3Endpoint),
		Region:      aws.String(s3Region),
	}
	// session.New is deprecated and silently swallows configuration errors;
	// session.Must(session.NewSession(...)) panics on them instead, which is
	// appropriate at construction time.
	return &S3{
		c:      s3.New(session.Must(session.NewSession(conf))),
		bucket: s3Bucket,
		expire: s3PresignExpire,
		proxy:  s3ProxyData,
	}
}
// keyExists reports whether a HEAD request for key k in the bucket
// succeeds; any error (including "not found") counts as absent.
func (s *S3) keyExists(k string) bool {
	_, err := s.c.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(k),
	})
	return err == nil
}
// Name identifies this storage backend.
func (s *S3) Name() string {
	return "S3"
}
// List returns the ids of all stored JSON documents: every key in the
// bucket ending in ".json", with that suffix stripped. The result is
// never nil, even when the bucket is empty.
func (s *S3) List() ([]string, error) {
	ids := []string{}
	in := &s3.ListObjectsInput{
		Bucket: aws.String(s.bucket),
	}
	err := s.c.ListObjectsPages(in, func(page *s3.ListObjectsOutput, _ bool) bool {
		for _, obj := range page.Contents {
			key := *obj.Key
			if filepath.Ext(key) == ".json" {
				ids = append(ids, key[:len(key)-5])
			}
		}
		return true // keep paging
	})
	if err != nil {
		return nil, err
	}
	return ids, nil
}
// ReadJSON decodes the JSON document stored under id into v. A missing
// document is reported as os.ErrNotExist.
func (s *S3) ReadJSON(id string, v interface{}) error {
	out, err := s.c.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id + ".json"),
	})
	if err != nil {
		// Translate the SDK's "no such key" into the portable sentinel.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			return os.ErrNotExist
		}
		return err
	}
	defer out.Body.Close()
	return json.NewDecoder(out.Body).Decode(v)
}
// WriteJSON stores v as a JSON document under id. If a document with that
// id already exists, os.ErrExist is returned and nothing is written.
//
// NOTE(review): the existence check and the write are two separate S3
// calls, so concurrent writers can race (check-then-act). Confirm whether
// callers rely on strict create-once semantics.
func (s *S3) WriteJSON(id string, v interface{}) error {
	if s.keyExists(id + ".json") {
		return os.ErrExist
	}
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	conf := &s3.PutObjectInput{
		Body:   bytes.NewReader(data),
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id + ".json"),
	}
	_, err = s.c.PutObject(conf)
	return err
}
// DeleteJSON removes the JSON document stored under id.
func (s *S3) DeleteJSON(id string) error {
	in := &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id + ".json"),
	}
	if _, err := s.c.DeleteObject(in); err != nil {
		return err
	}
	return nil
}
// OpenData opens the raw data object stored under id for reading; the
// caller must close the returned reader. A missing object is reported as
// os.ErrNotExist.
func (s *S3) OpenData(id string) (io.ReadCloser, error) {
	out, err := s.c.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id),
	})
	if err != nil {
		// Translate the SDK's "no such key" into the portable sentinel.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			return nil, os.ErrNotExist
		}
		return nil, err
	}
	return out.Body, nil
}
// WriteData stores the contents of r under id and returns the number of
// bytes uploaded. If an object with that id already exists, os.ErrExist is
// returned and nothing is written.
//
// NOTE(review): the byte count is r's offset after the upload
// (Seek(0, io.SeekCurrent)), which assumes r started at offset 0 —
// confirm with callers. The existence check and the write are separate S3
// calls, so concurrent writers can race.
func (s *S3) WriteData(id string, r io.ReadSeeker) (int64, error) {
	if s.keyExists(id) {
		return 0, os.ErrExist
	}
	conf := &s3.PutObjectInput{
		Body:   r,
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id),
	}
	if _, err := s.c.PutObject(conf); err != nil {
		return 0, err
	}
	return r.Seek(0, io.SeekCurrent)
}
// DeleteData removes the raw data object stored under id.
func (s *S3) DeleteData(id string) error {
	in := &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id),
	}
	if _, err := s.c.DeleteObject(in); err != nil {
		return err
	}
	return nil
}
// serveDataHead answers an HTTP HEAD request for the object stored under
// id by forwarding it to S3 and relaying the object metadata and S3's
// status code to the client. The client's conditional and Range headers
// are passed through so that 304/412 decisions are made by S3.
func (s *S3) serveDataHead(w http.ResponseWriter, r *http.Request, id string, contentType string, filename string, attachment bool) error {
	conf := &s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id),
	}
	// Forward the client's conditional and Range request headers, if any.
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-Match")]; ok && len(v) > 0 {
		conf.IfMatch = aws.String(v[0])
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-Modified-Since")]; ok && len(v) > 0 {
		if t, err := http.ParseTime(v[0]); err == nil {
			conf.IfModifiedSince = aws.Time(t)
		}
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-None-Match")]; ok && len(v) > 0 {
		conf.IfNoneMatch = aws.String(v[0])
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-Unmodified-Since")]; ok && len(v) > 0 {
		if t, err := http.ParseTime(v[0]); err == nil {
			conf.IfUnmodifiedSince = aws.Time(t)
		}
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("Range")]; ok && len(v) > 0 {
		conf.Range = aws.String(v[0])
	}
	req, o := s.c.HeadObjectRequest(conf)
	if err := req.Send(); err != nil {
		// A RequestFailure (e.g. 304 or 404 from S3) still carries a
		// status code we want to relay below; only abort on other errors.
		if _, ok := err.(awserr.RequestFailure); !ok {
			return err
		}
	}
	// Relay whichever response headers S3 populated. On a failed request
	// these fields are typically unset and skipped by the nil checks.
	if v := o.AcceptRanges; v != nil && *v != "" {
		w.Header().Set("Accept-Ranges", *v)
	}
	if v := o.CacheControl; v != nil && *v != "" {
		w.Header().Set("Cache-Control", *v)
	}
	if v := o.ContentEncoding; v != nil && *v != "" {
		w.Header().Set("Content-Encoding", *v)
	}
	if v := o.ContentLanguage; v != nil && *v != "" {
		w.Header().Set("Content-Language", *v)
	}
	if v := o.ContentLength; v != nil && *v > 0 {
		w.Header().Set("Content-Length", fmt.Sprintf("%d", *v))
	}
	if v := o.ETag; v != nil && *v != "" {
		w.Header().Set("ETag", *v)
	}
	if v := o.LastModified; v != nil && !(*v).IsZero() {
		w.Header().Set("Last-Modified", (*v).UTC().Format(http.TimeFormat))
	}
	// Caller-supplied metadata overrides what S3 reported.
	if contentType != "" {
		w.Header().Set("Content-Type", contentType)
	}
	if filename != "" {
		if attachment {
			w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
		} else {
			w.Header().Set("Content-Disposition", fmt.Sprintf(`inline; filename="%s"`, filename))
		}
	}
	// Relay S3's status code (200, 304, 404, ...).
	w.WriteHeader(req.HTTPResponse.StatusCode)
	return nil
}
// serveDataGet answers an HTTP GET request for the object stored under id
// by proxying it through S3: the client's conditional and Range headers
// are forwarded, S3's response headers and status code are relayed, and
// the object body is streamed back to the client.
func (s *S3) serveDataGet(w http.ResponseWriter, r *http.Request, id string, contentType string, filename string, attachment bool) error {
	conf := &s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id),
	}
	// Forward the client's conditional and Range request headers, if any.
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-Match")]; ok && len(v) > 0 {
		conf.IfMatch = aws.String(v[0])
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-Modified-Since")]; ok && len(v) > 0 {
		if t, err := http.ParseTime(v[0]); err == nil {
			conf.IfModifiedSince = aws.Time(t)
		}
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-None-Match")]; ok && len(v) > 0 {
		conf.IfNoneMatch = aws.String(v[0])
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("If-Unmodified-Since")]; ok && len(v) > 0 {
		if t, err := http.ParseTime(v[0]); err == nil {
			conf.IfUnmodifiedSince = aws.Time(t)
		}
	}
	if v, ok := r.Header[textproto.CanonicalMIMEHeaderKey("Range")]; ok && len(v) > 0 {
		conf.Range = aws.String(v[0])
	}
	req, o := s.c.GetObjectRequest(conf)
	if err := req.Send(); err != nil {
		// A RequestFailure (e.g. 304 or 404 from S3) still carries a
		// status code we want to relay below; only abort on other errors.
		if _, ok := err.(awserr.RequestFailure); !ok {
			return err
		}
	}
	// Relay whichever response headers S3 populated.
	if v := o.AcceptRanges; v != nil && *v != "" {
		w.Header().Set("Accept-Ranges", *v)
	}
	if v := o.CacheControl; v != nil && *v != "" {
		w.Header().Set("Cache-Control", *v)
	}
	if v := o.ContentEncoding; v != nil && *v != "" {
		w.Header().Set("Content-Encoding", *v)
	}
	if v := o.ContentLanguage; v != nil && *v != "" {
		w.Header().Set("Content-Language", *v)
	}
	if v := o.ContentLength; v != nil && *v > 0 {
		w.Header().Set("Content-Length", fmt.Sprintf("%d", *v))
	}
	if v := o.ContentRange; v != nil && *v != "" {
		w.Header().Set("Content-Range", *v)
	}
	if v := o.ETag; v != nil && *v != "" {
		w.Header().Set("ETag", *v)
	}
	if v := o.Expires; v != nil && *v != "" {
		w.Header().Set("Expires", *v)
	}
	if v := o.LastModified; v != nil && !(*v).IsZero() {
		w.Header().Set("Last-Modified", (*v).UTC().Format(http.TimeFormat))
	}
	// Caller-supplied metadata overrides what S3 reported.
	if contentType != "" {
		w.Header().Set("Content-Type", contentType)
	}
	if filename != "" {
		if attachment {
			w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
		} else {
			w.Header().Set("Content-Disposition", fmt.Sprintf(`inline; filename="%s"`, filename))
		}
	}
	// Relay S3's status code, then stream the body. Copy errors are
	// ignored here because the headers have already been written.
	// NOTE(review): on a failed request o.Body may be nil — confirm the
	// SDK always populates it before relying on this path.
	w.WriteHeader(req.HTTPResponse.StatusCode)
	if v := o.ContentLength; v != nil && *v > 0 {
		io.CopyN(w, o.Body, *o.ContentLength)
	} else {
		io.Copy(w, o.Body)
	}
	return o.Body.Close()
}
// redirectDataGet answers a GET by issuing a 302 redirect to a presigned
// S3 URL for the object, with the response content type and disposition
// overrides baked into the signed request.
func (s *S3) redirectDataGet(w http.ResponseWriter, r *http.Request, id string, contentType string, filename string, attachment bool) error {
	in := &s3.GetObjectInput{
		Bucket:              aws.String(s.bucket),
		Key:                 aws.String(id),
		ResponseContentType: aws.String(contentType),
	}
	if filename != "" {
		disposition := "inline"
		if attachment {
			disposition = "attachment"
		}
		in.ResponseContentDisposition = aws.String(fmt.Sprintf(`%s; filename="%s"`, disposition, filename))
	}
	req, _ := s.c.GetObjectRequest(in)
	signed, err := req.Presign(s.expire)
	if err != nil {
		return err
	}
	http.Redirect(w, r, signed, http.StatusFound)
	return nil
}
// ServeData serves the object stored under id over HTTP. HEAD requests are
// always answered by proxying S3 metadata; GET requests are either proxied
// or redirected to a presigned URL, depending on the backend's proxy
// setting. Any other method receives 405.
func (s *S3) ServeData(w http.ResponseWriter, r *http.Request, id string, contentType string, filename string, attachment bool) error {
	switch r.Method {
	case http.MethodHead:
		// HEAD requests are always proxied.
		return s.serveDataHead(w, r, id, contentType, filename, attachment)
	case http.MethodGet:
		if !s.proxy {
			return s.redirectDataGet(w, r, id, contentType, filename, attachment)
		}
		return s.serveDataGet(w, r, id, contentType, filename, attachment)
	}
	http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
	return nil
}
|
package main
import (
	"log"
	"net/http"

	"models"
	"redis"
)
// init initializes the backing stores before main runs.
// NOTE(review): the ordering suggests models depends on the Redis
// connection being established first — confirm before reordering.
func init() {
	redis.Initialize()
	models.Initialize()
}
// main wires the root handler and the static-assets file server onto the
// default mux and serves HTTP on :8000.
func main() {
	server := http.Server{
		Addr: ":8000",
	}
	// Serve files under ./assets at /assets/.
	assetsHandler := http.StripPrefix("/assets/", http.FileServer(http.Dir("assets")))
	http.HandleFunc("/", handleRequest)
	http.Handle("/assets/", assetsHandler)
	// ListenAndServe always returns a non-nil error; previously it was
	// silently discarded, hiding bind failures such as a busy port.
	if err := server.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.