text stringlengths 11 4.05M |
|---|
// full_test
package main
import (
"GT/Graphics"
// "GT/Scene"
"GT/Window"
// "fmt"
// "github.com/veandco/go-sdl2/sdl"
"math/rand"
// "time"
"fmt"
// "github.com/davecheney/profile"
)
// random returns a pseudo-random integer in the half-open interval
// [min, max). It panics (via rand.Intn) when max <= min.
func random(min, max int) int {
	return min + rand.Intn(max-min)
}
// TestGame is a demo scene used to stress-test sprite rendering.
// It embeds *Graphics.BaseScene and plugs its Load/Update methods into
// the scene's handler hooks (see main).
type TestGame struct {
	*Graphics.BaseScene
}
// Load populates the scene with 25000 smiley sprites, each placed at
// x=0 with a random y in [0, 500).
//
// Bug fix: the original keyed sprites with "smiley"+string(i), but
// string(i) converts the int to the rune with that code point
// (string(65) == "A"), yielding garbage keys and collisions. Use the
// decimal index instead.
func (g *TestGame) Load() {
	for i := 0; i < 25000; i++ {
		name := fmt.Sprintf("smiley%d", i)
		g.AddSprite(name, Graphics.NewImageSection(0, 0, 128, 128))
		g.GetSprite(name).SetLocation(0, float32(random(0, 500)))
	}
}
// Update moves every smiley sprite one unit to the right per frame.
//
// Bug fix: same "smiley"+string(i) key defect as Load — the index must
// be formatted as decimal. Also looks the sprite up once per iteration
// instead of twice.
func (g *TestGame) Update() {
	for i := 0; i < 25000; i++ {
		sprite := g.GetSprite(fmt.Sprintf("smiley%d", i))
		x, y := sprite.GetLocation()
		sprite.SetLocation(x+1, y)
	}
}
// main builds a 600x400 window hosting a TestGame scene and runs the
// game loop until the window closes. The large commented-out region
// below is an earlier hand-rolled SDL event/render loop kept for
// reference.
func main() {
	// defer profile.Start(profile.CPUProfile).Stop()
	w := Window.NewWindowedWindow("test", 600, 400)
	// NOTE(review): the error from NewBasicScene is silently discarded —
	// consider handling it.
	s, _ := Graphics.NewBasicScene("smiley.png", &w)
	g := TestGame{BaseScene: &s}
	// Wire the game's callbacks into the scene's handler hooks.
	g.LoadHandler = g.Load
	g.UpdateHandler = g.Update
	fmt.Println(g) // debug print of the scene value
	// for i := 0; i < 25000; i++ {
	// s.AddSprite("smiley"+string(i), Graphics.NewImageSection(0, 0, 128, 128))
	// s.GetSprite("smiley"+string(i)).SetLocation(float32(0), float32(random(0, 500)))
	// _, y := s.GetSprite("smiley" + string(i)).GetLocation()
	// fmt.Printf("smileystart %d has y %f\n", i, y)
	// }
	// var window *sdl.Window
	// var context sdl.GLContext
	// var event sdl.Event
	// var running bool
	// var err error
	// running = true
	// // x := 0
	// for running {
	// for event = sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
	// switch t := event.(type) {
	// case *sdl.QuitEvent:
	// running = false
	// case *sdl.MouseMotionEvent:
	// fmt.Println(string(t.Timestamp))
	// }
	// }
	// w.Clear()
	// for i := 0; i < 25000; i++ {
	// _, y := s.GetSprite("smiley" + string(i)).GetLocation()
	// s.GetSprite("smiley"+string(i)).SetLocation(float32(x), y)
	// // fmt.Printf("smileyend %d has y %f\n", i, y)
	// }
	// s.Draw()
	// w.Refresh()
	// Start blocks, driving Load once and then Update each frame.
	g.Start()
	// x += 1
	// }
	// // e := w.Open()
	// // if e != nil {
	// // t.Error("Window open failure: " + e.Error())
	// // }
	// // if w.isOpen() == false {
	// // t.Error("Window should be open but it's not")
	// // }
	// // if w.Width != 800 {
	// // t.Error("Window width should be 800")
	// // }
	// // if w.Height != 600 {
	// // t.Error("Window height should be 600")
	// // }
	// running := true
	// for running == true {
	// w.Refresh()
	// }
	w.Close()
}
|
package models
import (
"time"
"github.com/jinzhu/gorm"
)
// Session type that extends gorm.Model.
// It represents one persisted server-side web session row.
type Session struct {
	gorm.Model
	// Key is the session ID; unique, so there is one row per session.
	Key string `gorm:"unique"`
	// Data holds the encrypted cookie payload.
	Data []byte
	// ExpiresAt is the time the session will expire.
	ExpiresAt time.Time
}
|
package lc
// intersection returns the values present in both nums1 and nums2,
// deduplicated and ordered by first appearance in nums2.
//
// Improvement: the original deduplicated the result with a linear-scan
// `add` closure, making the routine O(n*m) when the intersection is
// large. Deleting matched keys from the lookup map keeps the result
// unique in O(len(nums1)+len(nums2)) total.
func intersection(nums1 []int, nums2 []int) []int {
	seen := make(map[int]bool, len(nums1))
	for _, num := range nums1 {
		seen[num] = true
	}
	// Non-nil empty slice preserved from the original (encodes as [] in JSON).
	set := []int{}
	for _, num := range nums2 {
		if seen[num] {
			set = append(set, num)
			// Remove the key so each value is emitted at most once.
			delete(seen, num)
		}
	}
	return set
}
|
package whosonit
import (
"template"
)
// root_template renders the index page: the list of currently open
// emails, each linking to its detail view. The markup uses the legacy
// (pre-Go 1) "template" package syntax: {.repeated section @} iterates
// the data, {.or} is the empty case, {.end} closes a section.
// NOTE(review): "RecieptDate" (sic) must match the field name on the
// data struct, so the spelling is preserved.
const root_template = `
<html>
<body>
<h1>Who is on it?</h1>
<h2>Current Emails</h2>
<ul>
{.repeated section @}
<li><a href="/show?sender={Sender}&date={RecieptDate}">{Sender} - {Subject} - {RecieptDate}</a></li>
{.or}
Nothing to do. Good job.
{.end}
</ul>
</body>
</html>
`

// rootTemplate is parsed once at package init; MustParse panics if the
// template source is malformed.
var rootTemplate = template.MustParse(root_template, nil)

// show_template renders one email's detail page, including ownership
// status and the accept/close action forms. {Subject|html} applies the
// html-escaping formatter; nested {.section X} blocks render only when
// field X is non-empty.
const show_template = `
<html>
<body>
{.repeated section @}
<h1>{Subject|html}</h1>
<h2>{Sender}</h2>
<h3>Status</h3>
<p>Received: {RecieptDate}</p>
<p>Owner: {.section Owner}{Owner}</p>
<p>Accepted On: {OwnerDate}{.or}Nobody{.end}</p>
{.section ClosedDate}
<p>Closed On: {ClosedDate}</p>
{.end}
<h3>Message Content</h3>
<p>{Body}</p>
{.section Owner}
{.section ClosedDate}
<h3>Actions</h3>
<form action="/close">
<input type="hidden" name="sender" value="{Sender}">
<input type="hidden" name="date" value="{RecieptDate}">
<input type="submit" name="Close" value="Close">
</form>
{.or}
{.end}
{.or}
<h3>Actions</h3>
<form action="/accept">
<input type="hidden" name="sender" value="{Sender}">
<input type="hidden" name="date" value="{RecieptDate}">
<input type="submit" name="Accept" value="Accept">
</form>
{.end}
{.end}
</body>
</html>`

// showTemplate is the parsed detail-page template; MustParse panics on
// a syntax error.
var showTemplate = template.MustParse(show_template, nil)
const test_form = `
<html>
<body>
<form action="/test_form">
<p>Sender: <input name="Sender"></p>
<p>Message: <input name="Body"></p>
<p><input type="submit"></p>
</body>
</html>`
|
package adminModel
import (
"gopkg.in/mgo.v2/bson"
"time"
"crypto/md5"
"encoding/hex"
"casino_common/utils/db"
"sendlinks/conf/tableName"
)
// Admin is a back-office (admin console) user record stored in MongoDB.
type Admin struct {
	ObjIds       bson.ObjectId `bson:"_id"`
	NickName     string        // display nickname
	AccountName  string        // login account name
	Password     string        // password, stored as an MD5 hex digest (see Insert)
	AccountPower []*Power      // permission list
	Time         time.Time     // creation time, set by Insert
}
// Insert assigns a fresh ObjectId, replaces the plaintext password with
// its MD5 hex digest, stamps the creation time and stores the record.
// NOTE: MD5 is not a safe password hash; kept to stay compatible with
// existing stored digests.
func (A *Admin) Insert() error {
	A.ObjIds = bson.NewObjectId()
	digest := md5.Sum([]byte(A.Password))
	A.Password = hex.EncodeToString(digest[:])
	A.Time = time.Now()
	return db.C(tableName.DB_LINKS_ADMIN_INFO).Insert(A)
}
// Save persists in-place edits by replacing the stored document whose
// _id matches this Admin.
func (A *Admin) Save() error {
	collection := db.C(tableName.DB_LINKS_ADMIN_INFO)
	return collection.Update(bson.M{"_id": A.ObjIds}, A)
}
//删除
func DelAdminUser(s string) error {
err := db.C(tableName.DB_LINKS_ADMIN_INFO).Remove(bson.M{"_id":bson.ObjectIdHex(s)})
return err
}
//查询
|
package main
//2325. 解密消息
//给你字符串 key 和 message ,分别表示一个加密密钥和一段加密消息。解密 message 的步骤如下:
//
//使用 key 中 26 个英文小写字母第一次出现的顺序作为替换表中的字母 顺序 。
//将替换表与普通英文字母表对齐,形成对照表。
//按照对照表 替换 message 中的每个字母。
//空格 ' ' 保持不变。
//例如,key = "happy boy"(实际的加密密钥会包含字母表中每个字母 至少一次),据此,可以得到部分对照表('h' -> 'a'、'a' -> 'b'、'p' -> 'c'、'y' -> 'd'、'b' -> 'e'、'o' -> 'f')。
//返回解密后的消息。
//
//
//
//示例 1:
//
//
//
//输入:key = "the quick brown fox jumps over the lazy dog", message = "vkbs bs t suepuv"
//输出:"this is a secret"
//解释:对照表如上图所示。
//提取 "the quick brown fox jumps over the lazy dog" 中每个字母的首次出现可以得到替换表。
//示例 2:
//
//
//
//输入:key = "eljuxhpwnyrdgtqkviszcfmabo", message = "zwx hnfx lqantp mnoeius ycgk vcnjrdb"
//输出:"the five boxing wizards jump quickly"
//解释:对照表如上图所示。
//提取 "eljuxhpwnyrdgtqkviszcfmabo" 中每个字母的首次出现可以得到替换表。
//
//
//提示:
//
//26 <= key.length <= 2000
//key 由小写英文字母及 ' ' 组成
//key 包含英文字母表中每个字符('a' 到 'z')至少一次
//1 <= message.length <= 2000
//message 由小写英文字母和 ' ' 组成
// decodeMessage builds a substitution table from the first-occurrence
// order of each letter in key ('a' for the first distinct letter, 'b'
// for the second, ...) and maps every non-space byte of message through
// it. Spaces pass through unchanged.
func decodeMessage(key string, message string) string {
	table := map[rune]byte{}
	next := byte('a')
	for _, r := range key {
		if r == ' ' {
			continue
		}
		// Only the first occurrence of each letter defines a mapping.
		if _, seen := table[r]; !seen {
			table[r] = next
			next++
		}
	}
	out := []byte(message)
	for i, r := range message {
		if r != ' ' {
			out[i] = table[r]
		}
	}
	return string(out)
}
|
package weixin
import (
"crypto/sha1"
"encoding/xml"
"fmt"
"io"
"log"
"net/http"
"sort"
"strings"
)
// calcSHA1 returns the lowercase hex SHA-1 digest of v.
func calcSHA1(v string) string {
	digest := sha1.Sum([]byte(v))
	return fmt.Sprintf("%x", digest)
}
// calcSignature implements the WeChat callback signature scheme:
// lexicographically sort the shared Token, timestamp and nonce,
// concatenate them, and SHA-1 the result.
func calcSignature(timestamp, nonce string) string {
	parts := []string{Token, timestamp, nonce}
	sort.Strings(parts)
	return calcSHA1(strings.Join(parts, ""))
}
// isValid checks the WeChat callback signature from the query string.
// On success it writes the echostr query value back to w (the
// verification handshake) and returns true; on mismatch it logs the
// details and returns false.
// NOTE(review): the echo is written for every valid request, not only
// the initial verification call — confirm a message handler writing to
// w afterwards is intended behaviour.
func isValid(w http.ResponseWriter, r *http.Request) bool {
	vs := r.URL.Query()
	signature := vs.Get("signature")
	timestamp := vs.Get("timestamp")
	nonce := vs.Get("nonce")
	echo := vs.Get("echostr")
	mySignature := calcSignature(timestamp, nonce)
	if signature == mySignature {
		fmt.Fprintf(w, "%s", echo)
		return true
	} else {
		log.Println("isValid: NOT PASS!", signature, timestamp, nonce, echo)
	}
	return false
}
// RequestMessage is an inbound WeChat message decoded from the XML
// request body. Only the fields relevant to MsgType are populated; the
// rest keep their zero values.
type RequestMessage struct {
	ToUserName   string
	FromUserName string
	CreateTime   int
	MsgType      string
	// --------------------------------------------------
	// text
	Content string
	// image
	PicUrl string
	// location
	Location_X float64
	Location_Y float64
	Scale      int
	Label      string
	// link
	Title       string
	Description string
	Url         string
	// --------------------------------------------------
	MsgId int64
}

// ResponseMessage is the reply payload sent back to WeChat. As with
// RequestMessage, only the fields matching MsgType are meaningful.
type ResponseMessage struct {
	ToUserName   string
	FromUserName string
	CreateTime   int
	MsgType      string
	// --------------------------------------------------
	// text
	Content string
	// music
	MusicUrl   string
	HQMusicUrl string
	// news
	ArticleCount int
	// --------------------------------------------------
	FuncFlag int
}

// PushMessage is an event push (e.g. subscribe or menu click) delivered
// by WeChat rather than typed by a user.
type PushMessage struct {
	ToUserName   string
	FromUserName string
	CreateTime   int
	MsgType      string
	Event        string
	EventKey     string
}
// MessageHandler processes one decoded inbound message and writes any
// reply directly to the ResponseWriter.
type MessageHandler func(http.ResponseWriter, RequestMessage)

var (
	// Token is the shared verification token configured on the WeChat
	// side; callers must set it before serving.
	Token string
	// msgTypeToHandler maps MsgType values ("text", "image", ...) to
	// their registered handlers.
	msgTypeToHandler map[string]MessageHandler
)

// init allocates the handler registry before any RegisterMessageHandler
// call can run.
func init() {
	msgTypeToHandler = make(map[string]MessageHandler)
}

// RegisterMessageHandler installs handler for the given MsgType,
// replacing any previous registration. Not safe for concurrent use with
// ReceiveMessage.
func RegisterMessageHandler(msgType string, handler MessageHandler) {
	msgTypeToHandler[msgType] = handler
}
// ReceiveMessage is the HTTP endpoint for WeChat callbacks: it verifies
// the request signature, decodes the XML message body, and dispatches
// to the handler registered for the message's MsgType.
func ReceiveMessage(w http.ResponseWriter, r *http.Request) {
	if !isValid(w, r) {
		log.Println("invalid request: ", r)
		return
	}
	var reqMsg RequestMessage
	err := xml.NewDecoder(r.Body).Decode(&reqMsg)
	if err != nil {
		log.Println("[ReceiveMessage] decode xml error: ", err)
		return
	}
	// Unknown or unregistered message types are silently dropped.
	if handler, ok := msgTypeToHandler[reqMsg.MsgType]; ok && handler != nil {
		handler(w, reqMsg)
	}
}
// Serve registers ReceiveMessage at path on the default mux and blocks
// in ListenAndServe on the given port. It returns only on server error.
// NOTE(review): the server sets no ReadTimeout/WriteTimeout, so slow
// clients can hold connections open indefinitely — consider adding them.
func Serve(port int, path string) error {
	http.HandleFunc(path, ReceiveMessage)
	addr := fmt.Sprintf(":%d", port)
	s := &http.Server{
		Addr:           addr,
		Handler:        nil, // nil means http.DefaultServeMux
		MaxHeaderBytes: 1 << 20,
	}
	return s.ListenAndServe()
}
|
package lib
import (
"strings"
tea "github.com/charmbracelet/bubbletea"
"github.com/muesli/reflow/ansi"
)
// Viewport is a Component with an explicit size and vertical position;
// it is one pane of the three-pane layout managed by viewports below.
type Viewport struct {
	ModelWidth, ModelHeight, YPosition int
	Component
}

// Width returns the pane's configured width in cells.
func (v *Viewport) Width() int { return v.ModelWidth }

// Height returns the pane's configured height in cells.
func (v *Viewport) Height() int { return v.ModelHeight }

// Drawer wraps the embedded Component's drawer together with this
// Viewport, so size queries resolve against the pane's dimensions.
func (v Viewport) Drawer() *ViewportDrawer {
	return &ViewportDrawer{
		Viewport: v,
		Drawer:   v.Component.Drawer(),
	}
}
// ViewportDrawer pairs a Viewport (for its Width/Height methods) with
// the underlying component Drawer.
type ViewportDrawer struct {
	Viewport // embed for height/width methods
	Drawer
}

// viewports models the whole screen: three side-by-side panes (params,
// labels, logs) separated by a divider, with a help pane at the bottom.
type viewports struct {
	totals               tea.WindowSizeMsg // last known terminal size
	ready                bool              // false until the first WindowSizeMsg arrives
	focusPane            Pane              // which pane currently has focus
	separator            MergableSep
	params, labels, logs Viewport
	help                 HelpPane
}
// focused returns the pane that currently has keyboard focus; the
// params pane is the default.
func (v *viewports) focused() *Viewport {
	switch v.focusPane {
	case LabelsPane:
		return &v.labels
	case LogsPane:
		return &v.logs
	default:
		return &v.params
	}
}
// Update handles terminal resizes and the n/p keys that cycle pane
// focus; all messages are additionally forwarded to the focused pane.
func (v *viewports) Update(msg tea.Msg) tea.Cmd {
	var cmds []tea.Cmd
	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		v.Size(msg)
	case tea.KeyMsg:
		switch msg.String() {
		case "n":
			v.focusPane = v.focusPane.Next()
			// Re-layout: the primary pane (and so all widths) changed.
			v.Size(v.totals)
		case "p":
			v.focusPane = v.focusPane.Prev()
			v.Size(v.totals)
		}
	}
	cmd := v.focused().Update(msg)
	cmds = append(cmds, cmd)
	return tea.Batch(cmds...)
}
// selected returns the focused pane as main plus the other two panes as
// secondaries, for layout purposes.
func (v *viewports) selected() (main *Viewport, secondaries []*Viewport) {
	switch v.focusPane {
	case LabelsPane:
		return &v.labels, []*Viewport{&v.params, &v.logs}
	case LogsPane:
		return &v.logs, []*Viewport{&v.params, &v.labels}
	// ParamsPane is the default
	default:
		return &v.params, []*Viewport{&v.labels, &v.logs}
	}
}
// Size sets pane sizes (primary & secondaries) based on the golden
// ratio: the focused pane gets width/φ and the two secondary panes
// split the remainder. It also records the new totals and sizes the
// help pane.
//
// Cleanup: the original guarded the ready flag with
// `if !v.ready { v.ready = true }`, which is just an assignment.
func (v *viewports) Size(msg tea.WindowSizeMsg) {
	v.totals = msg
	// Receiving any size message means we can render.
	v.ready = true

	v.help.Height = 4
	v.help.Width = msg.Width

	// Vertical budget: total height minus the 3-line pane header and the
	// help pane.
	const headerHeight = 3
	height := msg.Height - headerHeight - v.help.Height
	v.params.ModelHeight = height
	v.labels.ModelHeight = height
	v.logs.ModelHeight = height

	// Horizontal budget: total width minus the two separators.
	width := msg.Width - v.separator.Width()*2
	primary := int(float64(width) / GoldenRatio)
	secondary := (width - primary) / 2
	main, secondaries := v.selected()
	main.ModelWidth = primary
	for _, s := range secondaries {
		s.ModelWidth = secondary
	}
}
// header draws a three-line framed title, horizontally offset so it
// sits above the focused pane.
func (v *viewports) header() string {
	pane := v.focusPane
	width := v.totals.Width
	// start is the column at which the focused pane begins; 0 for the
	// default (params) pane.
	var start int
	switch pane {
	case LabelsPane:
		start = v.params.Width() + v.separator.Width()
	case LogsPane:
		start = v.params.Width()*2 + v.separator.Width()*2 // all non-primary panes have the same size
	}
	headerTopFrame := "╭─────────────╮"
	headerBotFrame := "╰─────────────╯"
	headerTop := ExactWidth(LPad(headerTopFrame, start+ansi.PrintableRuneWidth(headerTopFrame)), width)
	headerBot := ExactWidth(LPad(headerBotFrame, start+ansi.PrintableRuneWidth(headerBotFrame)), width)
	// Use a tee connector when the frame is not flush against the left
	// screen edge, so the middle line joins the preceding rule.
	lConnector := "│"
	if start > 0 {
		lConnector = "┤"
	}
	headerMid := lConnector + CenterTo(pane.String(), ansi.PrintableRuneWidth(headerTopFrame)-2) + "├"
	headerMid = LPadWith(headerMid, '─', start+ansi.PrintableRuneWidth(headerMid))
	headerMid = RPadWith(headerMid, '─', width)
	return strings.Join([]string{headerTop, headerMid, headerBot}, "\n")
}
// View renders the full screen: the focused-pane header, the three
// panes merged side by side with separators, and the help pane.
func (v *viewports) View() string {
	// Until the first WindowSizeMsg arrives we have no dimensions.
	if !v.ready {
		return "\n Initializing..."
	}
	merger := CrossMerge{
		v.params.Drawer(),
		v.labels.Drawer(),
		v.logs.Drawer(),
	}.Intersperse(v.separator)
	return strings.Join(
		[]string{
			v.header(),
			merger.View(),
			v.help.View(),
		},
		"\n",
	)
}
|
package delaytask
import (
"time"
"strconv"
"strings"
)
const (
	// DelayTask is a one-shot task run at a fixed time (a delayed or
	// scheduled task).
	DelayTask = iota
	// PeriodTask is a recurring (periodic) task.
	PeriodTask
)
// Serializer is anything that can serialize itself to a JSON string.
type Serializer interface {
	ToJson() string
}

// Runner is a schedulable task.
type Runner interface {
	Serializer
	Run() (bool, error)
	// GetToRunAt returns the instant the runner is scheduled to run.
	GetToRunAt() time.Time
	// UpdateToRunAt advances the scheduled time (used by periodic tasks).
	UpdateToRunAt()
	GetRunAt() time.Time
	// GetType returns DelayTask or PeriodTask.
	GetType() int
	IsTaskEnd() bool
	GetTimeout() time.Duration
	GetName() string
	GetID() int64
	SetError(error)
	// Result returns the task's run result.
	Result() interface{}
}

// IWorker executes runners.
type IWorker interface {
	Execute(Runner)
}
// TaskTime is time.Time with Unix-seconds JSON (de)serialization.
type TaskTime time.Time

// TaskDuration is time.Duration with whole-seconds JSON (de)serialization.
type TaskDuration time.Duration

// ToTime converts back to the underlying time.Time.
func (t TaskTime) ToTime() time.Time {
	return time.Time(t)
}

// ToDuration converts back to the underlying time.Duration.
func (t TaskDuration) ToDuration() time.Duration {
	return time.Duration(t)
}
// UnmarshalJSON decodes a (possibly quoted) Unix-seconds value into t.
// Precision is whole seconds.
func (t *TaskTime) UnmarshalJSON(data []byte) error {
	trimmed := strings.Trim(string(data), "\"")
	seconds, err := strconv.ParseInt(trimmed, 10, 64)
	if err != nil {
		return err
	}
	*t = TaskTime(time.Unix(seconds, 0))
	return nil
}
// MarshalJSON encodes t as a quoted Unix-seconds string, e.g. "1600000000".
func (t TaskTime) MarshalJSON() ([]byte, error) {
	out := strconv.AppendInt([]byte{'"'}, time.Time(t).Unix(), 10)
	return append(out, '"'), nil
}
// UnmarshalJSON decodes a (possibly quoted) number of seconds into t.
func (t *TaskDuration) UnmarshalJSON(data []byte) error {
	trimmed := strings.Trim(string(data), "\"")
	seconds, err := strconv.ParseInt(trimmed, 10, 64)
	if err != nil {
		return err
	}
	// Seconds to nanoseconds (time.Second == 1e9 ns).
	*t = TaskDuration(time.Duration(seconds) * time.Second)
	return nil
}
// MarshalJSON encodes t as a quoted whole-second count; sub-second
// precision is truncated.
func (t TaskDuration) MarshalJSON() ([]byte, error) {
	seconds := int64(time.Duration(t) / time.Second)
	out := strconv.AppendInt([]byte{'"'}, seconds, 10)
	return append(out, '"'), nil
}
// Creator builds a Runner from its serialized (JSON) task string.
type Creator func(task string) Runner

// Factory maps task names to their Creators.
type Factory interface {
	Register(name string, creator Creator)
	Create(string) Runner
}
|
package find
import (
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
)
type Options struct {
Recursive bool
StopAtFirstMatch bool
RegularFilesOnly bool
DirectoriesOnly bool
MatchRegex *regexp.Regexp
MatchExtension string
// Sort newest to oldest
SortByRecentModTime bool
// Sort oldest to newest
ReverseSortByRecentModTime bool
// Add these later
// // Sort by name
// SortByName bool
// // Sort by name reversed
// ReverseSortByName bool
}
type Found struct {
Path string
Info os.FileInfo
}
func Find(dir string, opts Options) ([]Found, error) {
var result []Found
if opts.Recursive {
err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if opts.RegularFilesOnly {
if !fi.Mode().IsRegular() {
return nil
}
}
if opts.DirectoriesOnly {
if !fi.Mode().IsDir() {
return nil
}
}
if nil != opts.MatchRegex {
if !opts.MatchRegex.MatchString(fi.Name()) {
return nil
}
}
if opts.MatchExtension != "" {
if opts.MatchExtension != filepath.Ext(fi.Name()) {
return nil
}
}
result = append(result, Found{
Path: path,
Info: fi,
})
if opts.StopAtFirstMatch {
return io.EOF
}
return nil
})
if err == io.EOF {
err = nil
}
if err != nil {
return result, err
}
} else {
files, err := ioutil.ReadDir(dir)
if err != nil {
return result, err
}
for _, fi := range files {
if opts.RegularFilesOnly {
if !fi.Mode().IsRegular() {
continue
}
}
if opts.DirectoriesOnly {
if !fi.Mode().IsDir() {
continue
}
}
if nil != opts.MatchRegex {
if !opts.MatchRegex.MatchString(fi.Name()) {
continue
}
}
if opts.MatchExtension != "" {
if opts.MatchExtension != filepath.Ext(fi.Name()) {
continue
}
}
result = append(result, Found{
Path: filepath.Join(dir, fi.Name()),
Info: fi,
})
if opts.StopAtFirstMatch {
break
}
}
}
if opts.SortByRecentModTime {
sort.Slice(result, func(i, j int) bool {
return result[i].Info.ModTime().After(result[j].Info.ModTime())
})
} else if opts.ReverseSortByRecentModTime {
sort.Slice(result, func(i, j int) bool {
return result[i].Info.ModTime().Before(result[j].Info.ModTime())
})
}
if len(result) > 0 {
return result, nil
}
return result, errors.New("no files found")
}
|
/**
* Copyright (c) 2018-present, MultiVAC Foundation.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package chain
import (
"fmt"
"os"
"reflect"
"testing"
"github.com/multivactech/MultiVAC/configs/config"
"github.com/multivactech/MultiVAC/model/chaincfg/chainhash"
"github.com/multivactech/MultiVAC/model/chaincfg/multivacaddress"
"github.com/multivactech/MultiVAC/model/chaincfg/signature"
"github.com/multivactech/MultiVAC/model/shard"
"github.com/multivactech/MultiVAC/model/wire"
)
const (
	// TestDir is the throwaway data directory used by these tests; it is
	// removed by clear() after each test.
	TestDir = "test"
)

var (
	// chain is the disk-backed chain under test, recreated by setup().
	chain *diskBlockChain
	// TestShard is the shard all fake blocks in this file belong to.
	TestShard = shard.IDToShardIndex(1)
)
// fakeBlock builds a minimal block for the given shard and height —
// just enough structure for the chain-storage tests below.
func fakeBlock(shard shard.Index, hgt wire.BlockHeight) *wire.MsgBlock {
	header := wire.BlockHeader{ShardIndex: shard, Height: int64(hgt)}
	return &wire.MsgBlock{
		Header: header,
		Body:   &wire.BlockBody{},
	}
}
// setup loads the global config, points the data directory at the
// throwaway TestDir and creates a fresh disk-backed chain. It panics on
// config errors because every test in this file depends on it.
func setup() {
	_, err := config.LoadConfig()
	if err != nil {
		panic(err)
	}
	config.GlobalConfig().DataDir = TestDir
	chain = newDiskBlockChain()
}
// clear removes the on-disk test folder; deferred by each test to undo
// what setup() created.
func clear() {
	os.RemoveAll(TestDir)
}
// TestReceiveBlock verifies that a block can be stored, that it is then
// retrievable by (shard, height), and that re-receiving the same block
// still reports success (idempotence).
//
// Fix: the original failure messages were typo-riddled ("faild to
// received") and one was just "Wrong", which is useless on failure.
func TestReceiveBlock(t *testing.T) {
	setup()
	defer clear()
	// Make a fake block
	fBlock := fakeBlock(TestShard, 2)
	ok := chain.ReceiveBlock(fBlock)
	if !ok {
		t.Errorf("Chain failed to receive block: %v", fBlock)
	}
	// Check if the block is received successfully.
	if !chain.containsShardsBlock(TestShard, wire.BlockHeight(2)) {
		t.Errorf("Chain does not contain block at height 2 for shard %v", TestShard)
	}
	// Receiving the very same block again must still succeed.
	ok = chain.ReceiveBlock(fBlock)
	if !ok {
		t.Errorf("Chain failed to receive the same block again: %v", fBlock)
	}
}
// TestGetInfo stores one fake block and exercises every read API:
// lookup by hash and by height for both blocks and headers, the header
// hash range query, and the shard height query.
// NOTE(review): the failure messages contain "Faild"/"faild"/"lenght"
// typos; left untouched here.
func TestGetInfo(t *testing.T) {
	setup()
	defer clear()
	// Make a fake block.
	fBlock := fakeBlock(TestShard, 2)
	ok := chain.ReceiveBlock(fBlock)
	if !ok {
		t.Errorf("Chain faild to received block: %v", fBlock)
	}
	// Test get block by hash.
	b := chain.GetShardsBlockByHash(fBlock.Header.BlockHeaderHash())
	if b == nil {
		t.Errorf("Faild find block in chain.")
		return
	}
	if b.Header.BlockHeaderHash() != fBlock.Header.BlockHeaderHash() {
		t.Errorf("Block match faild (by hash), (expect): %v, (actual): %v", fBlock.Header.BlockHeaderHash(), b.Header.BlockHeaderHash())
	}
	// Test get block by height.
	b = chain.GetShardsBlockByHeight(TestShard, 2)
	if b == nil {
		t.Errorf("Faild find block in chain.")
		return
	}
	if b.Header.BlockHeaderHash() != fBlock.Header.BlockHeaderHash() {
		t.Errorf("Block match faild (by height), (expect): %v, (actual): %v", fBlock.Header.BlockHeaderHash(), b.Header.BlockHeaderHash())
	}
	// Test get header by hash.
	h := chain.GetShardsHeaderByHash(fBlock.Header.BlockHeaderHash())
	if h == nil {
		t.Errorf("Faild find block in chain.")
		return
	}
	if h.BlockHeaderHash() != fBlock.Header.BlockHeaderHash() {
		t.Errorf("Header match faild (by hash), (expect): %v, (actual): %v", fBlock.Header.BlockHeaderHash(), h.BlockHeaderHash())
	}
	// Test get header by hash.
	h = chain.GetShardsHeaderByHeight(TestShard, 2)
	if h == nil {
		t.Errorf("Faild find block in chain.")
		return
	}
	if h.BlockHeaderHash() != fBlock.Header.BlockHeaderHash() {
		t.Errorf("Header match faild (by height), (expect): %v, (actual): %v", fBlock.Header.BlockHeaderHash(), h.BlockHeaderHash())
	}
	// Test get header hashes.
	from := wire.BlockHeight(1)
	end := wire.BlockHeight(2)
	hashes := chain.GetShardsHeaderHashes(TestShard, from, end)
	if len(hashes) != int(end-from+1) {
		t.Errorf("Wrong lenght of result, (expect): %d, (actual): %d", end-from+1, len(hashes))
	}
	// Test get shard height
	height := chain.GetShardsHeight(TestShard)
	if height != 2 {
		t.Errorf("Height match error, (expect): 2, (actual): %d", height)
	}
}
// TestReceiveHeader stores a header (without its block) and checks it
// is retrievable by height and by hash.
func TestReceiveHeader(t *testing.T) {
	setup()
	defer clear()
	// Make a fake block.
	fBlock := fakeBlock(TestShard, 2)
	ok := chain.ReceiveHeader(&fBlock.Header)
	if !ok {
		t.Errorf("Chain faild to received header: %v", fBlock.Header)
	}
	// Test get header by height
	h := chain.GetShardsHeaderByHeight(TestShard, 2)
	if h == nil {
		t.Errorf("Faild find block in chain.")
		return
	}
	if h.BlockHeaderHash() != fBlock.Header.BlockHeaderHash() {
		t.Errorf("Header match faild (by height) (expect): %v, (actual): %v", fBlock.Header.BlockHeaderHash(), h.BlockHeaderHash())
	}
	// Test get header by hash.
	h = chain.GetShardsHeaderByHash(fBlock.Header.BlockHeaderHash())
	if h == nil {
		t.Errorf("Faild find block in chain.")
		return
	}
	if h.BlockHeaderHash() != fBlock.Header.BlockHeaderHash() {
		t.Errorf("Header match faild (by hash), (expect): %v, (actual): %v", fBlock.Header.BlockHeaderHash(), h.BlockHeaderHash())
	}
	// TODO(zz): If receive header first, then can't received the corresponding block.
	// Is this a bug or our design?
	chain.ReceiveBlock(fBlock)
}
// TODO: complete testSyncTrigger when test sync
// Used for function(TestSync)
// type testSyncTrigger struct {
// count int
// }
// func (st *testSyncTrigger) MaybeSync() {
// st.count++
// }
// TODO: fix for testnet-3.0
// func TestSync(t *testing.T) {
// setup()
// defer clear()
// // Make a fake syncTrigger
// trigger := new(testSyncTrigger)
// // Register syncTrigger
// chain.SetSyncTrigger(TestShard, trigger)
// // Receive a fake block
// fBlock := fakeBlock(TestShard, 2)
// ok := chain.ReceiveBlock(fBlock)
// if !ok {
// t.Errorf("Faild to receive block, block height is %d", fBlock.Header.Height)
// }
// // Receive a block with heigh 4, this may trigger sync.
// fBlock2 := fakeBlock(TestShard, 4)
// ok = chain.ReceiveBlock(fBlock2)
// if !ok {
// t.Errorf("Faild to receive block, block height is %d", fBlock2.Header.Height)
// }
// if trigger.count != 1 {
// t.Error("Faild to trigger sync.")
// }
// // Receive a block that maybe from sync, this will not trigger sync.
// fBlock3 := fakeBlock(TestShard, 3)
// ok = chain.ReceiveBlock(fBlock3)
// if !ok {
// t.Errorf("Faild to receive block, block height is %d", fBlock3.Header.Height)
// }
// if trigger.count != 1 {
// t.Error("Invalid count for trigger")
// }
// }
// TestGetSmartContract stores one fake smart contract and verifies it
// can be read back with its API list and code intact.
func TestGetSmartContract(t *testing.T) {
	setup()
	defer clear()
	// Make a fake smart contracts
	txHash := chainhash.Hash{}
	err := txHash.SetBytes([]byte("testContractAddr"))
	if err != nil {
		fmt.Println("Faild to set bytes: " + err.Error())
	}
	scs := []*wire.SmartContract{
		{
			ContractAddr: txHash.FormatSmartContractAddress(),
			APIList:      []string{"exe", "do"},
			Code:         []byte("public static void main"),
		},
	}
	ok := chain.receiveSmartContracts(scs)
	if !ok {
		t.Errorf("Chain faild to received samrt contracts: %v", scs)
	}
	sc := chain.GetSmartContract(txHash.FormatSmartContractAddress())
	// Check if the smartContract is received correct.
	if sc == nil {
		t.Errorf("Chain faild to get smart contract: %v", sc)
	}
	if !reflect.DeepEqual(sc.APIList, []string{"exe", "do"}) {
		t.Errorf("Chain save wrong smart contract APIList, want %v\n get: %v", []string{"exe", "do"}, sc.APIList)
	}
	if !reflect.DeepEqual(sc.Code, []byte("public static void main")) {
		t.Errorf("Chain save wrong smart contract Code, want %v\n get: %v", []byte("public static void main"), sc.Code)
	}
}
// TestGetSmartContractOuts saves a contract's code out (index 0) and
// its shard init out (index 1), then verifies both round-trip with the
// right index and data.
func TestGetSmartContractOuts(t *testing.T) {
	setup()
	defer clear()
	txHash := chainhash.Hash{}
	err := txHash.SetBytes([]byte("testContractAddr"))
	if err != nil {
		fmt.Println("Faild to set bytes: " + err.Error())
	}
	codeData := []byte("public static void main")
	initData := []byte("init")
	contractAddr := multivacaddress.GenerateAddress(signature.PublicKey(txHash.CloneBytes()), multivacaddress.SmartContractAddress)
	// Make a fake smart contract outs
	out1 := &wire.OutPoint{
		Shard:           TestShard,
		TxHash:          txHash,
		Index:           0,
		Data:            codeData,
		ContractAddress: contractAddr,
	}
	out2 := &wire.OutPoint{
		Shard:           TestShard,
		TxHash:          txHash,
		Index:           1,
		Data:            initData,
		ContractAddress: contractAddr,
	}
	err = chain.saveSmartContractCodeOut(out1.ContractAddress, out1.Shard, out1.ToUnspentOutState())
	if err != nil {
		t.Errorf("Chain faild to received smart contract code out: %v", out1)
	}
	err = chain.saveSmartContractShardInitOut(out2.ContractAddress, out2.Shard, out2.ToUnspentOutState())
	if err != nil {
		t.Errorf("Chain faild to received smart contract shard init out: %v", out2)
	}
	codeOut := chain.getSmartContractCodeOut(contractAddr, out1.Shard)
	shardDataOut := chain.getSmartContractShardInitOut(contractAddr, out2.Shard)
	// Check if the smartContract outs is received correct.
	if codeOut == nil || shardDataOut == nil {
		t.Errorf("Chain faild to get smart contract out: %v\n%v", codeOut, shardDataOut)
	}
	// Verify the out holding the contract code is correct.
	if codeOut.Index != 0 {
		t.Errorf("Chain faild to get smart contract data out According to the order: %v", codeOut)
	}
	if !reflect.DeepEqual(codeOut.Data, codeData) {
		t.Errorf("Chain get WRONG smart contract data out, want: %v\nget: %v", codeData, codeOut.Data)
	}
	// Verify the out holding the shard init data is correct.
	if shardDataOut.Index != 1 {
		t.Errorf("Chain faild to get smart contract data out According to the order: %v", shardDataOut)
	}
	if !reflect.DeepEqual(shardDataOut.Data, initData) {
		t.Errorf("Chain get WRONG smart contract data out, want: %v\nget: %v", initData, shardDataOut.Data)
	}
}
|
package eventsapi
import (
"encoding/json"
"fmt"
"github.com/oklahomer/golack/v2/event"
"github.com/tidwall/gjson"
)
// https://api.slack.com/events-api#callback_field_overview
// outer is the common envelope (metadata) wrapped around every Events
// API callback; the inner "event" field is decoded separately.
type outer struct {
	Token       string           `json:"token"`
	TeamID      string           `json:"team_id"`
	APIAppID    string           `json:"api_app_id"`
	Type        string           `json:"type"`
	AuthedUsers []string         `json:"authed_users"`
	EventID     event.EventID    `json:"event_id"`
	EventTime   *event.TimeStamp `json:"event_time"`
}
// EventWrapper contains given event, metadata and the request.
// Event holds the decoded inner Slack event; outer carries the
// callback's envelope fields; Request is the originating HTTP payload.
type EventWrapper struct {
	*outer
	Event   interface{}
	Request *SlackRequest
}
// URLVerification is a special payload for initial configuration.
// When an administrator registers an API endpoint on the Slack App
// configuration page, this payload is sent to the endpoint to verify
// its validity; the endpoint must echo Challenge back.
//
// This is part of the events list located at https://api.slack.com/events,
// but the structure is defined in this package because this is specifically designed for Events API protocol
// Just like Ping and Pong events are specifically designed for RTM API protocol.
type URLVerification struct {
	Type      string `json:"type"`
	Challenge string `json:"challenge"`
	Token     string `json:"token"`
}
// DecodePayload receives req and decodes the event payload it carries.
// The returned value is one of *URLVerification or *EventWrapper:
// *URLVerification is sent on initial configuration when an
// administrator registers the API endpoint with Slack; everything else
// arrives as an "event_callback" wrapped in *EventWrapper.
//
// Fix: corrected the "requred" typo in the malformed-payload error
// message for the missing event field.
func DecodePayload(req *SlackRequest) (interface{}, error) {
	parsed := gjson.ParseBytes(req.Payload)
	typeValue := parsed.Get("type")
	if !typeValue.Exists() {
		return nil, event.NewMalformedPayloadError(fmt.Sprintf("required type field is not given: %s", parsed))
	}
	switch eventType := typeValue.String(); eventType {
	case "url_verification":
		verification := &URLVerification{}
		err := json.Unmarshal(req.Payload, verification)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal JSON: %w", err)
		}
		return verification, nil
	case "event_callback":
		o := &outer{}
		err := json.Unmarshal(req.Payload, o)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal JSON: %w", err)
		}
		// Read the event field that represents the Slack event being sent
		eventValue := parsed.Get("event")
		if !eventValue.Exists() {
			return nil, event.NewMalformedPayloadError(fmt.Sprintf("required event field is not given: %s", parsed))
		}
		ev, err := event.Map(eventValue)
		if err != nil {
			return nil, err
		}
		// Construct a wrapper object that contains meta, event and request data
		return &EventWrapper{
			outer:   o,
			Event:   ev,
			Request: req,
		}, nil
	default:
		return nil, event.NewUnknownPayloadTypeError(fmt.Sprintf("undefined type of %s is given", eventType))
	}
}
|
// Copyright (c) 2020 - for information on the respective copyright owner
// see the NOTICE file and/or the repository at
// https://github.com/hyperledger-labs/perun-node
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ethereumtest
import (
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
pethwallet "perun.network/go-perun/backend/ethereum/wallet"
pwallet "perun.network/go-perun/wallet"
"github.com/hyperledger-labs/perun-node"
"github.com/hyperledger-labs/perun-node/blockchain/ethereum"
)
// Command to start the ganache-cli node:
//
// ganache-cli --account="0x1fedd636dbc7e8d41a0622a2040b86fea8842cef9d4aa4c582aad00465b7acff,10000000000000000000" \
// --account="0xb0309c60b4622d3071fad3e16c2ce4d0b1e7758316c187754f4dd0cfb44ceb33,10000000000000000000"
//
// Ethereum address corresponding the above accounts: 0x8450c0055cB180C7C37A25866132A740b812937B and
// 0xc4bA4815c82727554e4c12A07a139b74c6742322.
//
// The account in the command corresponds to the on-chain account of first two users when seeding the rand source
// with "RandSeedForTestAccs" and passing numParts as 0. If numParts is not zero, then the on-chain account is funded
// only for the first user. Hence DO NOT CHANGE THE RAND SEED for integration tests in this package.
//
// The contracts will be deployed only during the first run of tests and will be resused in subsequent runs. This
// saves ~0.3s of setup time in each run. Hence when running tests on development machine, START THE NODE ONLY ONCE.
// adjudicatorAddr and assetAddr memoize the pre-computed contract
// addresses so repeated SetupContracts calls in one test run reuse the
// same deployment.
var adjudicatorAddr, assetAddr pwallet.Address
// SetupContractsT is the test friendly version of SetupContracts.
// It uses the passed testing.T to handle the errors and registers the cleanup functions on it.
func SetupContractsT(t *testing.T, chainURL string, onChainTxTimeout time.Duration) (
	adjudicator, asset pwallet.Address) {
	var err error
	adjudicator, asset, err = SetupContracts(chainURL, onChainTxTimeout)
	require.NoError(t, err)
	return adjudicator, asset
}
// ContractAddrs returns the contract addresses of adjudicator and asset contracts used in test setups.
// Address generation mechanism in ethereum is used to pre-compute the contract address.
//
// On a fresh ganache-cli node run the setup contracts helper function to deploy these contracts.
// It panics when the deterministic test wallet cannot be created, since
// the fixed rand seed makes that unexpected.
func ContractAddrs() (adjudicator, asset pwallet.Address) {
	prng := rand.New(rand.NewSource(RandSeedForTestAccs))
	ws, err := NewWalletSetup(prng, 2)
	if err != nil {
		panic("Cannot setup test wallet")
	}
	// Nonce 0 deploys the adjudicator, nonce 1 the asset contract.
	adjudicator = pethwallet.AsWalletAddr(crypto.CreateAddress(pethwallet.AsEthAddr(ws.Accs[0].Address()), 0))
	asset = pethwallet.AsWalletAddr(crypto.CreateAddress(pethwallet.AsEthAddr(ws.Accs[0].Address()), 1))
	return
}
// SetupContracts checks whether valid contracts are already deployed at
// the pre-computed addresses and deploys them if not. Ethereum's
// deterministic contract-address scheme (sender address + nonce) is
// used to pre-compute the addresses, so repeat runs against the same
// ganache-cli instance reuse the original deployment.
//
// Fix: corrected the "initializaing" typo, and the final WithMessage no
// longer repeats the chain-backend message for the deploy path.
func SetupContracts(chainURL string, onChainTxTimeout time.Duration) (
	adjudicator, asset pwallet.Address, _ error) {
	prng := rand.New(rand.NewSource(RandSeedForTestAccs))
	ws, err := NewWalletSetup(prng, 2)
	if err != nil {
		return nil, nil, err
	}
	// On-chain credentials of the funded test account (see the package
	// comment about the fixed rand seed).
	onChainCred := perun.Credential{
		Addr:     ws.Accs[0].Address(),
		Wallet:   ws.Wallet,
		Keystore: ws.KeystorePath,
		Password: "",
	}
	if !isBlockchainRunning(chainURL) {
		return nil, nil, errors.New("cannot connect to ganache-cli node at " + chainURL)
	}
	if adjudicatorAddr == nil && assetAddr == nil {
		// First call: pre-compute the addresses from the deployer's
		// nonces 0 and 1, then cache them for subsequent calls.
		adjudicator = pethwallet.AsWalletAddr(crypto.CreateAddress(pethwallet.AsEthAddr(onChainCred.Addr), 0))
		asset = pethwallet.AsWalletAddr(crypto.CreateAddress(pethwallet.AsEthAddr(onChainCred.Addr), 1))
		adjudicatorAddr = adjudicator
		assetAddr = asset
	} else {
		adjudicator = adjudicatorAddr
		asset = assetAddr
	}
	chain, err := ethereum.NewChainBackend(chainURL, ChainConnTimeout, onChainTxTimeout, onChainCred)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "initializing chain backend")
	}
	err = chain.ValidateContracts(adjudicator, asset)
	if err != nil {
		// Contracts not yet deployed for this ganache-cli instance.
		adjudicator, asset, err = deployContracts(chain, onChainCred)
	}
	return adjudicator, asset, errors.WithMessage(err, "deploying contracts")
}
// isBlockchainRunning reports whether a blockchain node accepts websocket
// connections at the given URL.
func isBlockchainRunning(url string) bool {
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		return false
	}
	// Fixed: close the probe connection instead of leaking it — we only
	// needed to know that the dial succeeds.
	conn.Close()
	return true
}
// deployContracts deploys the adjudicator and asset contracts from the given
// on-chain credentials and returns their addresses.
func deployContracts(chain perun.ChainBackend, onChainCred perun.Credential) (adjudicator, asset pwallet.Address,
	_ error) {
	var err error
	if adjudicator, err = chain.DeployAdjudicator(onChainCred.Addr); err != nil {
		return nil, nil, err
	}
	asset, err = chain.DeployAsset(adjudicator, onChainCred.Addr)
	return adjudicator, asset, err
}
|
package model
// CrawlResult pairs a crawled input URL with the page content that was
// fetched for it.
type CrawlResult struct {
	Url  Input  // the crawled URL (project-defined Input type)
	Page string // fetched page body — presumably raw HTML; confirm with producer
}
|
package authinfo
import (
"testing"
"github.com/stretchr/testify/assert"
)
var (
	// testAuthInfo is the fixture for the group-permission tests below: a
	// user belonging to the "company" and "admin-team" groups.
	testAuthInfo = AuthInfo{
		Username: "a-user",
		Groups:   []string{"company", "admin-team"},
	}
)
// TestCheckPermittedGroups verifies AuthInfo.CheckGroup: it must report
// membership only for the groups the fixture user actually belongs to.
func TestCheckPermittedGroups(t *testing.T) {
	assert.False(t, testAuthInfo.CheckGroup("a-team"), "a-team is not a member")
	assert.True(t, testAuthInfo.CheckGroup("company"), "company is a member")
	assert.True(t, testAuthInfo.CheckGroup("admin-team"),
		"admin-team is a member")
}
|
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package rls
import (
"google.golang.org/grpc/balancer/apis"
"sync"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpcsync"
)
var (
	// Compile-time assertion that rlsBalancer implements balancer.Balancer.
	_ balancer.Balancer = (*rlsBalancer)(nil)
	// For overriding in tests.
	newRLSClientFunc = newRLSClient
	logger           = grpclog.Component("rls")
)
// rlsBalancer implements the RLS LB policy.
type rlsBalancer struct {
	done *grpcsync.Event // fired on Close; checked before handling updates
	cc   balancer.ClientConn
	opts balancer.BuildOptions
	// Mutex protects all the state maintained by the LB policy.
	// TODO(easwars): Once we add the cache, we will also have another lock for
	// the cache alone.
	mu    sync.Mutex
	lbCfg *lbConfig        // Most recently received service config.
	rlsCC *grpc.ClientConn // ClientConn to the RLS server.
	rlsC  *rlsClient       // RLS client wrapper.
	// ccUpdateCh carries service-config updates to the run goroutine.
	ccUpdateCh chan *balancer.ClientConnState
}
// run is a long-running goroutine that processes the updates the balancer
// wishes to handle. The update handlers push updates onto channels; this
// goroutine selects on them, so all handling happens asynchronously.
// It exits when the balancer is closed.
func (lb *rlsBalancer) run() {
	// TODO(easwars): Handle other updates like subConn state changes, RLS
	// responses from the server etc.
	for {
		select {
		case <-lb.done.Done():
			return
		case update := <-lb.ccUpdateCh:
			lb.handleClientConnUpdate(update)
		}
	}
}
// handleClientConnUpdate handles updates to the service config.
// If the RLS server name or the RLS RPC timeout changes, it updates the control
// channel accordingly.
// TODO(easwars): Handle updates to other fields in the service config.
func (lb *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) {
	logger.Infof("rls: service config: %+v", ccs.BalancerConfig)
	lb.mu.Lock()
	defer lb.mu.Unlock()
	// Drop updates that race with Close.
	if lb.done.HasFired() {
		logger.Warning("rls: received service config after balancer close")
		return
	}
	// NOTE(review): this assertion panics if the config is ever of another
	// type — presumably the config parser guarantees *lbConfig; confirm.
	newCfg := ccs.BalancerConfig.(*lbConfig)
	// NOTE(review): on the very first update lb.lbCfg is nil; this assumes
	// (*lbConfig).Equal tolerates a nil receiver — confirm.
	if lb.lbCfg.Equal(newCfg) {
		logger.Info("rls: new service config matches existing config")
		return
	}
	lb.updateControlChannel(newCfg)
	lb.lbCfg = newCfg
}
// UpdateClientConnState pushes the received ClientConnState update on the
// update channel; the run goroutine processes it asynchronously.
// Implements balancer.Balancer interface.
func (lb *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
	update := &ccs
	select {
	case <-lb.done.Done():
		// Balancer already closed; silently drop the update.
	case lb.ccUpdateCh <- update:
	}
	return nil
}
// ResolverError implements balancer.Balancer interface.
func (lb *rlsBalancer) ResolverError(error) {
	// ResolverError is called by gRPC when the name resolver reports an error.
	// TODO(easwars): How do we handle this?
	// Fixed the log message: it previously read "not yet unimplemented"
	// (double negative).
	logger.Fatal("rls: ResolverError is not yet implemented")
}
// UpdateSubConnState implements balancer.Balancer interface.
// SubConn state handling is not supported yet; any invocation aborts the
// program via logger.Fatal.
func (lb *rlsBalancer) UpdateSubConnState(_ apis.SubConn, _ balancer.SubConnState) {
	logger.Fatal("rls: UpdateSubConnState is not yet implemented")
}
// Close cleans up the resources allocated by the LB policy, including the
// clientConn to the RLS server.
// Implements balancer.Balancer.
func (lb *rlsBalancer) Close() {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	// Signal the run goroutine and the update handlers to stop first, then
	// tear down the control channel if one was ever established.
	lb.done.Fire()
	if cc := lb.rlsCC; cc != nil {
		cc.Close()
	}
}
// updateControlChannel updates the RLS client if required.
// Caller must hold lb.mu.
func (lb *rlsBalancer) updateControlChannel(newCfg *lbConfig) {
	oldCfg := lb.lbCfg
	if newCfg.lookupService == oldCfg.lookupService && newCfg.lookupServiceTimeout == oldCfg.lookupServiceTimeout {
		return
	}
	// The RPC timeout from the new config is used in all cases below: the old
	// "keep unless different" dance always resolved to the new value anyway.
	timeout := newCfg.lookupServiceTimeout
	if newCfg.lookupService == oldCfg.lookupService {
		// Only the timeout has changed. Continue to use the existing
		// clientConn, but create a new rlsClient with the new timeout.
		lb.rlsC = newRLSClientFunc(lb.rlsCC, lb.opts.Target.Endpoint, timeout)
		return
	}
	// The RLS server name has changed. We need to create a new clientConn and
	// close the old one.
	var dopts []grpc.DialOption
	if dialer := lb.opts.Dialer; dialer != nil {
		dopts = append(dopts, grpc.WithContextDialer(dialer))
	}
	dopts = append(dopts, dialCreds(lb.opts))
	cc, err := grpc.Dial(newCfg.lookupService, dopts...)
	if err != nil {
		logger.Errorf("rls: dialRLS(%s, %v): %v", newCfg.lookupService, lb.opts, err)
		// An error from a non-blocking dial indicates something serious. We
		// should continue to use the old control channel if one exists, and
		// return so that the rest of the config updates can be processed.
		return
	}
	if lb.rlsCC != nil {
		lb.rlsCC.Close()
	}
	lb.rlsCC = cc
	lb.rlsC = newRLSClientFunc(cc, lb.opts.Target.Endpoint, timeout)
}
// dialCreds picks the transport-credentials DialOption for the control
// channel to the RLS server.
func dialCreds(opts balancer.BuildOptions) grpc.DialOption {
	// The control channel should use the same authority as that of the parent
	// channel. This ensures that the identity of the RLS server and that of
	// the backend is the same, so if the RLS config is injected by an
	// attacker, it cannot cause leakage of private information contained in
	// headers set by the application.
	server := opts.Target.Authority
	if creds := opts.DialCreds; creds != nil {
		if err := creds.OverrideServerName(server); err != nil {
			logger.Warningf("rls: OverrideServerName(%s) = (%v), using Insecure", server, err)
			return grpc.WithInsecure()
		}
		return grpc.WithTransportCredentials(creds)
	}
	if bundle := opts.CredsBundle; bundle != nil {
		return grpc.WithTransportCredentials(bundle.TransportCredentials())
	}
	logger.Warning("rls: no credentials available, using Insecure")
	return grpc.WithInsecure()
}
|
package main
import (
"fmt"
)
// person declares a new struct type for the example.
type person struct {
	name string
	age  int
}

// Older compares the two people's ages and returns the older person together
// with the (non-negative) age difference.
// Note: structs are passed by value here.
func Older(p1, p2 person) (person, int) {
	// Fixed: the original compared p1.age > p1.age (always false), so p2 was
	// always returned and the difference could be negative.
	if p1.age > p2.age {
		return p1, p1.age - p2.age
	}
	return p2, p2.age - p1.age
}
func main(){
var tom person
// 赋值初始化
tom.name,tom.age="tom",18
//两个字段都写情书的初始化
bob:=person{age:25,name:"BOb"}
//按照struct定义顺序初始化值
paul:=person{"Paul",43}
tb_Older, tb_diff := Older(tom, bob)
tp_Older, tp_diff := Older(tom, paul)
bp_Older, bp_diff := Older(bob, paul)
fmt.Printf("Of %s and %s, %s is older by %d years\n",
tom.name, bob.name, tb_Older.name, tb_diff)
fmt.Printf("Of %s and %s, %s is older by %d years\n",
tom.name, paul.name, tp_Older.name, tp_diff)
fmt.Printf("Of %s and %s, %s is older by %d years\n",
bob.name, paul.name, bp_Older.name, bp_diff)
} |
package main
import (
"log"
"net/http"
"os"
"strings"
)
// dir is the directory served over HTTP.
const dir = "."

// main serves dir over HTTP on the port named by the PORT environment
// variable, disabling caching and fixing up the MIME type for WebAssembly
// binaries so browsers will instantiate them.
func main() {
	fs := http.FileServer(http.Dir(dir))
	log.Printf("Serving "+dir+" on http://localhost:%+v", os.Getenv("PORT"))
	err := http.ListenAndServe(":"+os.Getenv("PORT"), http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
		// Always revalidate: recompiled wasm bundles must not be cached.
		resp.Header().Add("Cache-Control", "no-cache")
		if strings.HasSuffix(req.URL.Path, ".wasm") {
			resp.Header().Set("content-type", "application/wasm")
		}
		fs.ServeHTTP(resp, req)
	}))
	// Fixed: the ListenAndServe error (e.g. bad PORT, port in use) was
	// silently discarded, exiting with status 0 and no diagnostic.
	if err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
)
// main reads JSON documents line-by-line from stdin and writes each back to
// stdout pretty-printed with a one-space indent. Invalid JSON aborts with a
// panic; a scanner failure exits with status 1.
func main() {
	in := bufio.NewScanner(os.Stdin)
	for in.Scan() {
		line := in.Text()
		var parsed map[string]interface{}
		if err := json.Unmarshal([]byte(line), &parsed); err != nil {
			panic(err)
		}
		pretty, err := json.MarshalIndent(parsed, "", " ")
		if err != nil {
			panic(err)
		}
		pretty = append(pretty, '\n')
		os.Stdout.Write(pretty)
	}
	if err := in.Err(); err != nil {
		fmt.Fprintln(os.Stderr, "error:", err)
		os.Exit(1)
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"net"
"os"
"runtime"
"strings"
)
// main resolves the listen address from the PORT environment variable
// (defaulting to 9999) and starts the TCP server, exiting on any listen
// failure.
func main() {
	const defaultPort = "9999"
	port, ok := os.LookupEnv("PORT")
	if !ok {
		port = defaultPort
	}
	addr := fmt.Sprintf("%s:%s", "0.0.0.0", port)
	if err := start(addr); err != nil {
		log.Fatal(err)
	}
}
// start listens on addr and serves incoming TCP connections sequentially.
// It returns only on a listen error; per-connection accept errors are logged
// and the loop continues.
func start(addr string) (err error) {
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return fmt.Errorf("can't listen %s: %w", addr, err)
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Printf("can't accept: %v", err)
			continue
		}
		// Fixed: "accept connection" was logged before the error check,
		// producing a misleading log line on failed accepts.
		log.Print("accept connection")
		log.Print("handle connection")
		handleConn(conn)
	}
}
// apache
// nginx
// IIS
// Apache Tomcat, Jetty
// go http server
// Request-Line\r\n
// Headers\r\n
// Headers\r\n
// \r\n
// Body
func handleConn(conn net.Conn) {
defer conn.Close()
log.Print("read request to buffer")
const maxHeaderSize = 4096
reader := bufio.NewReaderSize(conn, maxHeaderSize)
writer := bufio.NewWriter(conn)
counter := 0
buf := [maxHeaderSize]byte{}
// naive header limit
for {
if counter == maxHeaderSize {
log.Printf("too long request header")
writer.WriteString("HTTP/1.1 413 Payload Too Large\r\n")
writer.WriteString("Content-Length: 0\r\n")
writer.WriteString("Connection: close\r\n")
writer.WriteString("\r\n")
writer.Flush()
return
}
read, err := reader.ReadByte()
if err != nil {
log.Printf("can't read request line: %v", err)
writer.WriteString("HTTP/1.1 400 Bad Request\r\n")
writer.WriteString("Content-Length: 0\r\n")
writer.WriteString("Connection: close\r\n")
writer.WriteString("\r\n")
writer.Flush()
return
}
buf[counter] = read
counter++
if counter < 4 {
continue
}
if string(buf[counter-4:counter]) == "\r\n\r\n" {
break
}
}
log.Print("headers found")
headersStr := string(buf[:counter - 4])
headers := make(map[string]string) // TODO: в оригинале map[string][]string
requestHeaderParts := strings.Split(headersStr, "\r\n")
log.Print("parse request line")
requestLine := requestHeaderParts[0]
log.Printf("request line: %s", requestLine)
log.Print("parse headers")
for _, headerLine := range requestHeaderParts[1:] {
headerParts := strings.SplitN(headerLine, ": ", 2)
headers[strings.TrimSpace(headerParts[0])] = strings.TrimSpace(headerParts[1]) // TODO: are we allow empty header?
}
log.Printf("headers: %v", headers)
html := fmt.Sprintf(`<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>Document</title>
</head>
<body>
<h1>Hello from golang %s</h1>
</body>
</html>`, runtime.Version())
log.Print("send response")
writer.WriteString("HTTP/1.1 200 OK\r\n")
writer.WriteString(fmt.Sprintf("Content-Length: %d\r\n", len(html)))
writer.WriteString("Connection: close\r\n")
writer.WriteString("\r\n")
writer.WriteString(html)
writer.Flush()
log.Print("done")
return
}
|
package cmd
import (
"errors"
"net"
"github.com/michaelhenkel/gokvm/network"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
var (
	// Flag-bound configuration for the network subcommands (see init below).
	subnet      string // subnet in CIDR notation
	gateway     string // gateway IP; empty means "derive from subnet"
	dnsServer   string // DNS server IP; empty means "use the gateway"
	dhcp        bool   // whether DHCP is enabled for the network
	networkType string // "bridge" or "ovs"
)
// init registers the network-related command line flags.
// The usage strings were previously empty, making --help useless for these
// flags.
func init() {
	cobra.OnInitialize(initNetworkConfig)
	createNetworkCmd.PersistentFlags().StringVarP(&subnet, "subnet", "s", "", "subnet in CIDR notation, e.g. 192.168.0.0/24")
	createNetworkCmd.PersistentFlags().StringVarP(&gateway, "gateway", "g", "", "gateway IP address; defaults to the first address of the subnet")
	createNetworkCmd.PersistentFlags().StringVarP(&dnsServer, "dnsserver", "d", "", "DNS server IP address; defaults to the gateway")
	createNetworkCmd.PersistentFlags().BoolVarP(&dhcp, "dhcp", "a", true, "enable DHCP for the network")
	createNetworkCmd.PersistentFlags().StringVarP(&networkType, "type", "t", "bridge", "network type: bridge or ovs")
}
// initNetworkConfig is the cobra initialization hook for the network
// commands. It is currently a placeholder with no configuration to load.
func initNetworkConfig() {
}
// createNetwork validates the flag-provided network parameters and creates
// the network.
// Fixed: validation failures are now returned as errors, matching the
// declared signature, instead of terminating the process via log.Fatal.
func createNetwork() error {
	if name == "" {
		return errors.New("name is required")
	}
	if err := checkSubnet(subnet); err != nil {
		return err
	}
	if err := checkGateway(gateway, subnet); err != nil {
		return err
	}
	if err := checkDNS(dnsServer, subnet); err != nil {
		return err
	}
	if err := checkNetworkType(networkType); err != nil {
		return err
	}
	_, snipnet, err := net.ParseCIDR(subnet)
	if err != nil {
		return err
	}
	var gatewayIP, dnsIP net.IP
	if gateway == "" {
		// Default gateway: the first usable address of the subnet. Increment
		// a copy so the network address stored in Subnet is not mutated.
		gatewayIP = append(net.IP(nil), snipnet.IP...)
		inc(gatewayIP)
	} else {
		gatewayIP = net.ParseIP(gateway)
	}
	if dnsServer == "" {
		dnsIP = gatewayIP
	} else {
		dnsIP = net.ParseIP(dnsServer)
	}
	newNetwork := &network.Network{
		Name:      name,
		DHCP:      dhcp,
		Subnet:    snipnet,
		DNSServer: dnsIP,
		Gateway:   gatewayIP,
		Type:      network.NetworkType(networkType),
	}
	return newNetwork.Create()
}
func inc(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
// createNetworkCmd creates a virtual network from the configured flags.
var createNetworkCmd = &cobra.Command{
	Use:   "network",
	Short: "creates a network",
	// Fixed: the long description was copy-pasted boilerplate from the cobra
	// docs ("All software has versions. This is Hugo's").
	Long: `Creates a virtual network with the configured subnet, gateway, DNS server, DHCP setting and type.`,
	Run: func(cmd *cobra.Command, args []string) {
		if err := createNetwork(); err != nil {
			panic(err)
		}
	},
}
// deleteNetworkCmd deletes the network identified by the name flag.
var deleteNetworkCmd = &cobra.Command{
	Use:   "network",
	Short: "deletes a network",
	// Fixed: the long description was copy-pasted boilerplate from the cobra
	// docs ("All software has versions. This is Hugo's").
	Long: `Deletes the virtual network with the given name.`,
	Run: func(cmd *cobra.Command, args []string) {
		if err := deleteNetwork(); err != nil {
			panic(err)
		}
	},
}
// listNetworkCmd lists all known networks.
var listNetworkCmd = &cobra.Command{
	Use:   "network",
	Short: "lists networks",
	// Fixed: the long description was copy-pasted boilerplate from the cobra
	// docs ("All software has versions. This is Hugo's").
	Long: `Lists all existing virtual networks.`,
	Run: func(cmd *cobra.Command, args []string) {
		if err := listNetwork(); err != nil {
			panic(err)
		}
	},
}
// listNetwork fetches all networks and renders them to the terminal.
func listNetwork() error {
	nets, err := network.List()
	if err != nil {
		return err
	}
	network.Render(nets)
	return nil
}
// checkSubnet validates that subnet is a non-empty, parseable CIDR string.
func checkSubnet(subnet string) error {
	if subnet == "" {
		return errors.New("subnet must be specified")
	}
	_, _, err := net.ParseCIDR(subnet)
	return err
}
// deleteNetwork deletes the network identified by the global name flag.
// Fixed: a missing name is now returned as an error, matching the declared
// signature, instead of terminating the process via log.Fatal.
func deleteNetwork() error {
	if name == "" {
		return errors.New("name is required")
	}
	newNetwork := &network.Network{
		Name: name,
	}
	return newNetwork.Delete()
}
// checkGateway validates that gateway, when given, is a well-formed IP that
// lies inside subnet. An empty gateway is accepted (a default is derived
// elsewhere).
func checkGateway(gateway string, subnet string) error {
	if gateway == "" {
		return nil
	}
	gwIP := net.ParseIP(gateway)
	if gwIP == nil {
		return errors.New("invalid gateway ip")
	}
	_, ipnet, err := net.ParseCIDR(subnet)
	if err != nil {
		return err
	}
	if !ipnet.Contains(gwIP) {
		return errors.New("gateway ip not part of subnet")
	}
	return nil
}
// checkDNS validates that dns, when given, is a well-formed IP inside
// subnet. An empty dns is accepted (the gateway is used as the default).
func checkDNS(dns string, subnet string) error {
	if dns == "" {
		return nil
	}
	dnsIP := net.ParseIP(dns)
	if dnsIP == nil {
		return errors.New("invalid dns ip")
	}
	_, ipnet, err := net.ParseCIDR(subnet)
	if err != nil {
		return err
	}
	if !ipnet.Contains(dnsIP) {
		return errors.New("dns ip not part of subnet")
	}
	return nil
}
// checkNetworkType validates that networkType, when given, names one of the
// supported implementations (ovs or bridge). Empty is accepted.
func checkNetworkType(networkType string) error {
	switch networkType {
	case "", string(network.OVS), string(network.BRIDGE):
		return nil
	default:
		return errors.New("invalid networkType")
	}
}
|
package main
import (
"time"
"github.com/gin-gonic/gin"
)
// timer is the production implementation of timerInterface; it delegates to
// time.Sleep.
type timer struct{}

// timerInterface abstracts sleeping so handlers can substitute a fake clock
// in tests.
type timerInterface interface {
	Sleep(d time.Duration)
}

// Sleep blocks the calling goroutine for d.
func (t *timer) Sleep(d time.Duration) {
	time.Sleep(d)
}
// timerMiddleware stores a real timer in the request context under the
// "timerAPI" key so downstream handlers can sleep through the
// timerInterface abstraction, then passes control to the next handler.
func timerMiddleware(c *gin.Context) {
	c.Set("timerAPI", &timer{})
	c.Next()
}
|
package domain
import (
"time"
"github.com/traPtitech/trap-collection-server/src/domain/values"
)
// OIDCSession holds an OIDC access token together with the instant at which
// it expires.
type OIDCSession struct {
	accessToken values.OIDCAccessToken
	expiresAt   time.Time // token is considered invalid strictly after this instant
}
// NewOIDCSession builds an OIDCSession from an access token and its expiry
// time.
func NewOIDCSession(accessToken values.OIDCAccessToken, expiresAt time.Time) *OIDCSession {
	session := OIDCSession{
		accessToken: accessToken,
		expiresAt:   expiresAt,
	}
	return &session
}
// GetAccessToken returns the session's access token.
func (s *OIDCSession) GetAccessToken() values.OIDCAccessToken {
	return s.accessToken
}

// GetExpiresAt returns the instant at which the session expires.
func (s *OIDCSession) GetExpiresAt() time.Time {
	return s.expiresAt
}

// IsExpired reports whether the session's expiry instant has already passed.
func (s *OIDCSession) IsExpired() bool {
	return time.Now().After(s.expiresAt)
}
|
package Largest_Rectangle_in_Histogram
// largestRectangleArea returns the area of the largest axis-aligned
// rectangle that fits under the histogram described by heights
// (classic monotonic-stack algorithm, O(n)).
func largestRectangleArea(heights []int) int {
	if len(heights) == 0 {
		return 0
	}
	best := -1
	stk := NewStack(len(heights))
	for idx, h := range heights {
		// Pop every bar at least as tall as the current one; each popped bar
		// is the limiting height of a rectangle ending just before idx.
		for stk.Len() > 0 && h <= heights[stk.Back()] {
			top := stk.Pop()
			if area := heights[top] * (idx - stk.Back() - 1); area > best {
				best = area
			}
		}
		stk.Push(idx)
	}
	// Drain the stack: the remaining bars extend to the right edge.
	rightEdge := len(heights)
	for stk.Len() != 0 {
		top := stk.Pop()
		if area := heights[top] * (rightEdge - stk.Back() - 1); area > best {
			best = area
		}
	}
	return best
}

// stack is a fixed-capacity LIFO of ints used as the monotonic index stack.
type stack struct {
	data  []int
	index int // index of the top element; -1 when empty
}

// NewStack returns an empty stack able to hold cap elements.
func NewStack(cap int) *stack {
	return &stack{data: make([]int, cap), index: -1}
}

// Push places v on top of the stack.
func (s *stack) Push(v int) {
	s.index++
	s.data[s.index] = v
}

// Pop removes and returns the top element, or -1 when the stack is empty.
func (s *stack) Pop() int {
	if s.index < 0 {
		return -1
	}
	top := s.data[s.index]
	s.index--
	return top
}

// Len reports the number of elements currently on the stack.
func (s *stack) Len() int {
	return s.index + 1
}

// Back returns the top element without removing it, or -1 when empty.
func (s *stack) Back() int {
	if s.index < 0 {
		return -1
	}
	return s.data[s.index]
}
|
package lib
import (
"time"
vegeta "github.com/tsenart/vegeta/lib"
)
// LoadTest uses the vegeta library to run a constant-rate HTTP GET load test
// against target for the given duration and returns the collected metrics.
// Fixed the misspelled parameter name (requestsPerScond).
func LoadTest(target string, duration time.Duration, requestsPerSecond int) vegeta.Metrics {
	rate := vegeta.Rate{Freq: requestsPerSecond, Per: time.Second}
	targeter := vegeta.NewStaticTargeter(vegeta.Target{
		Method: "GET",
		URL:    target,
	})
	attacker := vegeta.NewAttacker(
		// Cap each individual request so one stalled target cannot wedge the run.
		vegeta.Timeout(30 * time.Second),
	)
	var metrics vegeta.Metrics
	for res := range attacker.Attack(targeter, rate, duration, "Big Bang!") {
		metrics.Add(res)
	}
	metrics.Close()
	return metrics
}
|
package main
import (
"html/template"
"net/http"
"sort"
"github.com/bake/qual-o-mat/qualomat"
"github.com/pkg/errors"
)
// sortByDate orders elections ascending by their date, at second
// granularity.
type sortByDate []*qualomat.Election

func (e sortByDate) Len() int { return len(e) }

// Less compares Unix timestamps (whole seconds); sub-second differences are
// ignored.
func (e sortByDate) Less(i, j int) bool { return e[i].Date.Unix() < e[j].Date.Unix() }
func (e sortByDate) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
// handleElections returns a handler that renders the list of elections,
// newest first. Templates and election data are loaded once, at
// handler-construction time; a failure in either returns an error handler
// instead.
func (s *server) handleElections() http.HandlerFunc {
	tmpl, err := template.ParseFiles("templates/main.html", "templates/elections.html")
	if err != nil {
		return s.handleError(errors.Wrap(err, "could not parse template"), 500)
	}
	// Guard the election source while loading; the loaded slice is then
	// captured by the returned closure.
	s.mu.Lock()
	defer s.mu.Unlock()
	elections, err := s.qom.Elections()
	if err != nil {
		return s.handleError(errors.Wrap(err, "could not parse elections"), 500)
	}
	sort.Sort(sort.Reverse(sortByDate(elections)))
	type response struct{ Elections []*qualomat.Election }
	return func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): Execute's error is ignored; a failure mid-write
		// cannot produce a clean 500 anyway, but consider logging it.
		tmpl.Execute(w, response{elections})
	}
}
|
package main
import (
	"fmt"
	_ "runtime" // kept from the original import list (currently unused); blank-imported so the file compiles
	"sync"
	"time"
)
/**
If a map is read and written by multiple goroutines at the same time, the
runtime aborts with "fatal error: concurrent map read and map write".
The code below easily triggers that concurrent read/write problem.
**/
// ERROR CODE 1
// UserAges maps a user name to an age, guarded (incompletely — see Get) by
// the embedded mutex.
type UserAges struct {
	ages map[string]int
	sync.Mutex
}
// Add records age for name under the mutex.
// NOTE(review): ua.ages must have been initialized elsewhere; writing to a
// nil map panics.
func (ua *UserAges) Add(name string, age int) {
	ua.Lock()
	defer ua.Unlock()
	ua.ages[name] = age
}
// Get returns the age recorded for name, or -1 when absent.
// BUG(demo): this read takes no lock, so Get racing with Add is exactly the
// concurrent map read/write failure this example exists to demonstrate.
func (ua *UserAges) Get(name string) int {
	if age, ok := ua.ages[name]; ok {
		return age
	}
	return -1
}
// ERROR CODE 2
// main demonstrates the crash from unsynchronized concurrent map access: one
// goroutine writes c while another reads it, which the runtime reports as
// "fatal error: concurrent map read and map write".
func main() {
	c := make(map[string]int)
	// Writer goroutine.
	go func() {
		for j := 0; j < 1000; j++ {
			c[fmt.Sprintf("%d", j)] = j
		}
	}()
	// Reader goroutine. Fixed: fmt.Println was misspelled as fmt.println and
	// the function literal was declared but never invoked (missing "()"), so
	// the file did not compile.
	go func() {
		for j := 0; j < 1000; j++ {
			fmt.Println(c[fmt.Sprintf("%d", j)])
		}
	}()
	// Keep the process alive long enough for the race to manifest.
	time.Sleep(time.Second * 20)
}
/**
解决方法
**/
// 1. Plain mutex: guard every map access with a sync.Mutex.
type Demo struct {
	Data map[string]string
	Lock sync.Mutex
}

// Get returns the value stored under k (the zero value when absent).
// Fixed: the receiver must be a pointer — with the former value receiver
// each call locked a *copy* of the mutex (go vet: "passes lock by value"),
// so the lock protected nothing.
func (d *Demo) Get(k string) string {
	d.Lock.Lock()
	defer d.Lock.Unlock()
	return d.Data[k]
}

// Set stores v under k while holding the mutex.
func (d *Demo) Set(k, v string) {
	d.Lock.Lock()
	defer d.Lock.Unlock()
	d.Data[k] = v
}
// 2. 读写锁
type MapTplResCode struct {
Data map[string]int
Lock *sync.RWMutex
}
func (d MapTplResCode) Get(k string) (int, bool) {
d.Lock.RLock()
defer d.Lock.RUnlock()
if v, ok := d.Data[k]; ok {
return v, true
}
return 0, false
}
func (d MapTplResCode) Set(k string, v int) {
d.Lock.Lock()
defer d.Lock.Unlock()
d.Data[k] = v
}
func (d MapTplResCode) Init() {
d.Lock.Lock()
defer d.Lock.Unlock()
for key, _ := range d.Data {
delete(d.Data, key)
}
}
|
package password
import(
"crypto/rand"
"encoding/base64"
"log"
)
const (
	// PASSLEN is the length, in characters, of a generated password.
	PASSLEN int = 8
)

// GenerateRandomPassword returns a PASSLEN-character password derived from
// cryptographically secure random bytes (URL-safe base64 alphabet).
// Fixed: the rand.Read error was ignored, which could silently encode
// zeroed (predictable) bytes; a failing secure-random source now panics.
func GenerateRandomPassword() string {
	pass := make([]byte, PASSLEN)
	if _, err := rand.Read(pass); err != nil {
		panic(err)
	}
	finalpass := base64.URLEncoding.EncodeToString(pass)
	return finalpass[0:PASSLEN]
}
// GenerateRandomPasswords returns noOfPasswords independently generated
// random passwords.
func GenerateRandomPasswords(noOfPasswords int) []string {
	log.Println("*Generating random passwords*")
	passwords := make([]string, noOfPasswords)
	for i := range passwords {
		passwords[i] = GenerateRandomPassword()
	}
	return passwords
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package base
import (
"context"
"time"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// errEnterpriseNotEnabled is the fixed error returned by the OSS stub below.
var errEnterpriseNotEnabled = errors.New("OSS binaries do not include enterprise features")

// CheckEnterpriseEnabled returns a non-nil error if the requested enterprise
// feature is not enabled, including information or a link explaining how to
// enable it.
//
// This function is overridden by an init hook in CCL builds.
var CheckEnterpriseEnabled = func(_ *cluster.Settings, _ uuid.UUID, org, feature string) error {
	return errEnterpriseNotEnabled // nb: this is squarely in the hot path on OSS builds
}
// TimeToEnterpriseLicenseExpiry returns a duration object that measures the time until
// the currently set enterprise license expires starting from the 3rd argument
// passed in.
//
// This function is overridden by an init hook in CCL builds; the OSS stub
// always reports a zero duration and no error.
var TimeToEnterpriseLicenseExpiry = func(
	ctx context.Context, _ *cluster.Settings, _ time.Time,
) (time.Duration, error) {
	return 0, nil
}
// LicenseType returns what type of license the cluster is running with, or
// "OSS" if it is an OSS build.
//
// This function is overridden by an init hook in CCL builds; the OSS stub
// always reports "OSS".
var LicenseType = func(st *cluster.Settings) (string, error) {
	return "OSS", nil
}
|
package main
import "net/http"
// main serves the current working directory over HTTP on port 3000.
func main() {
	print("server is running on http://0.0.0.0:3000")
	// Fixed: the ListenAndServe error (e.g. port already in use) was
	// silently discarded, exiting with status 0 and no diagnostic.
	if err := http.ListenAndServe(":3000", http.FileServer(http.Dir("."))); err != nil {
		panic(err)
	}
}
|
package epaxospb
import (
"github.com/google/btree"
)
// Less implements the btree.Item interface, ordering instance states by
// their instance number.
// NOTE(review): the type assertion panics if an item of a different type is
// ever inserted into the same btree — presumably the tree holds only
// *InstanceState; confirm.
func (ihs *InstanceState) Less(than btree.Item) bool {
	return ihs.InstanceNum < than.(*InstanceState).InstanceNum
}
|
package corp
import "fmt"
// ICorp is the component interface of the composite: every node exposes its
// info and a link to its parent.
type ICorp interface {
	GetInfo() string
	SetParent(corp ICorp)
	GetParent() ICorp
}

// IBranch is the composite interface for nodes that manage subordinates.
type IBranch interface {
	AddSubordinate(corp ICorp)
	GetSubordinateInfo() []ICorp
}

//---------------------------------------------------------------

// Corp is the shared base implementation of ICorp.
type Corp struct {
	name     string
	position string
	salary   int
	Parent   ICorp
}

// NewCorp builds a Corp with the given name, position and salary, and no
// parent.
func NewCorp(name, position string, salary int) *Corp {
	corp := &Corp{}
	corp.name = name
	corp.position = position
	corp.salary = salary
	return corp
}

// GetInfo renders the employee's fields as one descriptive string.
func (c *Corp) GetInfo() string {
	return fmt.Sprintf("name: %s, position: %s, salary: %d", c.name, c.position, c.salary)
}

// SetParent records the node's parent in the hierarchy.
func (c *Corp) SetParent(corp ICorp) {
	c.Parent = corp
}

// GetParent returns the node's parent, or nil at the root.
func (c *Corp) GetParent() ICorp {
	return c.Parent
}
//------------------------------------------------------------------------------
// Leaf is a hierarchy node without subordinates.
type Leaf struct {
	Corp
}

// NewLeaf builds a Leaf with the given employee data.
func NewLeaf(name, position string, salary int) *Leaf {
	leaf := Leaf{Corp: *NewCorp(name, position, salary)}
	return &leaf
}
//------------------------------------------------------------------------------
// Branch is a hierarchy node that manages a list of direct subordinates.
type Branch struct {
	Corp
	SubordinateList []ICorp
}

// NewBranch builds a Branch with the given employee data and no
// subordinates.
func NewBranch(name, position string, salary int) *Branch {
	branch := Branch{Corp: *NewCorp(name, position, salary)}
	branch.SubordinateList = []ICorp{}
	return &branch
}

// AddSubordinate links corp under this branch and records it in the
// subordinate list.
func (b *Branch) AddSubordinate(corp ICorp) {
	corp.SetParent(b)
	b.SubordinateList = append(b.SubordinateList, corp)
}

// GetSubordinateInfo returns the branch's direct subordinates.
func (b *Branch) GetSubordinateInfo() []ICorp {
	return b.SubordinateList
}
|
package taoke
import (
"fmt"
"bytes"
"common"
"errors"
"io/ioutil"
"encoding/json"
"github.com/mahonia"
log "code.google.com/p/log4go"
)
// ItemInfo is one row of the taobaoke commission-detail report; all fields
// are kept as the raw strings scraped from the report HTML.
type ItemInfo struct {
	Date        string // report date of the order row
	Id          string // item id, from the item link's "id=" parameter
	Name        string // item title
	ShopId      string // shop id, from the shop link's "oid=" parameter
	ShopName    string
	Count       string // purchase quantity
	Price       string
	State       string // order state text
	Transaction string // transaction amount
	Commission  string
	Income      string
}
// GetTaokeDetail scrapes the taobaoke commission-detail report for the given
// account between startTime and endTime, paging through the report until an
// empty page, and returns the collected rows as JSON.
// The parsing works by scanning raw HTML with byte-index searches; the
// numeric prefixes in the error strings ("1parse", "2parse", ...) identify
// which scan step failed.
func GetTaokeDetail(account, startTime, endTime string) (data []byte, err error) {
	log.Info("request: %s, %s, %s", account, startTime, endTime)
	items := make([]ItemInfo, 0)
	page := 1
	for {
		have := false
		searchurl := fmt.Sprintf("http://u.alimama.com/union/newreport/taobaokeDetail.htm?toPage=%d&perPageSize=20&startTime=%s&endTime=%s", page, startTime, endTime)
		log.Error(searchurl)
		body, err := common.GetPage(account, searchurl)
		if err != nil {
			return nil, err
		}
		// Pages served as GBK are transcoded to UTF-8 before scanning.
		i := bytes.Index(body, []byte("charset=GBK"))
		if i != -1 {
			d:=mahonia.NewDecoder("gbk")
			r := d.NewReader(bytes.NewBuffer(body))
			body, _ = ioutil.ReadAll(r)
		}
		/* login */
		i = bytes.Index(body, []byte("<title>阿里妈妈-阿里妈妈登录页面</title>"))
		if i != -1 {
			return nil, errors.New("account need login.")
		}
		/* when parse error, log page */
		// NOTE(review): this defer is registered once per loop iteration but
		// only runs at function return, so on a failure every previously
		// fetched page body gets logged, not just the failing one.
		defer func() {
			if data == nil {
				log.Error(string(body))
			}
		}()
		i = bytes.Index(body, []byte("<table class=\"med-table med-list-s\">"))
		if i == -1 {
			return nil, errors.New("1parse taoke detail page failed")
		}
		start := bytes.Index(body[i:], []byte("<tbody>"))
		if start == -1 {
			return nil, errors.New("2parse taoke detail page failed")
		}
		i = i + start + len("<tbody>")
		end := bytes.Index(body[i:], []byte("</tbody>"))
		if end == -1 {
			return nil, errors.New("3parse taoke detail page failed")
		}
		/* error */
		// A "med-tip" block inside the table marks an empty result page:
		// stop paging.
		ei := bytes.Index(body[i:], []byte("<div class=\"med-tip\">"))
		if ei != -1 {
			break
		}
		// Split the table body into rows, then each row into cells; the cell
		// index (see the switch below) determines which ItemInfo field the
		// cell text lands in.
		trs := bytes.Split(bytes.TrimSpace(body[i:i+end]), []byte("<tr>"))
		for _, tr := range(trs) {
			if len(tr) == 0 {
				continue
			}
			i = bytes.Index(tr, []byte("</tr>"))
			if i == -1 {
				return nil, errors.New("4parse taoke detail page failed")
			}
			tr = bytes.TrimSpace(tr[:i])
			tds := bytes.Split(tr, []byte("<td"))
			item := ItemInfo{}
			for index, td := range(tds) {
				if len(td) == 0 {
					continue
				}
				i = bytes.Index(td, []byte("</td>"))
				if i == -1 {
					return nil, errors.New("5parse taoke detail page failed")
				}
				td = bytes.TrimSpace(td[:i])
				switch index {
				case 1:
					// Cell 1: order date.
					i = bytes.Index(td, []byte(">"))
					if i == -1 {
						return nil, errors.New("6parse taoke detail page failed")
					}
					item.Date = string(td[i+1:])
				case 2:
					// Cell 2: item id/name and shop id/name, pulled from the
					// embedded links ("id=" and "oid=" URL parameters).
					i = bytes.Index(td, []byte("id="))
					if i == -1 {
						return nil, errors.New("7parse taoke detail page failed")
					}
					td = td[i+3:]
					i = bytes.Index(td, []byte("\""))
					if i == -1 {
						return nil, errors.New("8parse taoke detail page failed")
					}
					//
					item.Id = string(td[:i])
					td = td[i+2:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("8parse taoke detail page failed")
					}
					//
					item.Name = string(td[:i])
					td = td[i:]
					i = bytes.Index(td, []byte("oid="))
					if i == -1 {
						return nil, errors.New("8parse taoke detail page failed")
					}
					td = td[i+4:]
					i = bytes.Index(td, []byte("\""))
					if i == -1 {
						return nil, errors.New("8parse taoke detail page failed")
					}
					item.ShopId = string(td[:i])
					td = td[i:]
					i = bytes.Index(td, []byte(">"))
					if i == -1 {
						return nil, errors.New("8parse taoke detail page failed")
					}
					td = td[i+1:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("8parse taoke detail page failed")
					}
					item.ShopName = string(td[:i])
				case 3:
					// Cell 3: purchase count.
					i = bytes.Index(td, []byte("2\">"))
					if i == -1 {
						return nil, errors.New("9parse taoke detail page failed")
					}
					td = td[i+3:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("10parse taoke detail page failed")
					}
					item.Count = string(td[:i])
				case 4:
					// Cell 4: unit price.
					i = bytes.Index(td, []byte("/i>"))
					if i == -1 {
						return nil, errors.New("11parse taoke detail page failed")
					}
					td = td[i+3:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("12parse taoke detail page failed")
					}
					item.Price = string(td[:i])
				case 5:
					// Cell 5: order state text inside a <span>.
					i = bytes.Index(td, []byte("<span"))
					if i == -1 {
						log.Info(string(td))
						return nil, errors.New("13parse taoke detail page failed")
					}
					td = td[i:]
					i = bytes.Index(td, []byte(">"))
					if i == -1 {
						return nil, errors.New("14parse taoke detail page failed")
					}
					td = td[i+1:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("15parse taoke detail page failed")
					}
					item.State = string(td[:i])
				case 6:
					continue
				case 7:
					// Cell 7: transaction amount.
					i = bytes.Index(td, []byte("/i>"))
					if i == -1 {
						return nil, errors.New("16parse taoke detail page failed")
					}
					td = td[i+3:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("17parse taoke detail page failed")
					}
					item.Transaction = string(td[:i])
				case 8:
					// Cell 8: commission.
					i = bytes.Index(td, []byte("2\">"))
					if i == -1 {
						return nil, errors.New("18parse taoke detail page failed")
					}
					td = td[i+3:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("19parse taoke detail page failed")
					}
					item.Commission = string(td[:i])
				case 9:
					continue
				case 10:
					continue
				case 11:
					// Cell 11: actual income.
					i = bytes.Index(td, []byte("/i>"))
					if i == -1 {
						return nil, errors.New("20parse taoke detail page failed")
					}
					td = td[i+3:]
					i = bytes.Index(td, []byte("<"))
					if i == -1 {
						return nil, errors.New("21parse taoke detail page failed")
					}
					item.Income = string(td[:i])
				}
			}
			have = true
			items = append(items, item)
		}
		// An iteration that produced no rows means the report is exhausted.
		if !have {
			break
		}
		page++
	}
	data, err = json.Marshal(items)
	if err != nil {
		return nil, err
	}
	return data, nil
}
|
package test
import (
"bytes"
"image"
"image/jpeg"
"io/ioutil"
"os"
"path"
"testing"
"github.com/stretchr/testify/require"
)
// RootDir walks level directories upward from the current working directory
// and returns the resulting path.
func RootDir(t *testing.T, level int) string {
	dir, err := os.Getwd()
	require.NoError(t, err)
	for level > 0 {
		dir = path.Dir(dir)
		level--
	}
	return dir
}
// SampleImage loads and decodes the checked-in test JPEG
// (test/testdata/nature.jpg), resolving the repository root by walking level
// directories up from the current working directory.
func SampleImage(t *testing.T, level int) image.Image {
	const imagePath = "test/testdata/nature.jpg"
	file, err := ioutil.ReadFile(path.Join(RootDir(t, level), imagePath))
	require.NoError(t, err)
	img, err := jpeg.Decode(bytes.NewReader(file))
	require.NoError(t, err)
	return img
}
|
package cluster
import (
"fmt"
"time"
"github.com/zdnscloud/cement/log"
"github.com/zdnscloud/cluster-agent/monitor/event"
"github.com/zdnscloud/cluster-agent/monitor/namespace"
"github.com/zdnscloud/cluster-agent/monitor/node"
"github.com/zdnscloud/gok8s/client"
)
// Monitor periodically samples cluster-wide resource usage and publishes
// threshold-violation events.
type Monitor struct {
	cli     client.Client
	stopCh  chan struct{}    // Start/Stop handshake channel
	eventCh chan interface{} // receives event.Event values
}
// Cluster aggregates resource capacity and usage over all nodes, plus
// per-storage-type sizes.
type Cluster struct {
	Cpu         int64
	CpuUsed     int64
	Memory      int64
	MemoryUsed  int64
	Pod         int64 // pod capacity
	PodUsed     int64
	StorageInfo map[string]event.StorageSize // keyed by storage type name
}
// New builds a Monitor that reads cluster state through cli and publishes
// events on ch.
func New(cli client.Client, ch chan interface{}) *Monitor {
	m := Monitor{cli: cli, eventCh: ch}
	m.stopCh = make(chan struct{})
	return &m
}
// Stop signals the Start loop to exit and blocks until the loop
// acknowledges by echoing on stopCh.
func (m *Monitor) Stop() {
	log.Infof("stop cluster monitor")
	m.stopCh <- struct{}{}
	<-m.stopCh
}
// Start runs the monitoring loop: every event.CheckInterval seconds it
// samples the cluster and checks it against the thresholds in cfg.
// It returns when Stop is called, echoing on stopCh to complete the
// shutdown handshake.
func (m *Monitor) Start(cfg *event.MonitorConfig) {
	log.Infof("start cluster monitor")
	for {
		// Non-blocking poll for a stop request between sampling rounds.
		select {
		case <-m.stopCh:
			m.stopCh <- struct{}{}
			return
		default:
		}
		cluster := getCluster(m.cli)
		m.check(cluster, cfg)
		time.Sleep(time.Duration(event.CheckInterval) * time.Second)
	}
}
// check emits an event and a log line for every resource whose
// utilization ratio exceeds the user-configured threshold in cfg.
// The cpu/memory/podcount branches were triplicated; they now share
// notifyHighUsage, with byte-identical messages.
func (m *Monitor) check(cluster *Cluster, cfg *event.MonitorConfig) {
	m.notifyHighUsage("cpu", cluster.CpuUsed, cluster.Cpu, cfg.Cpu)
	m.notifyHighUsage("memory", cluster.MemoryUsed, cluster.Memory, cfg.Memory)
	m.notifyHighUsage("podcount", cluster.PodUsed, cluster.Pod, cfg.PodCount)
	if cfg.Storage > 0 {
		// Storage uses different message wording, so it stays inline.
		for name, size := range cluster.StorageInfo {
			if size.Total <= 0 {
				continue
			}
			if ratio := (size.Used * event.Denominator) / size.Total; ratio > cfg.Storage {
				m.eventCh <- event.Event{
					Kind:    event.ClusterKind,
					Message: fmt.Sprintf("High storage utilization %d%% for storage type %s in cluster", ratio, name),
				}
				log.Infof("The storage utilization of the type %s is %d%%, higher than the threshold set by the user %d%%", name, ratio, cfg.Storage)
			}
		}
	}
}

// notifyHighUsage sends a cluster event and logs when used/total
// (scaled by event.Denominator) exceeds threshold. It is a no-op when
// the capacity or the threshold is unset (<= 0), matching the original
// per-resource guards.
func (m *Monitor) notifyHighUsage(resource string, used, total, threshold int64) {
	if total <= 0 || threshold <= 0 {
		return
	}
	ratio := (used * event.Denominator) / total
	if ratio <= threshold {
		return
	}
	m.eventCh <- event.Event{
		Kind:    event.ClusterKind,
		Message: fmt.Sprintf("High %s utilization %d%% in cluster", resource, ratio),
	}
	log.Infof("The %s utilization of the cluster is %d%%, higher than the threshold set by the user %d%%", resource, ratio, threshold)
}
// getCluster builds a cluster-wide snapshot: per-type storage sizes
// plus capacity/usage totals summed over every node.
func getCluster(cli client.Client) *Cluster {
	c := &Cluster{StorageInfo: namespace.GetStorage(cli)}
	for _, n := range node.GetNodes(cli) {
		c.Cpu += n.Cpu
		c.CpuUsed += n.CpuUsed
		c.Memory += n.Memory
		c.MemoryUsed += n.MemoryUsed
		c.Pod += n.Pod
		c.PodUsed += n.PodUsed
	}
	return c
}
|
package dmsghttp
// import (
// "io"
// "net/http"
// )
// func NewRequest2(method, url string, body io.Reader) (*http.Request, error) {
// req, err := http.NewRequest(method, url, body)
// req.Proto = "HTTP/dmsg"
// return req, err
// }
|
package server
import (
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/gempir/gempbot/internal/api"
"github.com/gempir/gempbot/internal/store"
)
// BlocksHandler serves the emote-block resource: GET lists one page of
// blocks, PATCH blocks a comma-separated list of emote IDs, DELETE
// removes a single block. All paths require an authenticated user,
// optionally acting as an editor for a managed channel.
func (a *Api) BlocksHandler(w http.ResponseWriter, r *http.Request) {
	authResp, _, apiErr := a.authClient.AttemptAuth(r, w)
	if apiErr != nil {
		return
	}
	userID := authResp.Data.UserID
	// When managing another channel, swap in the managed user's ID
	// after verifying editor rights.
	if r.URL.Query().Get("managing") != "" {
		userID, apiErr = a.userAdmin.CheckEditor(r, a.userAdmin.GetUserConfig(userID))
		if apiErr != nil {
			http.Error(w, apiErr.Error(), apiErr.Status())
			return
		}
	}
	switch r.Method {
	case http.MethodGet:
		pageParam := r.URL.Query().Get("page")
		if pageParam == "" {
			pageParam = "1"
		}
		pageNumber, err := strconv.Atoi(pageParam)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		api.WriteJson(w, a.db.GetEmoteBlocks(userID, pageNumber, 20), http.StatusOK)
	case http.MethodPatch:
		var req blockRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		var toBlock []string
		for _, emote := range strings.Split(req.EmoteIds, ",") {
			toBlock = append(toBlock, strings.TrimSpace(emote))
		}
		if err := a.db.BlockEmotes(userID, toBlock, req.EmoteType); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	case http.MethodDelete:
		var req deleteRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		if err := a.db.DeleteEmoteBlock(userID, req.EmoteID, req.Type); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
}
// deleteRequest is the DELETE payload; it embeds the stored block
// record so its JSON fields mirror store.EmoteBlock.
type deleteRequest struct {
	store.EmoteBlock
}

// blockRequest is the PATCH payload: a comma-separated list of emote
// IDs plus the emote type (serialized as "type").
type blockRequest struct {
	EmoteIds  string `json:"emoteIds"`
	EmoteType string `json:"type"`
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package typeconv
import (
"fmt"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/duration"
)
// DatumVecCanonicalTypeFamily is the "canonical" type family of all types that
// are physically represented by coldata.DatumVec. The value is chosen far
// outside the range of real types.Family constants so it cannot collide with
// one.
var DatumVecCanonicalTypeFamily = types.Family(1000000)
// TypeFamilyToCanonicalTypeFamily converts all type families to their
// "canonical" counterparts. "Canonical" type families are representatives
// from a set of "equivalent" type families where "equivalence" means having
// the same physical representation.
//
// All type families that do not have an optimized physical representation are
// handled by using tree.Datums, and such types are mapped to
// DatumVecCanonicalTypeFamily.
func TypeFamilyToCanonicalTypeFamily(family types.Family) types.Family {
	switch family {
	case types.BoolFamily:
		return types.BoolFamily
	// Bytes, strings, and UUIDs share the flat byte-slice representation.
	case types.BytesFamily, types.StringFamily, types.UuidFamily:
		return types.BytesFamily
	case types.DecimalFamily:
		return types.DecimalFamily
	// Dates are represented as integers.
	case types.IntFamily, types.DateFamily:
		return types.IntFamily
	case types.FloatFamily:
		return types.FloatFamily
	// Timestamps with and without time zone share one representation.
	case types.TimestampTZFamily, types.TimestampFamily:
		return types.TimestampTZFamily
	case types.IntervalFamily:
		return types.IntervalFamily
	default:
		// TODO(yuzefovich): consider adding native support for
		// types.UnknownFamily.
		return DatumVecCanonicalTypeFamily
	}
}
// ToCanonicalTypeFamilies converts typs to the corresponding canonical type
// families, preserving order.
func ToCanonicalTypeFamilies(typs []*types.T) []types.Family {
	families := make([]types.Family, len(typs))
	for i, typ := range typs {
		families[i] = TypeFamilyToCanonicalTypeFamily(typ.Family())
	}
	return families
}
// UnsafeFromGoType returns the type for a Go value, if applicable. Shouldn't
// be used at runtime. This method is unsafe because multiple logical types can
// be represented by the same physical type. Types that are backed by DatumVec
// are *not* supported by this function.
//
// Panics on any Go type not listed below.
func UnsafeFromGoType(v interface{}) *types.T {
	switch t := v.(type) {
	case int16:
		return types.Int2
	case int32:
		return types.Int4
	// Native int and int64 both map to the 8-byte SQL integer.
	case int, int64:
		return types.Int
	case bool:
		return types.Bool
	case float64:
		return types.Float
	case []byte:
		return types.Bytes
	case string:
		return types.String
	case apd.Decimal:
		return types.Decimal
	case time.Time:
		return types.TimestampTZ
	case duration.Duration:
		return types.Interval
	default:
		panic(fmt.Sprintf("type %s not supported yet", t))
	}
}
|
package handler
import (
"database/sql"
"fmt"
"github.com/ysugimoto/husky"
)
// Accept authenticates a request by its X-LAP-Token header and, on
// success, responds with the owning user's name.
//
// The original check `err != nil || err == sql.ErrNoRows` made the
// ErrNoRows comparison dead code (|| short-circuits on the first
// clause). The switch below keeps identical behavior while making the
// no-rows case explicit.
func Accept(d *husky.Dispatcher) {
	db := husky.NewDb(GetDSN())
	req := d.Input.GetRequest()
	token := req.Header.Get("X-LAP-Token")
	if token == "" {
		SendError(d, "Accept Error")
		return
	}
	// Look up the user that owns this token.
	var userName string
	row := db.Select("name").Where("token", "=", token).GetRow("pb_users")
	switch err := row.Scan(&userName); err {
	case nil:
		SendOK(d, userName)
	case sql.ErrNoRows:
		// No user owns this token.
		fmt.Printf("%v\n", err)
		SendError(d, "Token not matched")
	default:
		// Query or scan failure.
		fmt.Printf("%v\n", err)
		SendError(d, "Token not matched")
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package network
import (
"context"
"fmt"
"regexp"
"strings"
"time"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/bundles/cros/network/proxy"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/dbusutil"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the test with the tast framework; the fixture enrolls
// the device and logs in before the test body runs.
func init() {
	testing.AddTest(&testing.Test{
		Func:         SystemProxyForSystemServices,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test that tlsdated can successfully connect to a web endpoint through the system-proxy daemon",
		Contacts: []string{
			"acostinas@google.com", // Test author
			"chromeos-commercial-networking@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline", "informational"},
		Fixture:      "chromeEnrolledLoggedIn",
	})
}
// SystemProxyForSystemServices starts a local authenticated HTTP proxy,
// points the device at it via enterprise policy with system-proxy
// enabled, waits for the system-proxy worker to become active, and then
// polls until tlsdate succeeds through the proxy.
func SystemProxyForSystemServices(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	const username = "testuser"
	const password = "testpwd"
	// Start an HTTP proxy instance on the DUT which requires username and password authentication.
	ps := proxy.NewServer()
	defer ps.Stop(ctx)
	cred := &proxy.AuthCredentials{Username: username, Password: password}
	err := ps.Start(ctx, 3128, cred, []string{})
	if err != nil {
		s.Fatal("Failed to start a local proxy on the DUT: ", err)
	}
	// Configure the proxy on the DUT via policy to point to the local proxy instance started via the `ProxyService`.
	proxyModePolicy := &policy.ProxyMode{Val: "fixed_servers"}
	proxyServerPolicy := &policy.ProxyServer{Val: fmt.Sprintf("http://%s", ps.HostAndPort)}
	// Start system-proxy and configure it with the credentials of the local proxy instance.
	systemProxySettingsPolicy := &policy.SystemProxySettings{
		Val: &policy.SystemProxySettingsValue{
			SystemProxyEnabled:           true,
			SystemServicesUsername:       username,
			SystemServicesPassword:       password,
			PolicyCredentialsAuthSchemes: []string{},
		}}
	if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
		s.Fatal("Failed to clean up: ", err)
	}
	// Update policies.
	if err := policyutil.ServeAndRefresh(ctx, fdms, cr, []policy.Policy{proxyModePolicy, proxyServerPolicy, systemProxySettingsPolicy}); err != nil {
		s.Fatal("Failed to update policies: ", err)
	}
	// Block until the system-proxy WorkerActive D-Bus watcher is set up.
	if err = waitForSignal(ctx); err != nil {
		s.Fatal("Failed to observer event: ", err)
	}
	// It may take some time for Chrome to process the system-proxy worker active signal.
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		if err := runTLSDate(ctx); err != nil {
			return err
		}
		return nil
	}, &testing.PollOptions{Interval: 500 * time.Millisecond, Timeout: 15 * time.Second}); err != nil {
		s.Fatal("Not all targets finished closing: ", err)
	}
}
// runTLSDate runs tlsdate once, in the foreground. Returns an error if tlsdate didn't use system-proxy to connect to the web
// endpoint or if the certificate verification failed.
func runTLSDate(ctx context.Context) error {
	// tlsdated is a CrOS daemon that runs the tlsdate binary periodically in the background and does proxy resolution through Chrome.
	// The `-m <n>` option means tlsdate should run at most once every n seconds in steady state
	// The `-p` option means dry run.
	// The `-o` option means exit tlsdated after running once
	out, err := testexec.CommandContext(ctx, "/usr/bin/tlsdated", "-o", "-p", "-m", "60", "--", "/usr/bin/tlsdate", "-v", "-C", "/usr/share/chromeos-ca-certificates", "-l").CombinedOutput()
	// The exit code 124 indicates that timeout sent a SIGTERM to terminate tlsdate.
	if err != nil && !strings.Contains(err.Error(), "Process exited with status 124") {
		return errors.Wrap(err, "error running tlsdate")
	}
	var result = string(out)
	// system-proxy has an address in the 100.115.92.0/24 subnet (assigned by patchpanel) and listens on port 3128.
	// NOTE(review): the pattern is recompiled on every call; hoisting it to a
	// package-level regexp.MustCompile would be cheaper if this ever runs hot.
	proxyMsg := regexp.MustCompile("V: using proxy http://100.115.92.[0-9]+:3128")
	const successMsg = "V: certificate verification passed"
	if !proxyMsg.Match(out) {
		return errors.Errorf("tlsdated is not using the system-proxy daemon: %s", result)
	}
	if !strings.Contains(result, successMsg) {
		return errors.Errorf("certificate verification failed: %s", result)
	}
	return nil
}
// waitForSignal subscribes to the system-proxy WorkerActive D-Bus
// signal and reports whether the watcher could be created.
// NOTE(review): the function only creates (and closes) the watcher; it
// does not block waiting for an actual signal delivery.
func waitForSignal(ctx context.Context) error {
	match := dbusutil.MatchSpec{
		Type:      "signal",
		Path:      "/org/chromium/SystemProxy",
		Interface: "org.chromium.SystemProxy",
		Member:    "WorkerActive",
	}
	signal, err := dbusutil.NewSignalWatcherForSystemBus(ctx, match)
	if err != nil {
		// Previously Close was deferred before this check, which would
		// dereference a nil watcher whenever creation failed.
		return err
	}
	defer signal.Close(ctx)
	return nil
}
|
package movetaskorder
import (
"errors"
"fmt"
"time"
"github.com/gobuffalo/pop/v5"
"github.com/gobuffalo/validate/v3"
movetaskorderops "github.com/transcom/mymove/pkg/gen/primeapi/primeoperations/move_task_order"
"github.com/transcom/mymove/pkg/models"
"github.com/transcom/mymove/pkg/services"
"github.com/transcom/mymove/pkg/services/query"
"github.com/transcom/mymove/pkg/unit"
"github.com/gofrs/uuid"
)
// moveTaskOrderUpdater implements services.MoveTaskOrderUpdater. It
// embeds moveTaskOrderFetcher so fetch methods are promoted onto it.
type moveTaskOrderUpdater struct {
	db *pop.Connection
	moveTaskOrderFetcher
	builder            UpdateMoveTaskOrderQueryBuilder
	serviceItemCreator services.MTOServiceItemCreator
}
// NewMoveTaskOrderUpdater creates a new struct with the service dependencies.
func NewMoveTaskOrderUpdater(db *pop.Connection, builder UpdateMoveTaskOrderQueryBuilder, serviceItemCreator services.MTOServiceItemCreator) services.MoveTaskOrderUpdater {
	updater := moveTaskOrderUpdater{
		db:                   db,
		moveTaskOrderFetcher: moveTaskOrderFetcher{db},
		builder:              builder,
		serviceItemCreator:   serviceItemCreator,
	}
	return &updater
}
// MakeAvailableToPrime updates the status of a MoveTaskOrder for a given UUID
// to make it available to prime. On the first call it validates the associated
// Order, stamps AvailableToPrimeAt, approves a SUBMITTED move, optionally
// auto-creates and approves the MS/CS MTO-level service items, and returns the
// refreshed move. eTag provides optimistic locking for the move update.
func (o moveTaskOrderUpdater) MakeAvailableToPrime(moveTaskOrderID uuid.UUID, eTag string,
	includeServiceCodeMS bool, includeServiceCodeCS bool) (*models.Move, error) {
	var err error
	var verrs *validate.Errors
	searchParams := services.FetchMoveTaskOrderParams{
		IncludeHidden: false,
	}
	move, err := o.FetchMoveTaskOrder(moveTaskOrderID, &searchParams)
	if err != nil {
		return &models.Move{}, err
	}
	// Fail early if the Order is invalid due to missing required fields
	order := move.Orders
	// NOTE(review): the error returned by Load is ignored — confirm that a
	// failed association load is acceptable here.
	o.db.Load(&order, "Moves")
	if verrs, err = o.db.ValidateAndUpdate(&order); verrs.HasAny() || err != nil {
		return &models.Move{}, services.NewInvalidInputError(move.ID, nil, verrs, "")
	}
	// Only the first call flips the move to "available"; later calls fall
	// through and return the move unchanged.
	if move.AvailableToPrimeAt == nil {
		// update field for move
		now := time.Now()
		move.AvailableToPrimeAt = &now
		if move.Status == models.MoveStatusSUBMITTED {
			err = move.Approve()
			if err != nil {
				return &models.Move{}, services.NewConflictError(move.ID, err.Error())
			}
		}
		verrs, err = o.builder.UpdateOne(move, &eTag)
		if verrs != nil && verrs.HasAny() {
			return &models.Move{}, services.NewInvalidInputError(move.ID, nil, verrs, "")
		}
		if err != nil {
			switch err.(type) {
			case query.StaleIdentifierError:
				return nil, services.NewPreconditionFailedError(move.ID, err)
			default:
				return &models.Move{}, err
			}
		}
		// When provided, this will auto create and approve MTO level service items. This is going to typically happen
		// from the ghc api via the office app. The handler in question is this one: UpdateMoveTaskOrderStatusHandlerFunc
		// in ghcapi/move_task_order.go
		if includeServiceCodeMS {
			// create if doesn't exist
			_, verrs, err = o.serviceItemCreator.CreateMTOServiceItem(&models.MTOServiceItem{
				MoveTaskOrderID: moveTaskOrderID,
				MTOShipmentID:   nil,
				ReService:       models.ReService{Code: models.ReServiceCodeMS},
				Status:          models.MTOServiceItemStatusApproved,
				ApprovedAt:      &now,
			})
		}
		// NOTE(review): when includeServiceCodeMS is false, the checks below
		// see the leftover verrs/err from UpdateOne; a non-nil-but-empty
		// verrs would be returned as an error — confirm this is intended.
		if err != nil {
			if errors.Is(err, models.ErrInvalidTransition) {
				return &models.Move{}, services.NewConflictError(move.ID, err.Error())
			}
			return &models.Move{}, err
		}
		if verrs != nil {
			return &models.Move{}, verrs
		}
		if includeServiceCodeCS {
			// create if doesn't exist
			_, verrs, err = o.serviceItemCreator.CreateMTOServiceItem(&models.MTOServiceItem{
				MoveTaskOrderID: moveTaskOrderID,
				MTOShipmentID:   nil,
				ReService:       models.ReService{Code: models.ReServiceCodeCS},
				Status:          models.MTOServiceItemStatusApproved,
				ApprovedAt:      &now,
			})
		}
		if err != nil {
			if errors.Is(err, models.ErrInvalidTransition) {
				return &models.Move{}, services.NewConflictError(move.ID, err.Error())
			}
			return &models.Move{}, err
		}
		if verrs != nil {
			return &models.Move{}, verrs
		}
		// CreateMTOServiceItem may have updated the move status so refetch as to not return incorrect status
		// TODO: Modify CreateMTOServiceItem to return the updated move or refactor to operate on the passed in reference
		move, err = o.FetchMoveTaskOrder(moveTaskOrderID, nil)
		if err != nil {
			return &models.Move{}, err
		}
	}
	return move, nil
}
// UpdateMoveTaskOrderQueryBuilder is the query builder for updating MTO.
type UpdateMoveTaskOrderQueryBuilder interface {
	// UpdateOne persists a single model, using eTag for optimistic locking.
	UpdateOne(model interface{}, eTag *string) (*validate.Errors, error)
}
// UpdatePostCounselingInfo sets the PPM type and estimated weight on a move
// after counseling, persisting via the query builder with eTag-based
// optimistic locking. Returns NotFound if the move doesn't exist and
// PreconditionFailed on a stale eTag.
func (o *moveTaskOrderUpdater) UpdatePostCounselingInfo(moveTaskOrderID uuid.UUID, body movetaskorderops.UpdateMTOPostCounselingInformationBody, eTag string) (*models.Move, error) {
	var moveTaskOrder models.Move
	// Eager-load the associations callers expect on the returned move.
	err := o.db.Q().EagerPreload(
		"Orders.NewDutyStation.Address",
		"Orders.ServiceMember",
		"Orders.Entitlement",
		"MTOShipments",
		"PaymentRequests",
	).Find(&moveTaskOrder, moveTaskOrderID)
	if err != nil {
		return nil, services.NewNotFoundError(moveTaskOrderID, "while looking for moveTaskOrder.")
	}
	estimatedWeight := unit.Pound(body.PpmEstimatedWeight)
	moveTaskOrder.PPMType = &body.PpmType
	moveTaskOrder.PPMEstimatedWeight = &estimatedWeight
	verrs, err := o.builder.UpdateOne(&moveTaskOrder, &eTag)
	if verrs != nil && verrs.HasAny() {
		return nil, services.NewInvalidInputError(moveTaskOrder.ID, err, verrs, "")
	}
	if err != nil {
		switch err.(type) {
		case query.StaleIdentifierError:
			return nil, services.NewPreconditionFailedError(moveTaskOrder.ID, err)
		default:
			return nil, err
		}
	}
	return &moveTaskOrder, nil
}
// ShowHide changes the value in the "Show" field for a Move. This can be either True or False and indicates if the move has been deactivated or not.
func (o *moveTaskOrderUpdater) ShowHide(moveID uuid.UUID, show *bool) (*models.Move, error) {
	searchParams := services.FetchMoveTaskOrderParams{
		IncludeHidden: true, // We need to search every move to change its status
	}
	move, err := o.FetchMoveTaskOrder(moveID, &searchParams)
	if err != nil {
		return nil, services.NewNotFoundError(moveID, "while fetching the Move")
	}
	// NOTE(review): the nil check runs after the fetch; moving it first
	// would avoid a wasted query on bad input.
	if show == nil {
		return nil, services.NewInvalidInputError(moveID, nil, nil, "The 'show' field must be either True or False - it cannot be empty")
	}
	move.Show = show
	verrs, err := o.db.ValidateAndSave(move)
	if verrs != nil && verrs.HasAny() {
		return nil, services.NewInvalidInputError(move.ID, err, verrs, "Invalid input found while updating the Move")
	} else if err != nil {
		return nil, services.NewQueryError("Move", err, "")
	}
	// Get the updated Move and return
	updatedMove, err := o.FetchMoveTaskOrder(move.ID, &searchParams)
	if err != nil {
		return nil, services.NewQueryError("Move", err, fmt.Sprintf("Unexpected error after saving: %v", err))
	}
	return updatedMove, nil
}
|
package gflObject
import "github.com/garclak/gflgo/gflConst"
type Waypoint struct {
id : int
Type : gflConst.WaypointC
Designation : string
Altitude : int
LatHemi : gflConst.HemiC
LatDeg : int
LatMin : int
LatSec : int
LongHemi : gflConst.HemiC
LongDeg : int
LongMin : int
LongSec : int
Frequency : float32
Remarks : string
}
/*
func (l *LogonC) Const(ref string) int {
if ret, ok := l.elements[ref]; ok {
return ret
} else {
return -1
}
}
*/
// NewWaypoint allocates a Waypoint with its internal id set to inId.
func NewWaypoint(inId int) *Waypoint {
	wp := new(Waypoint)
	// The previous version assigned inId to a discarded local variable
	// (`id := inId`), leaving the waypoint's id permanently zero.
	wp.id = inId
	return wp
}
|
package main
// Leetcode 386. (medium)
// lexicalOrder returns the integers 1..n sorted lexicographically, as
// a preorder walk of the implicit decimal trie: visit a number, then
// its children cur*10 .. cur*10+9, for each root digit 1..9.
func lexicalOrder(n int) []int {
	res := make([]int, 0, n)
	var dfs func(cur int)
	dfs = func(cur int) {
		if cur > n {
			return
		}
		res = append(res, cur)
		for d := 0; d <= 9; d++ {
			dfs(cur*10 + d)
		}
	}
	for root := 1; root <= 9; root++ {
		dfs(root)
	}
	return res
}
|
package main
import (
"bufio"
"fmt"
"log"
"math/bits"
"os"
"runtime"
"strconv"
"strings"
"time"
)
// M is the number of candidate rows in the input; N is the team size.
const M = 150
const N = 6

// names[i] is the display name of candidate i, in input order.
var names []string

// attacks[i] is a 192-bit set (3 x 64-bit words) of coverage bits for
// candidate i, populated from the input file in main.
var attacks [M][3]uint64

// Result carries the best coverage score found and the N candidate
// indices that achieve it.
type Result struct {
	best   int
	answer [N]uint8
}
// search_i0 exhaustively scores every N-member team whose largest index
// is i0 (remaining indices strictly decreasing below it), recording the
// best popcount-coverage score and team in result. The OR chains over
// the 3 coverage words are deliberately unrolled for speed; the union
// of the first five members is hoisted so the innermost i5 loop only
// ORs one more row.
func (result *Result) search_i0(i0 int) {
	var s4 [3]uint64
	for i1 := 0; i1 < i0; i1++ {
		for i2 := 0; i2 < i1; i2++ {
			for i3 := 0; i3 < i2; i3++ {
				for i4 := 0; i4 < i3; i4++ {
					// Union of coverage bits for i0..i4.
					s4[0] = 0
					s4[1] = 0
					s4[2] = 0
					s4[0] |= attacks[i0][0]
					s4[1] |= attacks[i0][1]
					s4[2] |= attacks[i0][2]
					s4[0] |= attacks[i1][0]
					s4[1] |= attacks[i1][1]
					s4[2] |= attacks[i1][2]
					s4[0] |= attacks[i2][0]
					s4[1] |= attacks[i2][1]
					s4[2] |= attacks[i2][2]
					s4[0] |= attacks[i3][0]
					s4[1] |= attacks[i3][1]
					s4[2] |= attacks[i3][2]
					s4[0] |= attacks[i4][0]
					s4[1] |= attacks[i4][1]
					s4[2] |= attacks[i4][2]
					for i5 := 0; i5 < i4; i5++ {
						// Add the sixth member and score the union.
						s50 := s4[0]
						s51 := s4[1]
						s52 := s4[2]
						s50 |= attacks[i5][0]
						s51 |= attacks[i5][1]
						s52 |= attacks[i5][2]
						score := bits.OnesCount64(s50)
						score += bits.OnesCount64(s51)
						score += bits.OnesCount64(s52)
						if score > result.best {
							result.best = score
							result.answer[0] = uint8(i0)
							result.answer[1] = uint8(i1)
							result.answer[2] = uint8(i2)
							result.answer[3] = uint8(i3)
							result.answer[4] = uint8(i4)
							result.answer[5] = uint8(i5)
						}
					}
				}
			}
		}
	}
}
// worker consumes i0 values from jobs, runs the exhaustive search for
// each, and sends the per-job Result on results. Progress and timing
// are logged with the worker id.
func worker(w int, jobs <-chan int, results chan<- Result) {
	for job := range jobs {
		var res Result
		log.Printf("[w %d][i0 %d] processing\n", w, job)
		begin := time.Now()
		res.search_i0(job)
		log.Printf("[w %d][i0 %d] elapsed = %.3f s\n", w, job, time.Since(begin).Seconds())
		log.Printf("[w %d][i0 %d] best = %d\n", w, job, res.best)
		results <- res
	}
}
// search fans the M per-i0 jobs out to GOMAXPROCS workers and folds
// their results into the single best one. Both channels are buffered
// to M so enqueueing all jobs before starting workers cannot block.
func search() Result {
	jobs := make(chan int, M)
	results := make(chan Result, M)
	for job := M - 1; job >= 0; job-- {
		jobs <- job
	}
	close(jobs)
	numWorkers := runtime.GOMAXPROCS(0)
	for w := 0; w < numWorkers; w++ {
		go worker(w, jobs, results)
	}
	var best Result
	for n := 0; n < M; n++ {
		r := <-results
		if r.best > best.best {
			// Arrays are values in Go, so this copies answer as well.
			best = r
		}
		log.Printf("[current] best = %d\n", best.best)
	}
	return best
}
// go run pokemon.go input_3.txt
// go tool compile -S pokemon.go > pokemon_go.s
// go build -o pokemon_go.exe pokemon.go
// ./pokemon_go.exe input_3.txt
// main reads the CSV-ish input file named on the command line (one
// candidate per line: name,bit,bit,...), fills names/attacks, runs the
// parallel search, and prints the best team.
func main() {
	if !(len(os.Args) >= 2) {
		log.Fatal("usage: pokemon input.txt")
	}
	log.Printf("NumCPU= %d", runtime.NumCPU())
	//runtime.GOMAXPROCS(2)
	log.Printf("GOMAXPROCS= %d", runtime.GOMAXPROCS(0))
	f, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err = f.Close(); err != nil {
			log.Fatal(err)
		}
	}()
	k := 0
	s := bufio.NewScanner(f)
	for s.Scan() {
		// NOTE(review): this inner `s` shadows the scanner `s` for the rest
		// of the loop body — it works, but renaming one would be clearer.
		s := strings.Split(strings.TrimSpace(s.Text()), ",")
		names = append(names, s[0])
		for _, v := range s[1:] {
			x, err := strconv.Atoi(v)
			if err != nil {
				panic(err)
			}
			// Set bit x in row k's 3-word bitset.
			attacks[k][x/64] |= uint64(1) << (uint(x) % 64)
		}
		k++
	}
	err = s.Err()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %016x %016x %016x\n", names[0], attacks[0][0], attacks[0][1], attacks[0][2])
	start := time.Now()
	result := search()
	end := time.Now()
	elapsed := end.Sub(start)
	log.Printf("elapsed = %.3f s\n", elapsed.Seconds())
	log.Printf("best = %d\n", result.best)
	for i := 0; i < N; i++ {
		fmt.Println(names[result.answer[i]])
	}
}
|
package atoi
import (
"fmt"
"testing"
)
// TestMAtoi exercises MAtoi on input with leading spaces, a sign, and
// trailing garbage. NOTE(review): the result is only printed, never
// asserted — consider checking the expected value and error.
func TestMAtoi(t *testing.T) {
	str := " -123 bs"
	fmt.Println(MAtoi(str))
}
|
//lint:file-ignore U1000 Ignore all unused code
package cmd
import (
"encoding/json"
"fmt"
"log"
"os"
"github.com/fugue/fugue-client/client"
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/spf13/cobra"
)
// API endpoint defaults; both can be overridden at runtime via the
// FUGUE_API_HOST / FUGUE_API_BASE environment variables (see getClient).
const (
	// DefaultHost is the default hostname of the Fugue API
	DefaultHost = "api.riskmanager.fugue.co"
	// DefaultBase is the base path of the Fugue API
	DefaultBase = "v0"
)
func mustGetEnv(name string) string {
value := os.Getenv(name)
if value == "" {
fmt.Fprintf(os.Stderr, "Missing environment variable: %s\n", name)
os.Exit(1)
}
return value
}
func getEnvWithDefault(name, defaultValue string) string {
value := os.Getenv(name)
if value == "" {
return defaultValue
}
return value
}
// getClient builds a Fugue API client and the basic-auth writer for it.
// Credentials come from the required FUGUE_API_ID / FUGUE_API_SECRET
// environment variables (the process exits if either is missing); host
// and base path fall back to the package defaults.
func getClient() (*client.Fugue, runtime.ClientAuthInfoWriter) {
	clientID := mustGetEnv("FUGUE_API_ID")
	clientSecret := mustGetEnv("FUGUE_API_SECRET")
	host := getEnvWithDefault("FUGUE_API_HOST", DefaultHost)
	base := getEnvWithDefault("FUGUE_API_BASE", DefaultBase)
	transport := httptransport.New(host, base, []string{"https"})
	apiclient := client.New(transport, strfmt.Default)
	auth := httptransport.BasicAuth(clientID, clientSecret)
	return apiclient, auth
}
// showResponse marshals obj to JSON and prints it on stdout,
// terminating the process if marshaling fails.
func showResponse(obj interface{}) {
	js, err := json.Marshal(obj)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(js))
}
// flagStringValue returns the string value of a persistent flag,
// terminating the process on lookup failure.
func flagStringValue(cmd *cobra.Command, name string) string {
	value, err := cmd.PersistentFlags().GetString(name)
	if err != nil {
		log.Fatal(err)
	}
	return value
}

// flagBoolValue returns the bool value of a persistent flag,
// terminating the process on lookup failure.
func flagBoolValue(cmd *cobra.Command, name string) bool {
	value, err := cmd.PersistentFlags().GetBool(name)
	if err != nil {
		log.Fatal(err)
	}
	return value
}

// flagInt64Value returns the int64 value of a persistent flag,
// terminating the process on lookup failure.
func flagInt64Value(cmd *cobra.Command, name string) int64 {
	value, err := cmd.PersistentFlags().GetInt64(name)
	if err != nil {
		log.Fatal(err)
	}
	return value
}

// flagStringSliceValue returns the slice value of a persistent flag
// only when the user explicitly set it; otherwise nil, so callers can
// distinguish "not provided" from "provided empty".
func flagStringSliceValue(cmd *cobra.Command, name string) []string {
	if cmd.PersistentFlags().Lookup(name).Changed {
		value, err := cmd.PersistentFlags().GetStringSlice(name)
		if err != nil {
			log.Fatal(err)
		}
		return value
	}
	return nil
}
|
package models
// Neuron holds the state of a single network unit.
// NOTE(review): field semantics are inferred from names — confirm
// against the training code that uses them.
type Neuron struct {
	Weights  []float64
	Value    float64
	Error    float64
	// "Biais" (sic) is an exported name; renaming would break callers.
	Biais      float64
	Expected   float64
	NewWeights []float64
}

// Magnitude limits — presumably clamp bounds applied during training;
// confirm at the call sites.
const MaxWeight float64 = 20
const MaxBiais float64 = 250
|
package main
import (
"fmt"
"github.com/captncraig/blog/designBrowser"
"net/http"
"os"
)
// main registers the HTTP routes, picks a bind address from the HOST
// and PORT environment variables (falling back to :3322 when both are
// empty), and serves until ListenAndServe fails.
func main() {
	http.HandleFunc("/resume", resume)
	http.HandleFunc("/colors", designSeeds)
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static/"))))
	bind := fmt.Sprintf("%s:%s", os.Getenv("HOST"), os.Getenv("PORT"))
	if bind == ":" {
		bind = ":3322"
	}
	fmt.Printf("listening on %s...\n", bind)
	err := http.ListenAndServe(bind, nil)
	if err != nil {
		panic(err)
	}
}
// resume serves the static resume page.
func resume(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, "static/resume.html")
}
// designSeeds serves the colors index page when no query is given,
// otherwise forwards the query to the design browser.
func designSeeds(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query().Get("q")
	if q == "" {
		http.ServeFile(w, r, "static/colors/index.html")
		return
	}
	designBrowser.GetMore(w, r, q)
}
|
package schema
import (
"github.com/facebookincubator/ent"
"github.com/facebookincubator/ent/schema/edge"
"github.com/facebookincubator/ent/schema/field"
)
// SubjectType holds the schema definition for the SubjectType entity.
type SubjectType struct {
	ent.Schema
}

// Fields of the SubjectType: a single required, unique type name.
func (SubjectType) Fields() []ent.Field {
	return []ent.Field{
		field.String("type_name").
			NotEmpty().
			Unique(),
	}
}

// Edges of the SubjectType: one-to-many to CourseItem, stored in the
// course item's "type_id" column.
func (SubjectType) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("course_items", CourseItem.Type).
			StorageKey(edge.Column("type_id")),
	}
}
|
package protocol
import fuzz "github.com/jamieabc/gofuzz"
const (
	// transactionStatusRPCMethod is the JSON-RPC method name queried.
	transactionStatusRPCMethod string = "Transaction.Status"
)

// TransactionStatusRpc is the JSON-RPC request envelope for a
// transaction status query.
type TransactionStatusRpc struct {
	ID     string              `json:"id"`
	Method string              `json:"method"`
	Params []TransactionStatus `json:"params"`
}

// TransactionStatus identifies one transaction by its hex ID.
type TransactionStatus struct {
	TxID string `json:"TxId"`
}
// JustifyData normalizes the envelope to the fixed id and method name
// the protocol expects, leaving Params untouched.
func (rpc *TransactionStatusRpc) JustifyData() {
	rpc.ID = "1"
	rpc.Method = transactionStatusRPCMethod
}
// GenRandomData generates random data fits specific interface
// by fuzzing every field of the receiver in place.
func (rpc *TransactionStatusRpc) GenRandomData() {
	f := fuzz.New()
	f.Fuzz(rpc)
}
// SampleData generates correct data: a well-formed status query for a
// single known transaction ID.
func (rpc *TransactionStatusRpc) SampleData() {
	rpc.Method = transactionStatusRPCMethod
	rpc.ID = "1"
	rpc.Params = []TransactionStatus{
		{TxID: "2dc8770718b01f0205ad991bfb4c052f02677cff60e65d596e890cb6ed82c861"},
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
)
// main resolves the departure/destination flags, requests a route from
// the API, and prints it as GPX. Fatal errors terminate the process.
//
// The original swapped the names twice: getPlaces() returns
// (departure, destination), but the results were bound to locals named
// the other way round and then swapped back in the struct literal. Net
// behavior was correct; the names now match the data. The local `error`
// (which shadowed the builtin error type) is renamed to err.
func main() {
	departure, destination := getPlaces()
	key := getAPIKey()
	client := Client{
		APIKey:      key,
		Departure:   departure,
		Destination: destination,
	}
	response, err := client.Request()
	if err != nil {
		log.Fatalln(err)
	}
	printGPX(*response)
}
func getAPIKey() string {
key := os.Getenv("APIKEY")
if len(key) == 0 {
log.Fatalln("APIKEY is empty.")
}
return key
}
// getPlaces parses the -s (departure) and -g (destination) flags and
// returns them in that order. When either is missing it prints usage
// and exits with status 1.
func getPlaces() (string, string) {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, `
Usage of %s:
%s [OPTIONS] ARGS ...
Options\n`, os.Args[0], os.Args[0])
		flag.PrintDefaults()
	}
	startOption := flag.String("s", "", "Departure place name")
	goalOption := flag.String("g", "", "Destination place name")
	flag.Parse()
	if len(*startOption) == 0 || len(*goalOption) == 0 {
		flag.Usage()
		os.Exit(1)
	}
	return *startOption, *goalOption
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package descriptorutils
import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/errors"
)
// GetIndexMutation returns a reference to a specified index add/drop mutation
// on a table, together with its position in the mutations slice.
func GetIndexMutation(
	table catalog.TableDescriptor, idxID descpb.IndexID,
) (mut *descpb.DescriptorMutation, sliceIdx int, err error) {
	muts := table.TableDesc().Mutations
	for i := range muts {
		m := &muts[i]
		if idx := m.GetIndex(); idx != nil && idx.ID == idxID {
			return m, i, nil
		}
	}
	return nil, 0, errors.AssertionFailedf("mutation not found")
}
// GetColumnMutation returns a reference to a specified column add/drop mutation
// on a table, together with its position in the mutations slice.
func GetColumnMutation(
	table catalog.TableDescriptor, colID descpb.ColumnID,
) (mut *descpb.DescriptorMutation, sliceIdx int, err error) {
	muts := table.TableDesc().Mutations
	for i := range muts {
		m := &muts[i]
		if col := m.GetColumn(); col != nil && col.ID == colID {
			return m, i, nil
		}
	}
	return nil, 0, errors.AssertionFailedf("mutation not found")
}
|
package dragonpuzzle
import (
"testing"
)
// TestTrack table-tests the Track constructors against expected
// color/head-tail-count/ends values using Track.Equal.
func TestTrack(t *testing.T) {
	tests := []struct{
		desc string
		call *Track
		want *Track
	}{
		{
			desc: "nil track",
			call: &Track{},
			want: &Track{Col: BLANK},
		},
		{
			desc: "red no hts",
			call: Red(),
			want: &Track{Col: RED, Count: HT{}, Ends: 0},
		},
		{
			desc: "green no hts",
			call: Green(),
			want: &Track{Col: GREEN, Count: HT{}, Ends: 0},
		},
		{
			desc: "yellow no hts",
			call: Yellow(),
			want: &Track{Col:YELLOW, Count: HT{}, Ends: 0},
		},
		{
			desc: "red with 2 heads",
			call: Red(H, H),
			want: &Track{Col: RED, Count: HT{2, 0}, Ends: 0},
		},
	}
	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			if !test.call.Equal(test.want) {
				t.Errorf("tracks not equal: got %s, want %s", test.call, test.want)
			}
		})
	}
}
// TestTile builds a tile from raw bytes and three tracks, then rotates
// it once; both states are only logged.
// NOTE(review): the NewTile error is logged but never checked — if it
// is non-nil, tile.Turn may operate on an invalid tile.
func TestTile(t *testing.T) {
	tile, err := NewTile([]byte{0, 1, 2, 0, 3, 1}, Green(), Red(H), Yellow(T))
	t.Log(tile, err)
	tile.Turn(1)
	t.Log(tile)
}
|
package types
// UbuntuOptions wraps the "install-ubuntu" section of the options
// payload.
type UbuntuOptions struct {
	InstallUbuntu InstallUbuntu `json:"install-ubuntu"`
}
|
package nanolog
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"runtime"
"strings"
"sync"
)
// LogLevel determines the level of logging (priority).
// A separate logger exists for each log level.
type LogLevel string

// Passthrough constants from the standard log package, so callers need
// not import "log" just for flag bits.
const (
	Ldate         = log.Ldate
	Ltime         = log.Ltime
	Lmicroseconds = log.Lmicroseconds
	Llongfile     = log.Llongfile
	Lshortfile    = log.Lshortfile
	LUTC          = log.LUTC
	LstdFlags     = log.LstdFlags
)

// Known levels plus package defaults. DefaultPrefix is a format string
// whose %v slot receives the (possibly colorized) level name.
const (
	DebugLevel LogLevel = "DEBUG"
	InfoLevel  LogLevel = "INFO"
	WarnLevel  LogLevel = "WARN"
	ErrorLevel LogLevel = "ERROR"
	FatalLevel LogLevel = "FATAL"
	DefaultLogLevel = ErrorLevel
	DefaultPrefix   = "[%v] "
	DefaultFlags    = log.LstdFlags
)

// Default ANSI color codes per level (0 means "no color").
const (
	defaultDebugColor = 32
	defaultInfoColor  = 35
	defaultWarnColor  = 33
	defaultErrorColor = 31
)

// Current color codes; NoColor resets all of these to 0.
var (
	DebugColor = defaultDebugColor
	InfoColor  = defaultInfoColor
	WarnColor  = defaultWarnColor
	ErrorColor = defaultErrorColor
)

// mutex guards (re)creation of the package-level loggers map.
var mutex = &sync.Mutex{}

// loggers holds one Logger per level, built with default options.
var loggers = createLoggers(Options{Level: DefaultLogLevel})
// Logger wrapper for standart log.Logger
type Logger struct {
level LogLevel
prefix string
flags int
logger *log.Logger
}
func New(lvl LogLevel, writer io.Writer, prefix string, flags int) *Logger {
return &Logger{
level: lvl,
prefix: prefix,
flags: flags,
logger: log.New(writer, prefix, flags),
}
}
func (l *Logger) Println(args ...interface{}) {
if l.level == FatalLevel {
l.logger.Fatalln(args...)
return
}
l.logger.Println(args...)
}
// Printf logs a formatted message. A FatalLevel logger delegates to
// Fatalf, which exits the process after writing.
func (l *Logger) Printf(msg string, args ...interface{}) {
	if l.level != FatalLevel {
		l.logger.Printf(msg, args...)
		return
	}
	l.logger.Fatalf(msg, args...)
}
// ParseLevel converts a case-insensitive string ("debug", "INFO", ...)
// into the matching LogLevel; unrecognized strings yield the zero
// LogLevel (""). Useful when the level comes from configuration.
func ParseLevel(str string) LogLevel {
	switch strings.ToLower(str) {
	case "debug":
		return DebugLevel
	case "info":
		return InfoLevel
	case "warn":
		return WarnLevel
	case "error":
		return ErrorLevel
	case "fatal":
		return FatalLevel
	}
	return ""
}
// LoggerOptions are per-level overrides supplied by the caller.
// Zero values mean "use the default"; Flags == -1 means "no flags at all"
// (see createLoggers).
type LoggerOptions struct {
	Writer io.Writer
	Color int
	Prefix string
	Flags int
}
// internalOptions are the fully-resolved per-level settings, including
// the level's numeric priority used for threshold filtering.
type internalOptions struct {
	writer io.Writer
	color int
	prefix string
	flags int
	priority int
}
// Options provide basic options for tuning logging.
type Options struct {
	// Level is the minimum level that will be emitted (default: ErrorLevel).
	Level LogLevel
	Debug LoggerOptions
	Info LoggerOptions
	Warn LoggerOptions
	Error LoggerOptions
	Fatal LoggerOptions
}
// FormatPrefix renders the logger prefix by substituting the level name
// into the prefix template (e.g. "[%v] " -> "[ERROR] ").
// When colorCode is non-zero the level name is wrapped in the matching
// ANSI escape sequence; when it is zero (colors disabled) the plain level
// name is substituted instead.
func FormatPrefix(prefix string, colorCode int, level LogLevel) string {
	levelText := string(level)
	if colorCode != 0 {
		levelText = fmt.Sprintf("\x1b[%dm%s\x1b[m", colorCode, level)
	}
	// Always apply the template: the previous no-color path returned the
	// bare level name, silently dropping the "[...] " wrapper and making
	// output inconsistent after NoColor().
	return fmt.Sprintf(prefix, levelText)
}
// NoColor turns off colored output for every level by zeroing all the
// package-level color codes.
func NoColor() {
	DebugColor, InfoColor, WarnColor, ErrorColor = 0, 0, 0, 0
}
// internalDefaults builds the default settings for one level: stdout, the
// default prefix template and flags, plus the given priority and color.
func internalDefaults(priority int, color int) internalOptions {
	opts := internalOptions{
		writer: os.Stdout,
		prefix: DefaultPrefix,
		flags:  DefaultFlags,
	}
	opts.priority = priority
	opts.color = color
	return opts
}
// getDefaultOptions returns the default settings for every level, with
// priorities ascending from Debug (1) to Fatal (5).
// Fatal deliberately reuses the error color.
func getDefaultOptions() map[LogLevel]internalOptions {
	return map[LogLevel]internalOptions{
		DebugLevel: internalDefaults(1, DebugColor),
		InfoLevel: internalDefaults(2, InfoColor),
		WarnLevel: internalDefaults(3, WarnColor),
		ErrorLevel: internalDefaults(4, ErrorColor),
		FatalLevel: internalDefaults(5, ErrorColor),
	}
}
// createLoggers builds one Logger per level from opts. Levels below the
// configured threshold are wired to ioutil.Discard; the rest merge the
// caller's per-level overrides over the package defaults.
func createLoggers(opts Options) map[LogLevel]*Logger {
	mutex.Lock()
	defer mutex.Unlock()
	if opts.Level == "" {
		opts.Level = DefaultLogLevel
	}
	// ANSI colors are only emitted on linux/darwin.
	if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
		NoColor()
	}
	overrides := map[LogLevel]LoggerOptions{
		DebugLevel: opts.Debug,
		InfoLevel:  opts.Info,
		WarnLevel:  opts.Warn,
		ErrorLevel: opts.Error,
		FatalLevel: opts.Fatal,
	}
	defaults := getDefaultOptions()
	minPriority := defaults[opts.Level].priority
	result := make(map[LogLevel]*Logger)
	for lvl, def := range defaults {
		// Silence levels below the configured threshold entirely.
		if def.priority < minPriority {
			result[lvl] = New(lvl, ioutil.Discard, "", log.LstdFlags)
			continue
		}
		override := overrides[lvl]
		writer := override.Writer
		if writer == nil {
			writer = def.writer
		}
		color := override.Color
		if color == 0 {
			color = def.color
		}
		prefix := override.Prefix
		if prefix == "" {
			prefix = def.prefix
		}
		flags := override.Flags
		if flags == 0 {
			flags = def.flags
		}
		// A sentinel of -1 requests "no flags at all".
		if flags == -1 {
			flags = 0
		}
		result[lvl] = New(lvl, writer, FormatPrefix(prefix, color, lvl), flags)
	}
	return result
}
// Init rebuilds the package-level default loggers from opts.
func Init(opts Options) {
	loggers = createLoggers(opts)
}
// getLogger returns the configured Logger for lvl under the package mutex.
func getLogger(lvl LogLevel) *Logger {
	mutex.Lock()
	defer mutex.Unlock()
	return loggers[lvl]
}
// Log logs arguments at the given level.
func Log(lvl LogLevel, args ...interface{}) {
	getLogger(lvl).Println(args...)
}
// Logf logs arguments at the given level with formatting.
func Logf(lvl LogLevel, msg string, args ...interface{}) {
	getLogger(lvl).Printf(msg, args...)
}
// Debug returns the DEBUG-level logger.
func Debug() *Logger {
	return getLogger(DebugLevel)
}
// Info returns the INFO-level logger.
func Info() *Logger {
	return getLogger(InfoLevel)
}
// Warn returns the WARN-level logger.
func Warn() *Logger {
	return getLogger(WarnLevel)
}
// Error returns the ERROR-level logger.
func Error() *Logger {
	return getLogger(ErrorLevel)
}
// Fatal returns the FATAL-level logger (its Print methods exit the process).
func Fatal() *Logger {
	return getLogger(FatalLevel)
}
// NanoLogger exposes the package-level loggers behind a value that can be
// passed around (e.g. for dependency injection).
type NanoLogger struct {
}
// Debug returns the DEBUG-level logger.
func (l *NanoLogger) Debug() *Logger {
	return Debug()
}
// Info returns the INFO-level logger.
func (l *NanoLogger) Info() *Logger {
	return Info()
}
// Warn returns the WARN-level logger.
func (l *NanoLogger) Warn() *Logger {
	return Warn()
}
// Error returns the ERROR-level logger.
func (l *NanoLogger) Error() *Logger {
	return Error()
}
// Fatal returns the FATAL-level logger.
func (l *NanoLogger) Fatal() *Logger {
	return Fatal()
}
// std is the shared default NanoLogger instance.
var std = &NanoLogger{}
// DefaultLogger returns the shared NanoLogger instance.
func DefaultLogger() *NanoLogger {
	return std
}
|
package entity
import "gorm.io/gorm"
// Review is the GORM model for a user's review of a movie, with the
// associated Movie and User loaded via their foreign keys.
type Review struct {
	gorm.Model
	UserID uint
	MovieID uint
	Review string
	// Rate is the numeric score. NOTE(review): no range is enforced here —
	// presumably validated upstream; confirm against the handlers.
	Rate uint
	Movie Movie
	User User
}
|
package atgo
import (
"context"
"fmt"
"github.com/hashicorp/errwrap"
pay "github.com/wondenge/at-go/payments"
"go.uber.org/zap"
)
// TopupStash moves money from a Payment Product to an application stash.
// An application stash is the wallet that funds your service usage expenses.
func (c *Client) TopupStash(ctx context.Context, p *pay.TopupStashPayload) (res *pay.TopupStashResponse, err error) {
	// Allocate the response before the call: the previous code passed a nil
	// pointer to the JSON decoder, so the decoded body was always lost.
	res = &pay.TopupStashResponse{}
	if err := c.requestJSONBody(ctx, "POST", fmt.Sprintf("%s%s", c.PaymentEndpoint, "/topup/stash"), p, res); err != nil {
		err = errwrap.Wrapf("could not make new http request: {{err}}", err)
		c.Log.Info("error", zap.Error(err))
		// Propagate the failure: the previous code logged it, then
		// returned a nil error, silently swallowing the problem.
		return nil, err
	}
	return res, nil
}
|
package peer
import (
"net"
)
// CoreTCPSocketOption stores TCP tuning knobs (read/write buffer sizes
// and Nagle's algorithm) to be applied to connections.
// A negative buffer size means "leave the OS default" (see ApplySocketOption).
type CoreTCPSocketOption struct {
	readBufferSize int
	writeBufferSize int
	noDelay bool
}
// SetSocketBuffer records the desired read/write buffer sizes (bytes) and
// whether TCP_NODELAY should be enabled. A negative size leaves the OS
// default untouched when the option is later applied.
func (opt *CoreTCPSocketOption) SetSocketBuffer(readBufferSize, writeBufferSize int, noDelay bool) {
	opt.readBufferSize = readBufferSize
	opt.writeBufferSize = writeBufferSize
	opt.noDelay = noDelay
}
// ApplySocketOption applies the stored settings to conn when it is a
// *net.TCPConn; other connection types are left untouched.
func (opt *CoreTCPSocketOption) ApplySocketOption(conn net.Conn) {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return
	}
	if opt.readBufferSize >= 0 {
		tcpConn.SetReadBuffer(opt.readBufferSize)
	}
	if opt.writeBufferSize >= 0 {
		tcpConn.SetWriteBuffer(opt.writeBufferSize)
	}
	tcpConn.SetNoDelay(opt.noDelay)
}
// Init resets both buffer sizes to -1 ("use OS defaults").
// noDelay intentionally keeps its current value.
func (opt *CoreTCPSocketOption) Init() {
	opt.readBufferSize, opt.writeBufferSize = -1, -1
}
|
package entity
// TwitterUser is a minimal projection of a Twitter account profile.
type TwitterUser struct {
	ID string
	Name string
	Username string
	Description string
	ProfileImageURL string
}
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package header
import (
"encoding/binary"
tcpip "github.com/brewlin/net-protocol/protocol"
)
// ICMPv6 represents an ICMPv6 header stored in a byte array.
// Accessors index the slice directly with no bounds checks, so the slice
// must be at least ICMPv6MinimumSize bytes long.
type ICMPv6 []byte
const (
	// ICMPv6MinimumSize is the minimum size of a valid ICMP packet.
	ICMPv6MinimumSize = 4
	// ICMPv6ProtocolNumber is the ICMP transport protocol number.
	ICMPv6ProtocolNumber tcpip.TransportProtocolNumber = 58
	// ICMPv6NeighborSolicitMinimumSize is the minimum size of a
	// neighbor solicitation packet.
	ICMPv6NeighborSolicitMinimumSize = ICMPv6MinimumSize + 4 + 16
	// ICMPv6NeighborAdvertSize is size of a neighbor advertisement.
	ICMPv6NeighborAdvertSize = 32
	// ICMPv6EchoMinimumSize is the minimum size of a valid ICMP echo packet.
	ICMPv6EchoMinimumSize = 8
	// ICMPv6DstUnreachableMinimumSize is the minimum size of a valid ICMP
	// destination unreachable packet.
	ICMPv6DstUnreachableMinimumSize = ICMPv6MinimumSize + 4
	// ICMPv6PacketTooBigMinimumSize is the minimum size of a valid ICMP
	// packet-too-big packet.
	ICMPv6PacketTooBigMinimumSize = ICMPv6MinimumSize + 4
)
// ICMPv6Type is the ICMP type field described in RFC 4443 and friends.
type ICMPv6Type byte
// Typical values of ICMPv6Type defined in RFC 4443.
const (
	ICMPv6DstUnreachable ICMPv6Type = 1
	ICMPv6PacketTooBig ICMPv6Type = 2
	ICMPv6TimeExceeded ICMPv6Type = 3
	ICMPv6ParamProblem ICMPv6Type = 4
	ICMPv6EchoRequest ICMPv6Type = 128
	ICMPv6EchoReply ICMPv6Type = 129
	// Neighbor Discovery Protocol (NDP) messages, see RFC 4861.
	ICMPv6RouterSolicit ICMPv6Type = 133
	ICMPv6RouterAdvert ICMPv6Type = 134
	ICMPv6NeighborSolicit ICMPv6Type = 135
	ICMPv6NeighborAdvert ICMPv6Type = 136
	ICMPv6RedirectMsg ICMPv6Type = 137
)
// Values for ICMP code as defined in RFC 4443.
const (
	ICMPv6PortUnreachable = 4
)
// Type is the ICMP type field (byte 0).
func (b ICMPv6) Type() ICMPv6Type { return ICMPv6Type(b[0]) }
// SetType sets the ICMP type field (byte 0).
func (b ICMPv6) SetType(t ICMPv6Type) { b[0] = byte(t) }
// Code is the ICMP code field (byte 1). Its meaning depends on the value of Type.
func (b ICMPv6) Code() byte { return b[1] }
// SetCode sets the ICMP code field (byte 1).
func (b ICMPv6) SetCode(c byte) { b[1] = c }
// Checksum is the ICMP checksum field (bytes 2-3, big-endian).
func (b ICMPv6) Checksum() uint16 {
	return binary.BigEndian.Uint16(b[2:])
}
// SetChecksum stores a caller-computed checksum into bytes 2-3.
// Despite the original comment, no calculation happens here.
func (b ICMPv6) SetChecksum(checksum uint16) {
	binary.BigEndian.PutUint16(b[2:], checksum)
}
// SourcePort implements Transport.SourcePort.
// ICMP has no port concept, so this is always 0.
func (ICMPv6) SourcePort() uint16 {
	return 0
}
// DestinationPort implements Transport.DestinationPort.
// ICMP has no port concept, so this is always 0.
func (ICMPv6) DestinationPort() uint16 {
	return 0
}
// SetSourcePort implements Transport.SetSourcePort (no-op for ICMP).
func (ICMPv6) SetSourcePort(uint16) {
}
// SetDestinationPort implements Transport.SetDestinationPort (no-op for ICMP).
func (ICMPv6) SetDestinationPort(uint16) {
}
// Payload implements Transport.Payload, returning everything after the
// 4-byte fixed header.
func (b ICMPv6) Payload() []byte {
	return b[ICMPv6MinimumSize:]
}
|
package request
import (
"marketplace/transactions/domain"
)
// GetAdsResponse is the JSON shape of an ad; Picture is never serialized.
type GetAdsResponse struct {
	Id int64 `json:"id"`
	Title string `json:"title"`
	Description string `json:"description"`
	Price float64 `json:"price"`
	UserId int64 `json:"userId"`
	Picture string `json:"-"`
	Sold bool `json:"sold"`
}
// GetMessageResponse is the JSON shape of a transaction message; the
// resolved Sender/Transaction pointers are kept for server-side use only.
type GetMessageResponse struct {
	Id int64 `json:"id"`
	SenderId int64 `json:"senderId"`
	Sender *domain.Account `json:"-"`
	TransactionId int64 `json:"transactionId"`
	Transaction *domain.Transaction `json:"-"`
	Message string `json:"message"`
}
// GetAccountResponse is the JSON shape of an account; sensitive fields
// (password, balance, ads) are excluded from serialization.
type GetAccountResponse struct {
	Id int64 `json:"id"`
	Email string `json:"email"`
	Username string `json:"username"`
	Password string `json:"-"`
	Balance float64 `json:"-"`
	Ads []domain.Ads `json:"-"`
	Admin bool `json:"admin"`
}
// GetTransactionsResponse is the JSON shape of a transaction with its
// resolved buyer, seller, ad and message history.
type GetTransactionsResponse struct {
	Id int64 `json:"id"`
	Buyer GetAccountResponse `json:"buyer"`
	BuyerId int64 `json:"buyerId"`
	Seller GetAccountResponse `json:"seller"`
	SellerId int64 `json:"sellerId"`
	Ads GetAdsResponse `json:"ads"`
	AdsId int64 `json:"adsId"`
	Messages []GetMessageResponse `json:"messages"`
	Bid float64 `json:"bid"`
	Status string `json:"status"`
}
// ConvertToResponse maps domain transactions (and their nested messages,
// accounts and ad) onto the JSON response DTOs.
// The Buyer/Seller/Ads struct conversions rely on the response structs
// having exactly the same field layout as the domain structs.
// NOTE(review): assumes transac.Buyer, transac.Seller and transac.Ads are
// non-nil — confirm against the repository layer.
func ConvertToResponse(transacs []domain.Transaction) []GetTransactionsResponse {
	// Pre-size both slices to avoid repeated reallocation while appending.
	convertedTransacs := make([]GetTransactionsResponse, 0, len(transacs))
	for _, transac := range transacs {
		convertedMessages := make([]GetMessageResponse, 0, len(transac.Messages))
		for _, message := range transac.Messages {
			convertedMessages = append(convertedMessages, GetMessageResponse{
				Id:            message.Id,
				SenderId:      message.SenderId,
				Sender:        message.Sender,
				TransactionId: message.TransactionId,
				Transaction:   message.Transaction,
				Message:       message.Message,
			})
		}
		convertedTransacs = append(convertedTransacs, GetTransactionsResponse{
			Id:       transac.Id,
			Buyer:    GetAccountResponse(*transac.Buyer),
			BuyerId:  transac.BuyerId,
			Seller:   GetAccountResponse(*transac.Seller),
			SellerId: transac.SellerId,
			Ads:      GetAdsResponse(*transac.Ads),
			AdsId:    transac.AdsId,
			Messages: convertedMessages,
			Bid:      transac.Bid,
			Status:   transac.Status,
		})
	}
	return convertedTransacs
}
|
/*
author:admin
createTime: 2022-06-16 16:30
*/
package main
import "fmt"
// bubbleSort sorts arr in place in ascending order and returns it.
// It prints the slice after every swap (deliberate tracing to show the
// algorithm's progress).
func bubbleSort(arr []int) []int {
	if len(arr) <= 1 {
		return arr
	}
	for i := 0; i < len(arr)-1; i++ {
		swapped := false
		for j := 0; j < len(arr)-i-1; j++ {
			if arr[j] > arr[j+1] {
				arr[j], arr[j+1] = arr[j+1], arr[j]
				swapped = true
				fmt.Println("current:", arr)
			}
		}
		// Early exit: a pass with no swaps means the slice is already
		// sorted, improving the best case from O(n^2) to O(n) without
		// changing the printed trace (prints only happen on swaps).
		if !swapped {
			break
		}
	}
	return arr
}
// bubbleSortByRecursion sorts arr in place in ascending order: each call
// bubbles the largest element to the end, then recurses on the prefix.
func bubbleSortByRecursion(arr []int) []int {
	n := len(arr)
	if n <= 1 {
		return arr
	}
	for j := 0; j+1 < n; j++ {
		if arr[j] > arr[j+1] {
			arr[j], arr[j+1] = arr[j+1], arr[j]
		}
	}
	bubbleSortByRecursion(arr[:n-1])
	return arr
}
// main demonstrates both bubble sort variants on the same sample slice.
func main() {
	arr := bubbleSort([]int{6, 5, 3, 1, 8, 7, 2, 4})
	for _, v := range arr {
		fmt.Printf("%d\t", v)
	}
	fmt.Println("recursion:", bubbleSortByRecursion(arr))
}
|
package udwSync
import (
"github.com/tachyon-protocol/udw/udwTest"
"sync"
"testing"
)
// TestWaitGroup exercises the Rc reference-counted wait group under a set
// of add/done/wait interleavings, plus a comparison stress run against
// the standard library sync.WaitGroup.
func TestWaitGroup(ot *testing.T) {
	{
		// Trivial: add then done; Wait must return immediately.
		wg := Rc{}
		wg.Add(1)
		wg.Done()
		wg.Wait()
	}
	{
		// Wait must observe the goroutine's increment of i before returning.
		i := Int{}
		wg := Rc{}
		wg.Inc()
		go func() {
			i.Inc()
			wg.Dec()
		}()
		wg.Wait()
		udwTest.Equal(i.Get(), 1)
	}
	{
		// Nested counting: the outer goroutine Incs once per worker before
		// its own Dec; after any Wait returns, all 10 increments must be
		// visible — checked repeatedly.
		counter := Int{}
		wg := Rc{}
		wg.Inc()
		go func() {
			defer wg.Dec()
			for i := 0; i < 10; i++ {
				wg.Inc()
				go func() {
					counter.Inc()
					wg.Dec()
				}()
			}
		}()
		for i := 0; i < 10; i++ {
			wg.Wait()
			udwTest.Equal(counter.Get(), 10)
		}
	}
	{
		// Stress the standard library WaitGroup with concurrent
		// Add/Done/Wait for comparison.
		for i := 0; i < 10; i++ {
			wg := sync.WaitGroup{}
			wg.Add(1)
			for j := 0; j < 10; j++ {
				go func() {
					wg.Wait()
				}()
			}
			wg.Add(10)
			for j := 0; j < 10; j++ {
				go func() {
					wg.Add(1)
					go func() {
						wg.Done()
					}()
					wg.Done()
				}()
			}
			wg.Done()
			wg.Wait()
		}
	}
	{
		// Concurrent Add/Done from another goroutine while the main
		// goroutine finishes its own count and waits.
		wg := Rc{}
		wg.Add(1)
		go func() {
			wg.Wait()
		}()
		go func() {
			wg.Add(1)
			wg.Done()
		}()
		wg.Done()
		wg.Wait()
	}
}
|
package main
import (
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
"github.com/than-os/sentinel-bot/nodes/master-node/service"
)
// main wires up the master-node HTTP API: logging/recovery middleware,
// permissive CORS, and the user-management routes, then serves on :30002.
func main() {
	e := echo.New()
	//middlewares
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	//CORS: allow any origin for the supported verbs.
	e.Use(middleware.CORSWithConfig(middleware.CORSConfig{
		AllowOrigins: []string{"*"},
		AllowMethods: []string{echo.GET, echo.POST, echo.DELETE},
	}))
	e.GET("/", service.RootFunc)
	e.POST("/user", service.AddNewUser)
	e.DELETE("/user", service.RemoveUser)
	// Start the server. Previously the returned error was silently
	// discarded, hiding bind failures such as an already-occupied port.
	e.Logger.Fatal(e.Start(":30002"))
} |
package main
import (
"flag"
"fmt"
"os"
"github.com/n0x1m/md2gmi/mdproc"
"github.com/n0x1m/md2gmi/pipe"
)
// main converts Markdown read from -i (or stdin) to gemtext written to -o
// (or stdout) via the mdproc pipeline.
func main() {
	var in, out string
	flag.StringVar(&in, "i", "", "specify a .md (Markdown) file to read from, otherwise stdin (default)")
	flag.StringVar(&out, "o", "", "specify a .gmi (gemtext) file to write to, otherwise stdout (default)")
	flag.Parse()

	r, err := reader(in)
	if err != nil {
		fatal(err)
	}
	w, err := writer(out)
	if err != nil {
		fatal(err)
	}

	// Assemble the pipeline: preprocessing, front-matter removal, then
	// heading and link formatting.
	s := pipe.New()
	s.Use(mdproc.Preprocessor())
	s.Use(mdproc.RemoveFrontMatter)
	s.Use(mdproc.FormatHeadings)
	s.Use(mdproc.FormatLinks)
	s.Handle(source(r), sink(w))
}

// fatal prints err to stderr and exits with status 1.
func fatal(err error) {
	fmt.Fprint(os.Stderr, err.Error())
	os.Exit(1)
}
|
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2015 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package carno outputs carno service descriptions in Go code.
// It runs as a plugin for the Go protocol buffer compiler plugin.
// It is linked in to protoc-gen-go.
package carno
import (
"fmt"
"strconv"
"strings"
pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"github.com/ccsnake/protobuf/protoc-gen-go/generator"
"sync"
)
// generatedCodeVersion indicates a version of the generated code.
// It is incremented whenever an incompatibility between the generated code and
// the carno package is introduced; the generated code references
// a constant, carno.SupportPackageIsVersionN (where N is generatedCodeVersion).
const generatedCodeVersion = 4
// Register this plugin with the protoc-gen-go generator at load time.
func init() {
	generator.RegisterPlugin(newCarno())
}
// carno is an implementation of the Go protocol buffer compiler's
// plugin architecture. It generates bindings for carno support.
type carno struct {
	gen *generator.Generator
	// serverBuilder emits the per-package client bundle and init glue
	// exactly once (guarded by a sync.Once set up in Init).
	serverBuilder func()
}
// newCarno returns an empty plugin instance; Init wires it to a generator.
func newCarno() *carno {
	return &carno{}
}
// Name returns the name of this plugin, "carno".
func (g *carno) Name() string {
	return "carno"
}
// Init initializes the plugin.
// It records the generator, collects every service grouped by proto
// package, and prepares serverBuilder to emit the package-level glue at
// most once across all generated files.
func (g *carno) Init(gen *generator.Generator) {
	g.gen = gen
	pkgService := make(map[string][]string)
	for _, file := range gen.Request.ProtoFile {
		for _, service := range file.Service {
			pkgService[file.GetPackage()] = append(pkgService[file.GetPackage()], service.GetName())
		}
	}
	var once sync.Once
	g.serverBuilder = func() {
		once.Do(func() {
			for pkg, services := range pkgService {
				g.generateServerPackage(pkg, services...)
				g.generateInit(pkg)
			}
		})
	}
}
// generateInit emits the ServerName variable and the InitCarno helper
// that registers the package with the carno runtime.
func (g *carno) generateInit(pkg string) {
	pkgQ := strconv.Quote(pkg)
	g.P("var ServerName = ", pkgQ)
	g.P("func InitCarno(opts ...carno.Option) error{")
	g.P("return carno.Init(", pkgQ, ", opts...)")
	g.P("}")
}
// Given a type name defined in a .proto, return its object.
// Also record that we're using it, to guarantee the associated import.
func (g *carno) objectNamed(name string) generator.Object {
	g.gen.RecordTypeUse(name)
	return g.gen.ObjectNamed(name)
}
// Given a type name defined in a .proto, return its name as we will print it.
func (g *carno) typeName(str string) string {
	return g.gen.TypeName(g.objectNamed(str))
}
// P forwards to g.gen.P.
func (g *carno) P(args ...interface{}) { g.gen.P(args...) }
// Generate generates code for the services in the given file; files with
// no services are skipped. Package-level glue is emitted once via
// serverBuilder before the per-service code.
func (g *carno) Generate(file *generator.FileDescriptor) {
	if len(file.FileDescriptorProto.Service) == 0 {
		return
	}
	g.P("// Reference imports to suppress errors if they are not otherwise used.")
	g.P()
	// Assert version compatibility.
	g.P("// This is a compile-time assertion to ensure that this generated file")
	g.P("// is compatible with the carno package it is being compiled against.")
	g.P()
	g.serverBuilder()
	for i, service := range file.FileDescriptorProto.Service {
		g.generateService(file, service, i)
	}
}
// GenerateImports generates the import declaration for this file.
// Nothing is emitted for files without services.
func (g *carno) GenerateImports(file *generator.FileDescriptor) {
	if len(file.FileDescriptorProto.Service) == 0 {
		return
	}
	g.P("import (")
	g.P(strconv.Quote("github.com/ccsnake/carno"))
	g.P(strconv.Quote("github.com/ccsnake/carno/client"))
	g.P(strconv.Quote("github.com/ccsnake/carno/mux"))
	g.P(strconv.Quote("context"))
	g.P(")")
	g.P()
}
// reservedClientName records whether a client name is reserved on the client side.
// Method names found here get a trailing underscore to avoid collisions.
var reservedClientName = map[string]bool{
	// TODO: do we need any in carno?
}
func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
// generateService generates all the code for the named service: the
// client interface and implementation, the server interface with its
// registration helper, and the service descriptor.
func (g *carno) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
	path := fmt.Sprintf("6,%d", index) // 6 means service.
	origServName := service.GetName()
	fullServName := origServName
	// The proto package acts as the server name.
	if pkg := file.GetPackage(); pkg != "" {
		fullServName = pkg + "@" + fullServName
	}
	servName := generator.CamelCase(origServName)
	g.P()
	g.P("// Client API for ", servName, " service")
	// Client interface.
	g.P("type ", servName, "Client interface {")
	for i, method := range service.Method {
		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
		g.P(g.generateClientSignature(servName, method))
	}
	g.P("}")
	g.P()
	// Client structure.
	g.P("type ", unexport(servName), "Client struct {")
	g.P("client.Client")
	g.P("}")
	g.P()
	// NewClient factory.
	g.P("func New", servName, "Client (opts ...client.Option) (", servName, "Client, error) {")
	g.P(` c,err := carno.NewClient(`, strconv.Quote(file.GetPackage()), `,opts...)`)
	g.P("if err!=nil{")
	g.P("return nil,err")
	g.P("}")
	g.P("rv := &", unexport(servName), "Client{Client: c}")
	g.P("return rv, c.Start()")
	g.P("}")
	g.P()
	var methodIndex int
	serviceDescVar := "_" + servName + "_serviceDesc"
	// Client method implementations.
	for _, method := range service.Method {
		descExpr := fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
		methodIndex++
		g.generateClientMethod(file.GetPackage(), origServName, fullServName, serviceDescVar, method, descExpr)
	}
	g.P("// Server API for ", servName, " service")
	// Server interface.
	serverType := servName + "Server"
	g.P("type ", serverType, " interface {")
	for i, method := range service.Method {
		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
		g.P(g.generateServerSignature(servName, method))
	}
	g.P("}")
	g.P()
	g.generateServerSetting(file)
	g.P()
	g.P("func Register", servName, "Server(srv ", serverType, ") {")
	g.P("carno.HandleService(&", serviceDescVar, `, srv)`)
	g.P("}")
	g.P()
	// Service descriptor. Streaming methods are skipped: only unary
	// method names are listed in the descriptor.
	g.P("var ", serviceDescVar, " = ", "mux.ServiceDesc {")
	g.P("ServiceName: ", strconv.Quote(origServName), ",")
	g.P("Methods: []", "string{")
	for _, method := range service.Method {
		if method.GetServerStreaming() || method.GetClientStreaming() {
			continue
		}
		g.P(strconv.Quote(method.GetName()), ",")
	}
	g.P("},")
	g.P("}")
	g.P()
}
// generateClientSignature returns the client-side signature for a method:
// MethodName(ctx, *InType, opts...) (*OutType, error), with a streaming
// client type substituted as the result for streaming methods and the
// request argument dropped for client-streaming methods.
func (g *carno) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
	origMethName := method.GetName()
	methName := generator.CamelCase(origMethName)
	if reservedClientName[methName] {
		methName += "_"
	}
	reqArg := ", in *" + g.typeName(method.GetInputType())
	if method.GetClientStreaming() {
		reqArg = ""
	}
	respName := "*" + g.typeName(method.GetOutputType())
	if method.GetServerStreaming() || method.GetClientStreaming() {
		respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
	}
	return fmt.Sprintf("%s(ctx context.Context%s, opts ...client.CallOption) (%s, error)", methName, reqArg, respName)
}
// generateClientMethod emits the concrete client implementation for one
// unary method: allocate the response, invoke through the carno client,
// and return both values.
func (g *carno) generateClientMethod(pkgName, servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
	outType := g.typeName(method.GetOutputType())
	sig := g.generateClientSignature(servName, method)
	g.P("func (c *", unexport(servName), "Client) ", sig, "{")
	g.P("out := new(", outType, ")")
	g.P(`err:=c.Client.Call(ctx, `, strconv.Quote(servName), ",", strconv.Quote(method.GetName()), `, in, out, opts...)`)
	g.P("return out, err")
	g.P("}")
	g.P()
}
// generateServerSignature returns the server-side signature for a method:
// MethodName(context.Context, *InType) (*OutType, error).
// servName is unused but kept for parity with generateClientSignature.
func (g *carno) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
	methName := generator.CamelCase(method.GetName())
	if reservedClientName[methName] {
		methName += "_"
	}
	// The previous version initialized ret to "error" and unconditionally
	// overwrote it; the dead assignment has been removed.
	reqArgs := []string{"context.Context", "*" + g.typeName(method.GetInputType())}
	ret := "(*" + g.typeName(method.GetOutputType()) + ", error)"
	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
}
// generateServerSetting validates that the file declares a proto package;
// code generation keys everything on the package name.
// NOTE(review): it only panics on an empty package and emits no code.
func (g *carno) generateServerSetting(file *generator.FileDescriptor) {
	pkg := file.GetPackage()
	if pkg == "" {
		panic("empty package")
	}
}
// generateServerPackage emits a per-package client bundle: a struct that
// embeds one client per service, plus a New<Pkg> constructor that dials
// the carno client once, starts it, and shares it across all services.
func (g *carno) generateServerPackage(pkg string, services ...string) {
	camelCasePkgName := generator.CamelCase(strings.Replace(pkg, ".", "_", -1))
	g.P("type ", camelCasePkgName, " struct{")
	for _, service := range services {
		g.P(generator.CamelCase(service), "Client")
	}
	g.P("}")
	g.P("")
	g.P("func New", camelCasePkgName, "(opts ...client.Option) (*", camelCasePkgName, ",error){")
	g.P(` c,err := carno.NewClient(`, strconv.Quote(pkg), `,opts...)`)
	g.P("if err!=nil{")
	g.P("return nil,err")
	g.P("}")
	g.P("if err:=c.Start();err!=nil{")
	g.P("return nil,err")
	g.P("}")
	g.P("return &", camelCasePkgName, "{")
	for _, service := range services {
		g.P(generator.CamelCase(service), "Client: &", unexport(service), "Client{Client:c},")
	}
	g.P("},nil")
	g.P("}")
	g.P("")
}
|
package filewatch
import (
"context"
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/jonboulle/clockwork"
"github.com/tilt-dev/tilt/internal/watch"
"github.com/tilt-dev/tilt/pkg/apis"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/logger"
)
// MaxFileEventHistory is the maximum number of file events that will be retained on the FileWatch status.
const MaxFileEventHistory = 20
// maxRestartBackoff caps the exponential backoff between watcher restarts.
const maxRestartBackoff = 5 * time.Minute
// DetectedOverflowErrMsg is surfaced when the OS event queue overflows.
const DetectedOverflowErrMsg = `It looks like the inotify event queue has overflowed. Check these instructions for how to raise the queue limit: https://facebook.github.io/watchman/docs/install#system-specific-preparation`
// watcher tracks the runtime state of one FileWatch object's filesystem
// notifier. All mutable fields are guarded by mu.
type watcher struct {
	clock clockwork.Clock
	name types.NamespacedName
	spec v1alpha1.FileWatchSpec
	status *v1alpha1.FileWatchStatus
	mu sync.Mutex
	// restartBackoff doubles on each cleanup, capped at maxRestartBackoff.
	restartBackoff time.Duration
	// doneAt records when the watcher was torn down (used for backoff).
	doneAt time.Time
	done bool
	notify watch.Notify
	cancel func()
}
// shouldRestart reports whether the watcher is done and its backoff
// window has elapsed. While the window is still open it returns a Result
// requesting a requeue once the window closes.
func (w *watcher) shouldRestart() (bool, ctrl.Result) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if !w.done {
		return false, ctrl.Result{}
	}
	elapsed := w.clock.Since(w.doneAt)
	if elapsed < w.restartBackoff {
		return false, ctrl.Result{RequeueAfter: w.restartBackoff - elapsed}
	}
	return true, ctrl.Result{}
}
// cleanupWatch stops watching for changes and frees up resources.
// It is idempotent: a second call returns immediately.
func (w *watcher) cleanupWatch(ctx context.Context) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.done {
		return
	}
	if w.notify != nil {
		if err := w.notify.Close(); err != nil {
			logger.Get(ctx).Debugf("Failed to close notifier for %q: %v", w.name.String(), err)
		}
	}
	// Exponential backoff (capped) before this watcher may be restarted.
	w.restartBackoff *= 2
	if w.restartBackoff > maxRestartBackoff {
		w.restartBackoff = maxRestartBackoff
	}
	w.doneAt = w.clock.Now()
	// Record a generic error unless the context was canceled (normal
	// shutdown) or a more specific error is already set.
	if ctx.Err() == nil && w.status.Error == "" {
		w.status.Error = "unexpected close"
	}
	w.cancel()
	w.done = true
}
// copyStatus returns a deep copy of the current status, safe for use
// outside the lock.
func (w *watcher) copyStatus() *v1alpha1.FileWatchStatus {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.status.DeepCopy()
}
// recordError stores err on the status, or clears the status error when
// err is nil.
func (w *watcher) recordError(err error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	msg := ""
	if err != nil {
		msg = err.Error()
	}
	w.status.Error = msg
}
// recordEvent appends a file event for the given filesystem events to the
// status, trims history to MaxFileEventHistory, and clears any stale
// error. Event batches with no files are ignored.
func (w *watcher) recordEvent(fsEvents []watch.FileEvent) {
	now := apis.NowMicro()
	w.mu.Lock()
	defer w.mu.Unlock()
	event := v1alpha1.FileEvent{Time: *now.DeepCopy()}
	for _, fsEvent := range fsEvents {
		event.SeenFiles = append(event.SeenFiles, fsEvent.Path())
	}
	if len(event.SeenFiles) != 0 {
		w.status.LastEventTime = *now.DeepCopy()
		w.status.FileEvents = append(w.status.FileEvents, event)
		// Keep only the most recent MaxFileEventHistory events.
		if len(w.status.FileEvents) > MaxFileEventHistory {
			w.status.FileEvents = w.status.FileEvents[len(w.status.FileEvents)-MaxFileEventHistory:]
		}
		w.status.Error = ""
	}
}
|
package review
import (
. "movie-app/entity"
"movie-app/movie"
"movie-app/user"
)
// ReviewFormatter is the API representation of a Review, embedding the
// formatted movie and user instead of the raw entities.
type ReviewFormatter struct {
	ID int `json:"id"`
	UserID uint `json:"user_id"`
	MovieID uint `json:"movie_id"`
	Review string `json:"review"`
	Rate uint `json:"rate"`
	Movie movie.MovieFormatter `json:"movie"`
	User user.UserFormatter `json:"user"`
}
// FormatReview maps a Review entity onto its API representation,
// formatting the nested movie and user along the way.
func FormatReview(review Review) ReviewFormatter {
	return ReviewFormatter{
		ID:      int(review.ID),
		UserID:  review.UserID,
		MovieID: review.MovieID,
		Review:  review.Review,
		Rate:    review.Rate,
		Movie:   movie.FormatMovie(review.Movie),
		User:    user.FormatUser(review.User),
	}
}
// FormatReviews maps each Review onto its API representation. An empty
// input yields an empty (non-nil) slice so JSON encodes [] rather than null.
func FormatReviews(reviews []Review) []ReviewFormatter {
	formatted := make([]ReviewFormatter, 0, len(reviews))
	for _, r := range reviews {
		formatted = append(formatted, FormatReview(r))
	}
	return formatted
}
|
package apilifecycle
// OnStart registers the handler invoked when the API lifecycle starts.
func (api *APILifeCycle) OnStart(handler HandlerCycle) {
	api.onStart = handler
}
// GetOnStart returns the registered start handler (nil if none was set).
func (api *APILifeCycle) GetOnStart() HandlerCycle {
	return api.onStart
}
|
package verdeps
// String-matching constants used when scanning Go sources for import
// paths; the leading escaped quote anchors matches to string literals.
const (
	gophrPrefix = "\"gophr.pm/"
	goFileSuffix = ".go"
	githubPrefix = "\"github.com/"
)
|
package shardmaster
import "raft"
import "labrpc"
import "sync"
import "encoding/gob"
import "log"
import "time"
// Debug gates DPrintf output; set to a value > 0 to enable.
var Debug = 0
// time_1 is the timeout used while waiting for Raft to commit an op.
const time_1 = time.Second * 1
// DPrintf logs through log.Printf when debugging is enabled.
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug > 0 {
		log.Printf(format, a...)
	}
	return
}
// ShardMaster replicates shard-configuration decisions through Raft.
type ShardMaster struct {
	mu sync.Mutex
	me int
	rf *raft.Raft
	applyCh chan raft.ApplyMsg
	// Your data here.
	// ack maps client id -> request id; presumably the latest request seen
	// per client for duplicate suppression (see duplication()) — confirm.
	ack map[int64]int
	// ret delivers an applied result back to the RPC handler waiting on a
	// particular Raft log index.
	ret map[int]chan OpReply
	configNum int
	configs []Config // indexed by config num
}
// Operation names stored in Op.OpType.
const Join = "Join"
const Leave = "Leave"
const Move = "Move"
const Query = "Query"
// Op is the command replicated through the Raft log; Args holds the
// concrete *Args value for the operation named by OpType.
type Op struct {
	// Your data here.
	OpType string
	Args interface{}
}
// OpReply carries an applied operation's outcome from the Raft apply loop
// back to the RPC handler waiting on that log index.
// The args/reply fields are unexported: they are only read within this
// package. The args field was previously declared as exported "Args",
// which did not compile — every use site (log.args, result.args) refers
// to the lower-case name.
type OpReply struct {
	OpType string
	args   interface{}
	reply  interface{}
}
// Join submits a Join command through Raft and waits for it to be applied.
// WrongLeader is reported when this replica is not the leader, when the
// entry committed at our index belongs to a different client/request
// (leadership was lost and the slot overwritten), or on timeout.
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
	// Your code here.
	f := Op{OpType: "Join", Args: *args}
	index, _, isLeader := sm.rf.Start(f)
	if isLeader == false {
		reply.WrongLeader = true
		return
	}
	// Create (or reuse) the channel on which the apply loop delivers the
	// result for this log index.
	sm.mu.Lock()
	_, ok := sm.ret[index]
	if ok == false {
		sm.ret[index] = make(chan OpReply, 1)
	}
	cha := sm.ret[index]
	sm.mu.Unlock()
	select {
	case log := <- cha:
		// Verify the committed entry is really ours before trusting it.
		argus, ok := log.args.(JoinArgs)
		if ok == false {
			reply.WrongLeader = true
		}else{
			if args.ClientId == argus.ClientId {
				if args.RequestId == argus.RequestId {
					reply.Err = log.reply.(JoinReply).Err
					reply.WrongLeader = false
				}else{
					reply.WrongLeader= true
				}
			}else{
				reply.WrongLeader = true
			}
		}
	case <- time.After(time_1):
		// Raft failed to commit within the timeout.
		reply.WrongLeader = true
	}
}
// Leave submits a Leave command through Raft and waits for it to be
// applied, validating leadership and the committed entry's identity
// exactly as Join does.
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
	// Your code here.
	f := Op{OpType: "Leave", Args: *args}
	index, _, isLeader := sm.rf.Start(f)
	if isLeader == false {
		reply.WrongLeader = true
		return
	}
	// Channel on which the apply loop delivers the result for this index.
	sm.mu.Lock()
	_, ok := sm.ret[index]
	if ok == false {
		sm.ret[index] = make(chan OpReply, 1)
	}
	cha := sm.ret[index]
	sm.mu.Unlock()
	select {
	case log := <- cha:
		// Confirm the committed entry matches our client/request ids.
		argus, ok := log.args.(LeaveArgs)
		if ok == false {
			reply.WrongLeader = true
		}else{
			if args.ClientId == argus.ClientId {
				if args.RequestId == argus.RequestId {
					reply.Err = log.reply.(LeaveReply).Err
					reply.WrongLeader = false
				}else{
					reply.WrongLeader= true
				}
			}else{
				reply.WrongLeader = true
			}
		}
	case <- time.After(time_1):
		// Commit timed out.
		reply.WrongLeader = true
	}
}
// Move submits a Move command through Raft and waits for it to be
// applied, validating leadership and the committed entry's identity
// exactly as Join does.
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
	// Your code here.
	f := Op{OpType: "Move", Args: *args}
	index, _, isLeader := sm.rf.Start(f)
	if isLeader == false {
		reply.WrongLeader = true
		return
	}
	// Channel on which the apply loop delivers the result for this index.
	sm.mu.Lock()
	_, ok := sm.ret[index]
	if ok == false {
		sm.ret[index] = make(chan OpReply, 1)
	}
	cha := sm.ret[index]
	sm.mu.Unlock()
	select {
	case log := <- cha:
		// Confirm the committed entry matches our client/request ids.
		argus, ok := log.args.(MoveArgs)
		if ok == false {
			reply.WrongLeader = true
		}else{
			if args.ClientId == argus.ClientId {
				if args.RequestId == argus.RequestId {
					reply.Err = log.reply.(MoveReply).Err
					reply.WrongLeader = false
				}else{
					reply.WrongLeader= true
				}
			}else{
				reply.WrongLeader = true
			}
		}
	case <- time.After(time_1):
		// Commit timed out.
		reply.WrongLeader = true
	}
}
// Query submits a read of a configuration through Raft (so reads are
// linearizable) and waits for it to be applied, validating leadership and
// the committed entry's identity exactly as Join does.
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
	// Your code here.
	f := Op{OpType: "Query", Args: *args}
	index, _, isLeader := sm.rf.Start(f)
	if isLeader == false {
		reply.WrongLeader = true
		return
	}
	// Channel on which the apply loop delivers the result for this index.
	sm.mu.Lock()
	_, ok := sm.ret[index]
	if ok == false {
		sm.ret[index] = make(chan OpReply, 1)
	}
	cha := sm.ret[index]
	sm.mu.Unlock()
	select {
	case log := <- cha:
		// Confirm the committed entry matches our client/request ids.
		argus, ok := log.args.(QueryArgs)
		if ok == false {
			reply.WrongLeader = true
		}else{
			if args.ClientId == argus.ClientId {
				if args.RequestId == argus.RequestId {
					reply.Err = log.reply.(QueryReply).Err
					reply.WrongLeader = false
				}else{
					reply.WrongLeader= true
				}
			}else{
				reply.WrongLeader = true
			}
		}
	case <- time.After(time_1):
		// Commit timed out.
		reply.WrongLeader = true
	}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sm *ShardMaster) Kill() {
	sm.rf.Kill()
	// Your code here, if desired.
}
// Raft exposes the underlying Raft instance; needed by shardkv tester.
func (sm *ShardMaster) Raft() *raft.Raft {
	return sm.rf
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Raft to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
// Update is the applier loop: it consumes committed entries from the Raft
// apply channel, executes them against the shardmaster state, and hands the
// result to the RPC handler waiting on that log index.
func (sm *ShardMaster) Update() {
	for true {
		log := <- sm.applyCh
		tp := log.Command.(Op)
		var cid int64
		var rid int
		var result OpReply
		// Extract the (client id, request id) pair used for duplicate
		// detection and remember the original args for the handler's
		// identity check.
		// NOTE(review): case labels Join/Leave/Move/Query are constants
		// declared elsewhere; they must equal the OpType strings the
		// handlers store (e.g. "Move") — confirm.
		switch tp.OpType{
		case Join:
			args := tp.Args.(JoinArgs)
			cid = args.ClientId
			rid = args.RequestId
			result.args = args
		case Leave:
			args := tp.Args.(LeaveArgs)
			cid = args.ClientId
			rid = args.RequestId
			result.args = args
		case Move:
			args := tp.Args.(MoveArgs)
			cid = args.ClientId
			rid = args.RequestId
			result.args = args
		case Query:
			args := tp.Args.(QueryArgs)
			cid = args.ClientId
			rid = args.RequestId
			result.args = args
		}
		result.OpType = tp.OpType
		// Duplicates are detected but still answered (getApply is given the
		// flag so mutations are skipped; presumably it wraps Apply — it is
		// defined elsewhere in this package).
		dup := sm.duplication(cid, rid)
		result.reply = sm.getApply(tp, dup)
		sm.sendResult(log.Index, result)
		sm.Validation()
	}
}
// Validation is a sanity check: every shard in the current configuration
// must be assigned to a gid that actually exists in Groups. Failures are
// only logged, never fatal.
func (sm *ShardMaster) Validation(){
	c := sm.configs[sm.configNum]
	for _, v := range c.Shards {
		// No groups at all with an unassigned shard (gid 0) is legal.
		// NOTE(review): arguably any v == 0 should be skipped regardless
		// of len(Groups) — confirm the intended invariant.
		if len(c.Groups) == 0 && v == 0 {
			continue
		}
		if _, ok := c.Groups[v]; !ok {
			DPrintln("Check failed that", v, "group does not exit", c.Shards, c.Groups)
		}
	}
}
// Apply executes a committed operation against the shardmaster state and
// returns the matching reply value. Mutating ops (Join/Leave/Move) are
// skipped when isDuplicated is set, but still answered with OK; Query is
// read-only and always served.
func (sm *ShardMaster) Apply(request Op, isDuplicated bool) interface{} {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	switch args := request.Args.(type) {
	case JoinArgs:
		var reply JoinReply
		if !isDuplicated {
			sm.ApplyJoin(args)
		}
		reply.Err = OK
		return reply
	case LeaveArgs:
		var reply LeaveReply
		if !isDuplicated {
			sm.ApplyLeave(args)
		}
		reply.Err = OK
		return reply
	case MoveArgs:
		var reply MoveReply
		if !isDuplicated {
			sm.ApplyMove(args)
		}
		reply.Err = OK
		return reply
	case QueryArgs:
		var reply QueryReply
		// -1 or an out-of-range number means "latest configuration".
		if args.Num == -1 || args.Num > sm.configNum {
			reply.Config = sm.configs[sm.configNum]
		} else {
			reply.Config = sm.configs[args.Num]
		}
		reply.Err = OK
		return reply
	}
	return nil
}
// ApplyJoin adds a new replica group to the next configuration and steals
// shards from the most-loaded groups to even out ownership. A join for an
// already-known gid is ignored.
func (sm *ShardMaster) ApplyJoin(args JoinArgs) {
	cfg := sm.NextConfig()
	_, exist := cfg.Groups[args.GID]
	if exist == false {
		cfg.Groups[args.GID] = args.Servers
		sm.ReBalanceShards(cfg, Join, args.GID)
	}
}
// ApplyLeave removes a replica group from the next configuration and
// redistributes its shards across the remaining groups.
func (sm *ShardMaster) ApplyLeave(args LeaveArgs) {
	cfg := sm.NextConfig()
	_, exist := cfg.Groups[args.GID]
	// BUG FIX: the group can only be removed (and its shards re-balanced)
	// when it actually exists. The original guard was inverted
	// (exist == false), which made Leave a no-op for real groups and
	// re-balanced against a non-existent gid. Compare ApplyJoin, where
	// the negative check is correct.
	if exist {
		delete(cfg.Groups, args.GID)
		sm.ReBalanceShards(cfg, Leave, args.GID)
	}
}
// ApplyMove pins a single shard to the given gid in the next configuration.
// No re-balancing is performed: Move is an explicit administrator override.
func (sm *ShardMaster) ApplyMove(args MoveArgs) {
	cfg := sm.NextConfig()
	cfg.Shards[args.Shard] = args.GID
}
// ReBalanceShards evens out shard ownership after a membership change.
// For Join, the new group (gid) steals NShards/len(Groups) shards, one at
// a time, from whichever group currently owns the most. For Leave, every
// shard owned by the departing gid is handed to the currently least-loaded
// group.
func (sm *ShardMaster) ReBalanceShards(cfg *Config, request string, gid int) {
	shardsCount := sm.CountShards(cfg)
	switch request {
	case Join:
		meanNum := NShards / len(cfg.Groups)
		for i := 0; i < meanNum; i++ {
			maxGid := sm.GetMaxGidByShards(shardsCount)
			// Invariant: the most-loaded group must own at least one shard
			// while we still have shards to steal; anything else is a bug.
			if len(shardsCount[maxGid]) == 0 {
				DPrintf("ReBalanceShards: max gid does not have shards")
				debug.PrintStack()
				os.Exit(-1)
			}
			cfg.Shards[shardsCount[maxGid][0]] = gid
			shardsCount[maxGid] = shardsCount[maxGid][1:]
		}
	case Leave:
		shardsArray := shardsCount[gid]
		delete(shardsCount, gid)
		for _, v := range(shardsArray) {
			minGid := sm.GetMinGidByShards(shardsCount)
			cfg.Shards[v] = minGid
			shardsCount[minGid] = append(shardsCount[minGid], v)
		}
	}
}
// GetMaxGidByShards returns the gid that currently owns the most shards.
// Ties are broken arbitrarily (map iteration order).
func (sm *ShardMaster) GetMaxGidByShards(shardsCount map[int][]int) int {
	gid, best := 0, -1
	for candidate, shards := range shardsCount {
		if len(shards) > best {
			best = len(shards)
			gid = candidate
		}
	}
	return gid
}
// GetMinGidByShards returns the gid that currently owns the fewest shards.
// Ties are broken arbitrarily (map iteration order).
func (sm *ShardMaster) GetMinGidByShards(shardsCount map[int][]int) int {
	gid, best := 0, -1
	for candidate, shards := range shardsCount {
		if best == -1 || len(shards) < best {
			best = len(shards)
			gid = candidate
		}
	}
	return gid
}
// CountShards inverts cfg.Shards into a gid -> owned-shard-indices map.
// Every known group appears in the result, even with zero shards.
func (sm *ShardMaster) CountShards(cfg *Config) map[int][]int {
	owned := make(map[int][]int)
	for gid := range cfg.Groups {
		owned[gid] = []int{}
	}
	for shard, gid := range cfg.Shards {
		owned[gid] = append(owned[gid], shard)
	}
	return owned
}
// sendResult publishes the applied result for log index i to the handler
// waiting on that index, never blocking the applier loop.
func (sm *ShardMaster) sendResult(i int, result OpReply) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	if ch, exists := sm.ret[i]; !exists {
		sm.ret[i] = make(chan OpReply, 1)
	} else {
		// Drain a stale result so the buffered send below cannot block.
		select {
		case <-ch:
		default:
		}
	}
	sm.ret[i] <- result
}
// duplication reports whether request j from client i has already been
// seen (at-most-once semantics) and records j as the latest request.
func (sm *ShardMaster) duplication(i int64, j int) bool {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	if last, seen := sm.ack[i]; seen && last >= j {
		return true
	}
	sm.ack[i] = j
	return false
}
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int][]string{}
gob.Register(Op{})
gob.Register(JoinArgs{})
gob.Register(LeaveArgs{})
gob.Register(MoveArgs{})
gob.Register(QueryArgs{})
gob.Register(JoinReply{})
gob.Register(LeaveReply{})
gob.Register(MoveReply{})
gob.Register(QueryReply{})
sm.applyCh = make(chan raft.ApplyMsg)
sm.rf = raft.Make(servers, me, persister, sm.applyCh)
// Your code here.
sm.configNum = 0
sm.ack = make(map[int64]int)
sm.ret = make(map[int]chan OpReply,1)
go sm.
return sm
}
|
package solutions
// NestedIterator flattens a nested list of integers (LeetCode 341).
// list holds the elements still to visit, i indexes the next element, and
// current is the lazily-built sub-iterator for list[i] when that element
// is itself a list.
type NestedIterator struct {
	list []*NestedInteger
	i int
	current *NestedIterator
}
// Constructor builds an iterator positioned at the start of nestedList.
func Constructor(nestedList []*NestedInteger) *NestedIterator {
	return &NestedIterator{list: nestedList}
}
// Next returns the next integer of the flattened sequence. Callers must
// invoke HasNext first: it positions the iterator and prepares `current`
// for nested elements.
func (it *NestedIterator) Next() int {
	elem := it.list[it.i]
	if !elem.IsInteger() {
		// Delegate into the sub-iterator prepared by HasNext.
		return it.current.Next()
	}
	it.i++
	return elem.GetInteger()
}
// HasNext reports whether another integer remains, advancing past any
// exhausted nested lists along the way.
func (it *NestedIterator) HasNext() bool {
	// The original's tail recursion is expressed here as a loop.
	for it.i < len(it.list) {
		if it.list[it.i].IsInteger() {
			return true
		}
		// Lazily build a sub-iterator for the nested list at position i.
		if it.current == nil {
			it.current = Constructor(it.list[it.i].GetList())
		}
		if it.current.HasNext() {
			return true
		}
		// Sub-list exhausted: discard it and move on.
		it.i++
		it.current = nil
	}
	return false
}
|
package api
import (
"os"
"github.com/pkg/errors"
"github.com/suborbital/reactr/rcap"
"github.com/suborbital/reactr/rwasm/runtime"
)
// GetStaticFileHandler returns the host function that exposes static file
// access to wasm modules under the name "get_static_file".
func GetStaticFileHandler() runtime.HostFn {
	handler := func(args ...interface{}) (interface{}, error) {
		namePtr := args[0].(int32)
		nameSize := args[1].(int32)
		ident := args[2].(int32)
		return get_static_file(namePtr, nameSize, ident), nil
	}
	return runtime.NewHostFn("get_static_file", 3, true, handler)
}
// get_static_file reads the requested file name from the module's memory,
// fetches the file, stashes it as the FFI result and returns its length.
// Negative return values are error codes: -1 bad identifier, -2 file
// source not configured, -3 file not found, -4 other read failure.
func get_static_file(namePtr int32, nameSize int32, ident int32) int32 {
	inst, err := runtime.InstanceForIdentifier(ident, true)
	if err != nil {
		runtime.InternalLogger().Error(errors.Wrap(err, "[rwasm] alert: invalid identifier used, potential malicious activity"))
		return -1
	}
	name := inst.ReadMemory(namePtr, nameSize)
	file, err := inst.Ctx().FileSource.GetStatic(string(name))
	if err != nil {
		runtime.InternalLogger().Error(errors.Wrap(err, "[rwasm] failed to GetStatic"))
		switch err {
		case rcap.ErrFileFuncNotSet:
			return -2
		case os.ErrNotExist:
			return -3
		default:
			return -4
		}
	}
	inst.SetFFIResult(file)
	return int32(len(file))
}
|
package sample
import (
"github.com/gin-gonic/gin"
jwt "github.com/kyfk/gin-jwt"
"github.com/wyllisMonteiro/go-api-template/pkg/jwt_auth"
)
// Routes registers all sample routes on r behind JWT authentication.
// The jwt error handler is installed first so auth failures are reported
// consistently before any handler runs.
func Routes(r *gin.RouterGroup, jwtAuth jwt.Auth) {
	r.Use(jwt.ErrorHandler)
	r.GET("/", jwt_auth.Operator(jwtAuth), GetSample)
}
|
package entry
// Entry is a visited-path record.
type Entry struct {
	// Path is the recorded directory/file path.
	Path string
	// VisitedCount counts how often the path was visited.
	VisitedCount int
	// LastVisited is the time of the most recent visit.
	// NOTE(review): stored as int — presumably a Unix timestamp; confirm.
	LastVisited int
}
// Entries is a collection of Entry pointers with functional helpers.
type Entries []*Entry
// Map returns a shallow copy of the entries.
// NOTE(review): the f parameter is accepted but never applied — its result
// type (interface{}) cannot be stored in Entries, so the signature looks
// wrong; confirm the intended contract with callers before changing it.
func (e Entries) Map(f func(*Entry) interface{}) Entries {
	// BUG FIX: the original used make(Entries, len(e)) and then appended,
	// producing a slice of length 2*len(e) whose first half was all nil
	// pointers. Allocate length 0 with capacity len(e) instead.
	result := make(Entries, 0, len(e))
	for _, v := range e {
		result = append(result, v)
	}
	return result
}
// Filter returns the entries for which the predicate f reports true.
func (e Entries) Filter(f func(*Entry) bool) Entries {
	kept := make(Entries, 0)
	for _, entry := range e {
		if !f(entry) {
			continue
		}
		kept = append(kept, entry)
	}
	return kept
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dom
// Canvas represents an HTML <canvas> element. It embeds *Element, so all
// generic DOM operations are available directly on a Canvas.
type Canvas struct{ *Element }
// Context2D returns the canvas's 2D rendering context, i.e. the result of
// the JavaScript call canvas.getContext("2d").
func (c *Canvas) Context2D() *Context2D { return &Context2D{Object: c.Call("getContext", "2d")} }
// Resize sets the canvas to width x height logical pixels: the backing
// store is scaled by the device pixel ratio (for crisp high-DPI drawing)
// while the CSS size stays in logical pixels.
func (c *Canvas) Resize(width, height int) {
	ratio := Win.DevicePixelRatio
	style := c.Style
	c.Width = int(float64(width) * ratio)
	c.Height = int(float64(height) * ratio)
	style.Width, style.Height = width, height
	// Compensate the drawing context so callers keep using logical units.
	c.Context2D().Scale(ratio, ratio)
}
// NewCanvas creates a new <canvas> element sized to width x height logical
// pixels (the backing store is scaled by the device pixel ratio in Resize).
func NewCanvas(width, height int) *Canvas {
	canvas := Canvas{newEl("canvas")}
	canvas.Resize(width, height)
	return &canvas
}
|
package functest
import (
"fmt"
faker "github.com/dmgk/faker"
pb "github.com/hugdubois/ah-svc-www/pb"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// testGetRsvpCreationRequest builds the request fixtures for RsvpCreation.
// The first five entries are invalid, in the exact order the response
// checker (testRsvpCreationResponse) asserts on; the final entry is valid.
func testGetRsvpCreationRequest(
	config FunctionalTestConfig,
) (reqs []*pb.RsvpCreationRequest, extras map[string]interface{}, err error) {
	validRequest := pb.NewRsvpCreationRequestGomeetFaker()
	// 256 characters: one past the "must length be less than '256'" limit.
	verylongString := faker.Lorem().Characters(256)
	// error cases
	reqs = append(reqs, &pb.RsvpCreationRequest{})
	reqs = append(reqs, &pb.RsvpCreationRequest{Names: verylongString})
	reqs = append(reqs, &pb.RsvpCreationRequest{Names: validRequest.GetNames()})
	reqs = append(reqs, &pb.RsvpCreationRequest{Names: validRequest.GetNames(), Email: validRequest.GetEmail(), ChildrenNameAge: verylongString})
	reqs = append(reqs, &pb.RsvpCreationRequest{Names: validRequest.GetNames(), Email: validRequest.GetEmail(), ChildrenNameAge: validRequest.GetChildrenNameAge(), Music: verylongString})
	// valid cases
	reqs = append(reqs, validRequest)
	return reqs, extras, err
}
// testRsvpCreationResponse checks the results of the RsvpCreation calls
// produced from testGetRsvpCreationRequest's fixtures. Indexes 0-4 must
// fail validation (with exact codes/messages when testsType == "GRPC");
// index 5 must succeed and echo the request fields back in the response.
func testRsvpCreationResponse(
	config FunctionalTestConfig,
	testsType string,
	testCaseResults []*TestCaseResult,
	extras map[string]interface{},
) (failures []TestFailure) {
	for i, tr := range testCaseResults {
		var (
			req *pb.RsvpCreationRequest
			res *pb.RsvpCreationResponse
			err error
			ok  bool
		)
		if tr.Request == nil {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "expected request message type pb.RsvpCreationRequest - nil given"})
			continue
		}
		req, ok = tr.Request.(*pb.RsvpCreationRequest)
		if !ok {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "expected request message type pb.RsvpCreationRequest - cast fail"})
			continue
		}
		err = tr.Error
		//fmt.Printf("%d - %v\n", i, err)
		// The first five fixtures are the deliberately-invalid requests.
		if i < 5 {
			if err == nil {
				//failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "an error is expected"})
				failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("an error is expected -- %d - %s", i, testsType)})
			}
			// Over gRPC the exact status code and message are asserted too.
			if testsType == "GRPC" {
				var (
					expectedCode    codes.Code
					expectedMessage string
				)
				e := status.Convert(err)
				// One case per invalid fixture, in fixture order.
				switch {
				case i < 1:
					expectedCode = codes.InvalidArgument
					expectedMessage = fmt.Sprintf("invalid field Names: value '%s' must length be greater than '2'", req.GetNames())
				case i < 2:
					expectedCode = codes.InvalidArgument
					expectedMessage = fmt.Sprintf("invalid field Names: value '%s' must length be less than '256'", req.GetNames())
				case i < 3:
					expectedCode = codes.InvalidArgument
					expectedMessage = "invalid field Email: Invalid email"
				case i < 4:
					expectedCode = codes.InvalidArgument
					expectedMessage = fmt.Sprintf("invalid field ChildrenNameAge: value '%s' must length be less than '256'", req.GetChildrenNameAge())
				case i < 5:
					expectedCode = codes.InvalidArgument
					expectedMessage = fmt.Sprintf("invalid field Music: value '%s' must length be less than '256'", req.GetMusic())
				}
				if e.Code() != expectedCode {
					failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Error code \"%v\" is expected got \"%v\"", expectedCode, e.Code())})
				}
				if e.Message() != expectedMessage {
					failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Error message \"%v\" is expected got \"%v\"", expectedMessage, e.Message())})
				}
			}
			continue
		}
		// Remaining fixtures are valid: no error, and a well-formed
		// response echoing the request is required.
		if err != nil {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("no error expected got (%s) -- %d %s", err, i, testsType)})
			continue
		}
		if tr.Response == nil {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "a response is expected"})
			continue
		}
		res, ok = tr.Response.(*pb.RsvpCreationResponse)
		if !ok {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "expected response message type pb.RsvpCreationResponse - cast fail"})
			continue
		}
		if req == nil || res == nil {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "a request and a response are expected"})
			continue
		}
		if !res.GetOk() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "the true value is expected for the Ok attribute of the response"})
			continue
		}
		if res.GetInfo() == nil {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: "no nil Rsvp is expected"})
			continue
		}
		// Field-by-field echo checks: response Info must match the request.
		if res.GetInfo().GetNames() != req.GetNames() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Names is not equal than the request expected '%s' got '%s'", req.GetNames(), res.GetInfo().GetNames())})
			continue
		}
		if res.GetInfo().GetEmail() != req.GetEmail() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Email is not equal than the request expected '%s' got '%s'", req.GetEmail(), res.GetInfo().GetEmail())})
			continue
		}
		if res.GetInfo().GetPresence() != req.GetPresence() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Presence is not equal than the request expected '%s' got '%s'", req.GetPresence(), res.GetInfo().GetPresence())})
			continue
		}
		if res.GetInfo().GetChildrenNameAge() != req.GetChildrenNameAge() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("ChildrenNameAge is not equal than the request expected '%s' got '%s'", req.GetChildrenNameAge(), res.GetInfo().GetChildrenNameAge())})
			continue
		}
		if res.GetInfo().GetHousing() != req.GetHousing() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Housing is not equal than the request expected '%s' got '%s'", req.GetHousing(), res.GetInfo().GetHousing())})
			continue
		}
		if res.GetInfo().GetMusic() != req.GetMusic() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Music is not equal than the request expected '%s' got '%s'", req.GetMusic(), res.GetInfo().GetMusic())})
			continue
		}
		if res.GetInfo().GetBrunch() != req.GetBrunch() {
			failures = append(failures, TestFailure{Procedure: "RsvpCreation", Message: fmt.Sprintf("Brunch is not equal than the request expected '%s' got '%s'", req.GetBrunch(), res.GetInfo().GetBrunch())})
			continue
		}
		//fmt.Printf("%d - %v\n", i, res)
		// TODO more tests
	}
	return failures
}
|
package server
import (
"encoding/json"
"eosApi/httpPost"
"fmt"
)
// GetBlock fetches the current head block from the EOS chain API and
// returns its timestamp, ref_block_prefix, and the head block number.
func GetBlock()(string,int,int){
	HeadBlockNum:=GetInfo()
	// Request payload: {"block_num_or_id": <head block number>}.
	type AutoGenerated struct {
		BlockNumOrID int `json:"block_num_or_id"`
	}
	block_num_0:=AutoGenerated{
		HeadBlockNum,
	}
	block_num_1,_:=json.Marshal(block_num_0)
	block_num :=string(block_num_1)
	//fmt.Println("HeadBlockNum",HeadBlockNum)
	// POST to the chain/get_block endpoint and read the raw response body.
	body :=httpPost.HttpPost(block_num,"chain","get_block")
	fmt.Println("getblock返回信息:",string(body))
	// Shape of the get_block response (only a few fields are used below).
	type getblock_0 struct {
		Timestamp string `json:"timestamp"`
		Producer string `json:"producer"`
		Confirmed int `json:"confirmed"`
		Previous string `json:"previous"`
		TransactionMroot string `json:"transaction_mroot"`
		ActionMroot string `json:"action_mroot"`
		ScheduleVersion int `json:"schedule_version"`
		NewProducers interface{} `json:"new_producers"`
		HeaderExtensions []interface{} `json:"header_extensions"`
		ProducerSignature string `json:"producer_signature"`
		Transactions []interface{} `json:"transactions"`
		BlockExtensions []interface{} `json:"block_extensions"`
		ID string `json:"id"`
		BlockNum int `json:"block_num"`
		RefBlockPrefix int `json:"ref_block_prefix"`
	}
	var getblock getblock_0
	// NOTE(review): the Unmarshal error is ignored; on malformed JSON the
	// zero values are returned silently — confirm this is acceptable.
	json.Unmarshal(body,&getblock)
	return getblock.Timestamp,getblock.RefBlockPrefix,HeadBlockNum
}
|
package main
import (
"fmt"
"time"
)
// myticker prints a tick every two seconds and shuts down after ten
// seconds, signalled by a companion goroutine.
func myticker() {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	// done is closed when the demo should stop. A close is race-free to
	// observe, unlike the original's unsynchronized bool flag.
	done := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Second)
		close(done)
	}()
	for {
		// BUG FIX: the original stopped the ticker and then kept blocking
		// on <-ticker.C; a stopped ticker never fires, so the loop hung
		// forever and the flag was never rechecked. Selecting on a done
		// channel guarantees termination.
		select {
		case <-done:
			fmt.Println("myticker end")
			return
		case t := <-ticker.C:
			fmt.Println(t)
		}
	}
}
// main runs the ticker demo and reports when it has finished.
func main() {
	myticker()
	fmt.Println("main func end")
}
|
package gopherillamail
import (
"fmt"
"net/http"
"net/http/cookiejar"
"time"
)
const (
	// STANDARD_IP is the default client IP reported to the GuerrillaMail
	// API; it can be overridden per inbox with SetIP.
	STANDARD_IP = "127.0.0.1"
)
// Mail is an e-mail from GuerrillaMail.
type Mail struct {
	// guid is GuerrillaMail's identifier for the message.
	guid string
	// subject, sender and time describe the message header.
	subject string
	sender string
	time time.Time
	// read reports whether the message has been opened.
	read bool
	// excerpt is a short preview; body is the full message body.
	excerpt string
	body string
}
// Inbox is a struct that allows you to retrieve e-mails from GuerrillaMail.
type Inbox struct {
	// UserAgent and IP are passed on every API call.
	UserAgent string
	IP string
	// sid_token is the GuerrillaMail session token.
	sid_token string
	// Email is the inbox's current address; EmailList caches fetched mail.
	Email string
	EmailList []Mail
	// emailTimestamp tracks the newest mail seen so far.
	emailTimestamp uint
	// httpclient carries the session cookie jar between calls.
	httpclient *http.Client
}
// blankInbox returns an Inbox without any email address assigned yet.
// The HTTP client gets a cookie jar so the GuerrillaMail session survives
// across API calls.
func blankInbox(userAgent string) (*Inbox, error) {
	jar, _ := cookiejar.New(nil) // cookiejar.New(nil) never fails
	inbox := &Inbox{
		httpclient: &http.Client{Jar: jar},
		IP:         STANDARD_IP,
		UserAgent:  userAgent,
	}
	return inbox, nil
}
// NewInbox returns an Inbox bound to the given custom email address.
func NewInbox(userAgent, email string) (*Inbox, error) {
	inb, err := blankInbox(userAgent)
	if err != nil {
		return nil, err
	}
	err = inb.setEmail(email)
	if err != nil {
		return nil, err
	}
	return inb, nil
}
// AnonymousInbox returns an Inbox with a random email address.
func AnonymousInbox(userAgent string) (*Inbox, error) {
	inb, err := blankInbox(userAgent)
	if err != nil {
		return inb, fmt.Errorf("could not create blank inbox: %v", err)
	}
	err = inb.randomEmail()
	if err != nil {
		return inb, fmt.Errorf("could not create random email: %v", err)
	}
	err = inb.getEmail() // You have to call this at least once to set the sid_token and the Email in the struct
	if err != nil {
		return inb, fmt.Errorf("could not get initial email list: %v", err)
	}
	return inb, nil
}
// doRequest performs a GET against GuerrillaMail's ajax API, invoking the
// given API function with the given query-string arguments.
// NOTE(review): the response body is closed but never read or parsed, so
// callers cannot observe API results (sid_token, email list, ...) —
// confirm whether response handling is still to be implemented.
func (c *Inbox) doRequest(functionName string, args map[string]string) error {
	// NOTE(review): c.UserAgent is interpolated unescaped into the URL —
	// presumably it should be query-encoded like the other arguments.
	req, err := http.NewRequest(
		"GET",
		fmt.Sprintf(
			"http://api.guerrillamail.com/ajax.php?f=%s&ip=%s&agent=%s",
			functionName,
			c.IP,
			c.UserAgent,
		),
		nil,
	)
	if err != nil {
		return fmt.Errorf("could not build request to GuerrillaMail: %v", err)
	}
	// Build the querystring from the arguments
	q := req.URL.Query()
	for key, val := range args {
		q.Add(key, val)
	}
	// Set the querystring
	req.URL.RawQuery = q.Encode()
	resp, err := c.httpclient.Do(req)
	if err != nil {
		return fmt.Errorf("could not do request to GuerrillaMail: %v", err)
	}
	defer resp.Body.Close()
	return nil
}
// SetUserAgent sets the user agent reported on subsequent API calls.
func (c *Inbox) SetUserAgent(userAgent string) {
	c.UserAgent = userAgent
}
// setEmail asks GuerrillaMail to switch this inbox to the given address
// via the set_email_user API function.
func (c *Inbox) setEmail(email string) error {
	// BUG FIX: the original called c.doRequest() with no arguments, which
	// does not compile (doRequest requires a function name and argument
	// map), and it never used the address it was given — the parameter
	// was even misnamed userAgent.
	err := c.doRequest(
		"set_email_user",
		map[string]string{
			"email_user": email,
			"lang":       "en",
		},
	)
	if err != nil {
		return fmt.Errorf("could not set email address: %v", err)
	}
	return nil
}
// getEmail calls get_email_address, which establishes the session
// (sid_token) and the inbox's address on the GuerrillaMail side.
// NOTE(review): doRequest discards the response, so sid_token/Email are
// not actually populated in the struct yet — confirm intended behavior.
func (c *Inbox) getEmail() error {
	err := c.doRequest(
		"get_email_address",
		map[string]string{
			"lang": "en",
			// "",
		},
	)
	if err != nil {
		return fmt.Errorf("could not get email address: %v", err)
	}
	return nil
}
// getEmailList does the initial call to initialize the EmailList.
// Shouldn't be used to check for new e-mails.
func (c *Inbox) getEmailList() error {
	// BUG FIX: the original called c.doRequest() without the required
	// function name and argument map, which does not compile. The
	// get_email_list API function returns the inbox starting at offset 0.
	err := c.doRequest(
		"get_email_list",
		map[string]string{
			"offset": "0",
		},
	)
	if err != nil {
		return fmt.Errorf("could not get initial emails: %v", err)
	}
	return nil
}
// randomEmail asks GuerrillaMail for a random email address.
func (c *Inbox) randomEmail() error {
	// BUG FIX: the original called c.doRequest() with no arguments, which
	// does not compile. get_email_address assigns (and returns) a random
	// address for a fresh session.
	err := c.doRequest(
		"get_email_address",
		map[string]string{
			"lang": "en",
		},
	)
	if err != nil {
		return fmt.Errorf("could not generate random email: %v", err)
	}
	return nil
}
// SetIP sets the client IP reported on subsequent API calls.
func (c *Inbox) SetIP(IP string) {
	c.IP = IP
}
|
package types
import (
"math/big"
"time"
mint "github.com/void616/gm.mint"
)
// Approvement models a pending approval sending and its notification state.
type Approvement struct {
	ID uint64
	// Transport/Status describe how and in what state the sending is.
	Transport SendingTransport
	Status SendingStatus
	// To is the destination wallet; Sender/SenderNonce/Digest identify the
	// mint transaction once it has been sent (nil until then).
	To mint.PublicKey
	Sender *mint.PublicKey
	SenderNonce *uint64
	Digest *mint.Digest
	// SentAtBlock/Block track blockchain placement (nil until known).
	SentAtBlock *big.Int
	Block *big.Int
	// Service/RequestID/CallbackURL tie the approvement to the requesting
	// service and its callback endpoint.
	Service string
	RequestID string
	CallbackURL string
	// Notification bookkeeping: first/next attempt time and completion.
	FirstNotifyAt *time.Time
	NotifyAt *time.Time
	Notified bool
}
|
package main
import (
"github.com/garyburd/redigo/redis"
)
// DBWriter is an interface for database writers. Mutating methods take a
// blocking flag: when true the write waits for queue space, when false it
// is dropped if the internal queue is full (see redisWriter.write).
type DBWriter interface {
	// Writer drains the internal queue, applying commands to the database.
	Writer()
	AddKey(blocking bool, key string, value interface{})
	DeleteKey(blocking bool, key ...string)
	SetAdd(blocking bool, key string, members ...interface{})
	SetRemove(blocking bool, key string, members ...interface{})
	CloseConn()
}
// command consists of a command name and its arguments to be sent to
// a database with a DBWriter.
type command struct {
	comm string
	args []interface{}
}
// redisWriter is a DBWriter for redis. Commands flow through commChannel
// and are executed by the Writer goroutine against conn.
type redisWriter struct {
	conn *redis.Conn
	commChannel chan command
}
// write enqueues a command for the Writer goroutine. With blocking=true
// the send waits for room in the channel; with blocking=false the command
// is silently dropped when the channel buffer is full.
func (rw *redisWriter) write(blocking bool, comm string, args ...interface{}) {
	if blocking {
		rw.commChannel <- command{comm: comm, args: args}
	} else {
		// Non-blocking path: default case drops the command when full.
		select {
		case rw.commChannel <- command{comm: comm, args: args}:
		default:
		}
	}
}
// Writer drains the communication channel, sending each queued command to
// the redis database. It returns when commChannel is closed.
// NOTE(review): the reply and error from Do are discarded — confirm this
// fire-and-forget behavior is intended.
func (rw *redisWriter) Writer() {
	for c := range rw.commChannel {
		(*rw.conn).Do(c.comm, c.args...)
	}
}
// AddKey enqueues a redis SET of the specified key to the given value.
func (rw *redisWriter) AddKey(blocking bool, key string, value interface{}) {
	rw.write(blocking, "SET", key, value)
}
// DeleteKey enqueues a redis DEL of the specified keys.
func (rw *redisWriter) DeleteKey(blocking bool, key ...string) {
	// BUG FIX: the original passed the whole []string as a single
	// argument, so redigo's Do received one nested (unusable) argument
	// instead of one key per argument. Flatten the keys.
	args := make([]interface{}, len(key))
	for i, k := range key {
		args[i] = k
	}
	rw.write(blocking, "DEL", args...)
}
// SetAdd enqueues a redis SADD of the members to the set at key.
func (rw *redisWriter) SetAdd(blocking bool, key string, members ...interface{}) {
	// BUG FIX: the original passed members as one nested argument
	// ("SADD", key, []interface{}{...}); spread them so redis receives
	// SADD key m1 m2 ...
	rw.write(blocking, "SADD", append([]interface{}{key}, members...)...)
}
// SetRemove enqueues a redis SREM of the members from the set at key.
func (rw *redisWriter) SetRemove(blocking bool, key string, members ...interface{}) {
	// BUG FIX: the original omitted the set key entirely and passed the
	// members slice as a single nested argument ("SREM", members), so the
	// command could never remove anything. Mirror SetAdd's shape:
	// SREM key m1 m2 ...
	rw.write(blocking, "SREM", append([]interface{}{key}, members...)...)
}
// CloseConn closes the connection to redis.
// NOTE(review): the Close error is discarded — confirm acceptable.
func (rw *redisWriter) CloseConn() {
	(*rw.conn).Close()
}
|
package solutions
import (
"sort"
)
// largestDivisibleSubset returns a largest subset of nums in which every
// pair (a, b) satisfies a % b == 0 or b % a == 0 (LeetCode 368).
func largestDivisibleSubset(nums []int) []int {
	// Sort descending so every chain extends with strictly smaller divisors.
	sort.Slice(nums, func(a, b int) bool { return nums[a] > nums[b] })
	memo := make(map[int][]int)
	// 0 is divisible by everything, so any element may start a chain.
	return findLargestDivisibleSubset(nums, memo, 0)
}
// findLargestDivisibleSubset returns the longest divisor chain drawn from
// nums (sorted descending) whose head divides current. dictionary memoizes
// the best chain starting at each value.
func findLargestDivisibleSubset(nums []int, dictionary map[int][]int, current int) []int {
	if len(nums) == 0 {
		return []int{}
	}
	var best []int
	for i, candidate := range nums {
		if current%candidate != 0 {
			continue
		}
		chain, cached := dictionary[candidate]
		if !cached {
			// Extend the chain with the best suffix headed by candidate.
			chain = append([]int{candidate}, findLargestDivisibleSubset(nums[i+1:], dictionary, candidate)...)
		}
		dictionary[candidate] = chain
		if len(chain) > len(best) {
			best = chain
		}
	}
	return best
}
|
package main
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"log"
)
// main encrypts input.pdf with AES-128-GCM into ciphertext.pdf, then
// decrypts the ciphertext and writes the recovered plaintext back.
func main() {
	// NOTE(review): a hard-coded key is fine for a demo but must not ship.
	b, _ := aes.NewCipher([]byte("Test1234Test1234"))
	data, err := ioutil.ReadFile("input.pdf")
	if err != nil {
		fmt.Println("Error :", err.Error())
	}
	fmt.Println("LEN:", len(data))
	gcm, err := cipher.NewGCM(b)
	if err != nil {
		log.Panic(err)
	}
	// Never use more than 2^32 random nonces with a given key
	// because of the risk of repeat.
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		log.Fatal(err)
	}
	// Seal prepends the nonce so decryption can recover it from the front.
	ciphertext := gcm.Seal(nonce, nonce, data, nil)
	// Save back to file
	err = ioutil.WriteFile("ciphertext.pdf", ciphertext, 0777)
	if err != nil {
		log.Panic(err)
	}
	// Decrypting.
	// BUG FIX: the original sliced the nonce and payload out of `data`
	// (the plaintext just read from disk), so gcm.Open was fed plaintext
	// and always failed authentication. Decrypt the ciphertext instead.
	reverseNonce := ciphertext[:gcm.NonceSize()]
	encrypted := ciphertext[gcm.NonceSize():]
	plaintext, err := gcm.Open(nil, reverseNonce, encrypted, nil)
	if err != nil {
		log.Panic(err)
	}
	err = ioutil.WriteFile("input.pdf", plaintext, 0777)
	if err != nil {
		log.Panic(err)
	}
}
|
package restore_test
import (
"database/sql"
"fmt"
"log"
"github.com/cvgw/sql-db-restore/pkg/restore"
)
// ExampleRestoreSQLFile shows how to restore a SQL dump file into a
// database handle.
// NOTE(review): no mysql driver is blank-imported here, so sql.Open with
// "mysql" would fail at runtime — confirm the driver registration lives
// in the surrounding test setup.
func ExampleRestoreSQLFile() {
	db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@/", "foo", "bar"))
	if err != nil {
		log.Fatal("could not open connection to sql db")
	}
	restore.RestoreSQLFile(db, "dump.sql")
}
|
package main
import "fmt"
import "unsafe"
// User is a small demo struct used to illustrate unsafe.Sizeof below.
type User struct {
	id int64
	name string
}
// main prints the sizes of an int32 and a string header, then the id
// field of a locally constructed User.
func main() {
	u := User{id: 1, name: "v"}
	var n int32 = 1
	s := "v"
	fmt.Println(unsafe.Sizeof(n)) // size of int32: 4 bytes
	fmt.Println(unsafe.Sizeof(s)) // size of the string header, not the data
	fmt.Println(u.id)             // *&x is simply x
}
|
package main
import (
"fmt"
"runtime"
"time"
)
// c is the number of iterations each worker performs.
const c = 10000000

// funcGoLoop sums the integers 0..c-1 and sends the total on ch.
func funcGoLoop(ch chan<- int) {
	sum := 0
	for i := 0; i < c; i++ {
		sum += i
	}
	ch <- sum
}
// funcGo runs two summation workers concurrently and waits for both to
// report their results.
func funcGo() {
	results := make(chan int)
	for i := 0; i < 2; i++ {
		go funcGoLoop(results)
	}
	for i := 0; i < 2; i++ {
		<-results
	}
}
// main times the same two-goroutine workload under GOMAXPROCS=1 and
// GOMAXPROCS=2, then starts a busy-looping goroutine and shows that main
// still gets to run.
func main() {
	runtime.GOMAXPROCS(1)
	t1 := time.Now()
	funcGo()
	fmt.Println("GOMAXPROCS=1: ", time.Since(t1))
	runtime.GOMAXPROCS(2)
	t2 := time.Now()
	funcGo()
	fmt.Println("GOMAXPROCS=2: ", time.Since(t2))
	runtime.GOMAXPROCS(1)
	// A tight empty loop on a single P. NOTE(review): whether "main" still
	// prints depends on the Go version (goroutines are asynchronously
	// preemptible since Go 1.14) — confirm what the demo intends to show.
	go func() {
		fmt.Println("go")
		for {
		}
	}()
	time.Sleep(time.Millisecond)
	fmt.Println("main")
}
|
package main
import (
"encoding/csv"
"io/ioutil"
"log"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
)
// Regexes locating the section headings on a Wikipedia "Month_Day" page.
const BIRTHS = `<h2>.+Births.+</h2>`
const DEATHS = `<h2>.+Deaths.+</h2>`
const HOLIDAYS = `<h2>.+Holidays.+</h2>`
// DELIMITTER (sic) is the en dash separating year and person on each line.
const DELIMITTER = "–"
// checkError aborts the process via log.Fatal when err is non-nil; it is
// a no-op otherwise.
func checkError(err error) {
	if err != nil {
		// log.Fatal calls os.Exit(1); the `return` that originally
		// followed it was unreachable dead code and has been removed.
		log.Fatal(err)
	}
}
// generateUrl builds the Wikipedia day-page URL, e.g. ".../wiki/May_5".
func generateUrl(month string, day int) string {
	return "http://en.wikipedia.org/wiki/" + month + "_" + strconv.Itoa(day)
}
// generateURLBody downloads URL and returns the body as a string pointer.
// Any HTTP or read error terminates the process via checkError.
func generateURLBody(URL string) *string {
	res, err := http.Get(URL)
	checkError(err)
	defer res.Body.Close()
	bodyByte, err := ioutil.ReadAll(res.Body)
	checkError(err)
	bodyString := string(bodyByte)
	return &(bodyString)
}
// generateLabelStrings slices the page body between two section headings
// ("BIRTHS" selects Births..Deaths, "DEATHS" selects Deaths..Holidays),
// lowercases the slice and returns it split into lines.
// NOTE(review): if either heading is missing, FindStringIndex returns nil
// and the index expressions below panic — confirm inputs always match.
func generateLabelStrings(URLBody *string, label string) []string{
	var firstLabel string
	var nextLabel string
	switch label {
	case "BIRTHS":
		firstLabel = BIRTHS
		nextLabel = DEATHS
	case "DEATHS":
		firstLabel = DEATHS
		nextLabel = HOLIDAYS
	}
	firstReg := regexp.MustCompile(firstLabel)
	nextReg := regexp.MustCompile(nextLabel)
	firstLabelIndex := firstReg.FindStringIndex(*URLBody)
	nextLabelIndex := nextReg.FindStringIndex(*URLBody)
	labelStrings := strings.ToLower((*URLBody)[firstLabelIndex[1]+1:nextLabelIndex[0]])
	return strings.Split(labelStrings, "\n")
}
// sparseName splits a full name into first and last name, skipping roman
// numeral suffixes such as "ii" or "iv" when choosing the last name.
// Single-word names yield two empty strings.
func sparseName(fullName string) (string, string) {
	// Roman-numeral suffixes that must not be mistaken for a last name.
	// NOTE(review): "xii" is absent from this set — confirm intentional.
	invalidNameSuffix := map[string]int{"i": 0, "ii": 0, "iii": 0, "iv": 0, "v": 0, "vi": 0, "vii": 0,
		"viii": 0, "ix": 0, "x": 0, "xi": 0, "xiii": 0}
	firstName, lastName := "", ""
	parts := strings.Split(strings.TrimRight(fullName, " "), " ")
	if len(parts) >= 2 {
		firstName = parts[0]
		// Scan backwards for the first word that is not a suffix.
		for i := len(parts) - 1; i >= 0 && parts[i] != " "; i-- {
			if _, isSuffix := invalidNameSuffix[parts[i]]; !isSuffix {
				lastName = parts[i]
				break
			}
		}
	}
	return firstName, lastName
}
// extractYear returns the first 4-digit run in singlePersonInfo, or 0
// when none is present.
func extractYear(singlePersonInfo string) int {
	yearReg := regexp.MustCompile(`[0-9]{4}`)
	loc := yearReg.FindStringIndex(singlePersonInfo)
	if len(loc) == 0 {
		return 0
	}
	year, err := strconv.Atoi(singlePersonInfo[loc[0] : loc[0]+4])
	checkError(err)
	return year
}
// extractName pulls the link title out of a `title="..."` attribute,
// stopping at the closing quote or an opening parenthesis, then strips
// trailing ", ..." / "; ..." qualifiers and surrounding whitespace.
// Returns "" when no title attribute is present.
func extractName(singlePersonInfo string) string {
	titleReg := regexp.MustCompile(`title="`)
	loc := titleReg.FindStringIndex(singlePersonInfo)
	if len(loc) < 2 {
		return ""
	}
	start := loc[1]
	end := start
	for i := start; singlePersonInfo[i] != '"' && singlePersonInfo[i] != '('; i++ {
		end = i + 1
	}
	fullName := singlePersonInfo[start:end]
	fullName = strings.Split(fullName, ",")[0]
	fullName = strings.Split(fullName, ";")[0]
	return strings.TrimSpace(fullName)
}
// extractDescription returns the text after the final comma, truncated at
// the first '<' (the start of trailing HTML markup). Like the original,
// it assumes a '<' is present in that segment.
func extractDescription(singlePersonInfo string) string {
	segments := strings.Split(singlePersonInfo, ",")
	last := segments[len(segments)-1]
	end := 0
	for i := 0; last[i] != '<'; i++ {
		end = i + 1
	}
	return last[:end]
}
// checkWeekday returns the weekday name for the given date, e.g. "Saturday".
func checkWeekday(year int, month string, day int) string {
	const longForm = "January 2, 2006"
	text := month + " " + strconv.Itoa(day) + ", " + strconv.Itoa(year)
	date, _ := time.Parse(longForm, text)
	return date.Weekday().String()
}
// handleAllPersons parses one "<year> – <link>" line into the Person
// fields (year, names, dob, description, weekday).
// NOTE(review): p is passed BY VALUE, so every assignment below mutates a
// copy that is discarded on return — the caller never observes these
// fields. Presumably the parameter was meant to be *Person; this must be
// fixed together with the caller in main.
func handleAllPersons(singlePerson *string, p Person) {
	singlePersonInfo := strings.Split(*singlePerson, DELIMITTER)
	if len(singlePersonInfo) == 2 {
		year := extractYear(singlePersonInfo[0])
		fullName := extractName(singlePersonInfo[1])
		firstName, lastName := sparseName(fullName)
		description := extractDescription(singlePersonInfo[1])
		weekday := checkWeekday(year, p.GetMonth(), int(p.GetDay()))
		p.Year = int32(year)
		p.FullName = fullName
		p.FirstName = firstName
		p.LastName = lastName
		p.Dob = strconv.Itoa(year) + "-" + p.GetMonth() + "-" + strconv.Itoa(int(p.GetDay()))
		p.Description = strings.TrimSpace(description)
		p.Weekday = weekday
	}
}
// combinePersonInfo flattens a Person into the CSV column order:
// dob, full name, first name, last name, year, month, day, description,
// weekday.
func combinePersonInfo(p Person) []string {
	return []string{
		p.GetDob(),
		p.GetFullName(),
		p.GetFirstName(),
		p.GetLastName(),
		strconv.Itoa(int(p.GetYear())),
		p.GetMonth(),
		strconv.Itoa(int(p.GetDay())),
		p.GetDescription(),
		p.GetWeekday(),
	}
}
// main scrapes the Births section of every Wikipedia "Month_Day" page and
// appends one CSV row per person to test_pb.csv.
func main() {
	dates := map[string]int{"January": 31, "February": 29, "March": 31,
		"April": 30, "May": 31, "June": 30,
		"July": 31, "August": 31, "September": 30,
		"October": 31, "November": 30, "December": 31}
	//dates := map[string]int{"April":2}
	startDate := 1
	file, err := os.OpenFile("test_pb.csv", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	checkError(err)
	defer file.Close()
	for month, days := range dates {
		for day := startDate; day <= days; day++ {
			URL := generateUrl(month, day)
			URLBody := generateURLBody(URL)
			labelStrings := generateLabelStrings(URLBody, "BIRTHS")
			for _, singlePerson := range labelStrings {
				p := Person{
					Month: month,
					Day: int32(day),
				}
				// NOTE(review): handleAllPersons takes p by value, so the
				// parsed fields never reach this copy and GetYear() below
				// is always 0 — confirm and fix together with that func.
				handleAllPersons(&singlePerson, p)
				// Skip rows without a plausible 4-digit year or content.
				if len(strconv.Itoa(int(p.GetYear()))) != 4 || len(singlePerson) < 10 {
					continue
				}
				combinedPersonInfo := combinePersonInfo(p)
				// NOTE(review): a new csv.Writer per row works (Flush is
				// called) but could be hoisted out of the loops; Write's
				// error is also ignored.
				csvWriter := csv.NewWriter(file)
				csvWriter.Write(combinedPersonInfo)
				csvWriter.Flush()
			}
		}
	}
}
|
// package react
//
// const testVersion = 4
//
// /* reactor */
//
// type reactor struct {
// cells []*compuCell
// }
//
// func New() Reactor {
// return &reactor{}
// }
//
// func (r *reactor) CreateCompute1(c Cell, f func(int) int) ComputeCell {
// old := f(c.Value())
// cc := new(compuCell)
// cc.cb = make(map[CallbackHandle]func(int))
// cc.eval = func() int {
// v := f(c.Value())
// if v != old {
// for _, cb := range cc.cb {
// cb(v)
// }
// old = v
// }
// return v
// }
// r.cells = append(r.cells, cc)
// return cc
// }
//
// func (r *reactor) CreateCompute2(c1, c2 Cell, f func(int, int) int) ComputeCell {
// old := f(c1.Value(), c2.Value())
// cc := new(compuCell)
// cc.cb = make(map[CallbackHandle]func(int))
// cc.eval = func() int {
// v := f(c1.Value(), c2.Value())
// if v != old {
// for _, cb := range cc.cb {
// cb(v)
// }
// old = v
// }
// return v
// }
// r.cells = append(r.cells, cc)
// return cc
// }
//
// func (r *reactor) CreateInput(i int) InputCell {
// return &inputCell{
// value: i,
// reactor: r,
// }
// }
//
// /* input cell */
//
// type inputCell struct {
// value int
// *reactor
// }
//
// func (c *inputCell) SetValue(i int) {
// if c.value != i {
// c.value = i
// for _, cc := range c.cells {
// cc.eval()
// }
// }
// }
//
// func (c *inputCell) Value() int {
// return c.value
// }
//
// /* compute cell */
//
// type compuCell struct {
// eval func() int
// cb map[CallbackHandle]func(int)
// }
//
// func (c *compuCell) AddCallback(f func(int)) CallbackHandle {
// c.cb[&f] = f
// return &f // guaranteed to be uniq
// }
//
// func (c *compuCell) RemoveCallback(h CallbackHandle) {
// delete(c.cb, h)
// }
//
// func (c *compuCell) Value() int {
// return c.eval()
// }
|
package tb
import (
"encoding/json"
)
// Cfg_Area is one area entry of the static configuration data.
type Cfg_Area struct {
	AreaId int32    // numeric area identifier
	AreaName string // display name
	StartDate int32 // opening date (encoding not visible here — presumably yyyymmdd or a unix day; confirm with the data source)
	State int32     // area state flag (meaning defined by the producer of the JSON)
}

// CfgData is the top-level JSON envelope: a version string plus the payload.
type CfgData struct {
	Ver string
	Items *CfgDataAll
}

// CfgDataAll groups every configuration table carried in the payload.
type CfgDataAll struct {
	Cfg_Area []*Cfg_Area
}

// Data holds the most recently loaded configuration payload (set by InitCfgData).
var Data *CfgDataAll

// Ver holds the version string of the loaded configuration (set by InitCfgData).
var Ver string
// InitCfgData parses the raw JSON configuration blob and publishes it into
// the package-level Ver and Data variables.
//
// On malformed input it returns the json.Unmarshal error and leaves Ver and
// Data untouched.
func InitCfgData(data []byte) error {
	var allData CfgData
	// Parse into a local first so the globals are only updated on success.
	if err := json.Unmarshal(data, &allData); err != nil {
		return err
	}
	Ver = allData.Ver
	Data = allData.Items
	return nil
}
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cycle
import (
"cycle/one"
"github.com/golang/dep/gps"
)
var (
	// A and B reference github.com/golang/dep/gps and the sibling package
	// cycle/one. Given the package name ("cycle"), this file presumably
	// exists to exercise import-cycle / dependency analysis in dep's test
	// fixtures — confirm against the repository layout.
	A = gps.Solve
	B = one.A
)
|
package main
//883. 三维形体投影面积
//在n x n的网格grid中,我们放置了一些与 x,y,z 三轴对齐的1 x 1 x 1立方体。
//
//每个值v = grid[i][j]表示 v个正方体叠放在单元格(i, j)上。
//
//现在,我们查看这些立方体在 xy、yz和 zx平面上的投影。
//
//投影就像影子,将 三维 形体映射到一个 二维 平面上。从顶部、前面和侧面看立方体时,我们会看到“影子”。
//
//返回 所有三个投影的总面积 。
//
//
//
//示例 1:
//
//
//
//输入:[[1,2],[3,4]]
//输出:17
//解释:这里有该形体在三个轴对齐平面上的三个投影(“阴影部分”)。
//示例2:
//
//输入:grid = [[2]]
//输出:5
//示例 3:
//
//输入:[[1,0],[0,2]]
//输出:8
//
//
//提示:
//
//n == grid.length == grid[i].length
//1 <= n <= 50
//0 <= grid[i][j] <= 50
// projectionArea returns the combined area of the three axis-aligned
// projections (top xy, front yz, side zx) of the cube towers described by
// grid, where grid[i][j] is the height of the tower on cell (i, j).
//
// Top view contributes 1 per non-empty cell; front view contributes the
// maximum of each row; side view contributes the maximum of each column.
// grid must be square (n x n), as the problem constraints guarantee.
func projectionArea(grid [][]int) int {
	top, front, side := 0, 0, 0
	n := len(grid)
	for i := 0; i < n; i++ {
		rowMax, colMax := 0, 0
		for j := 0; j < n; j++ {
			if grid[i][j] > 0 {
				top++ // one unit of shadow per occupied cell
			}
			if grid[i][j] > rowMax {
				rowMax = grid[i][j]
			}
			// Walk column i at the same time (square grid makes this valid).
			if grid[j][i] > colMax {
				colMax = grid[j][i]
			}
		}
		front += rowMax
		side += colMax
	}
	return top + front + side
}
|
package main
import (
"fmt"
"time"
"strings"
)
// BuildLocationsList returns the list of known miner locations, always
// headed by a synthetic "all" entry. Duplicate miner locations are collapsed
// via the locationsAdded set.
//
// BUG FIX: the original ignored the MinersCollection error until the named
// return, so callers received a misleading partial list ("all" only) next to
// a non-nil error. Now it fails fast with a nil list.
func BuildLocationsList() (locations Locations, err error) {
	miners, err := MinersCollection()
	if err != nil {
		return nil, err
	}
	locations = Locations{}
	locationsAdded := map[string]Location{}
	// Add the default 'all' location.
	allLocations := Location{
		Name: "all",
	}
	locations = append(locations, allLocations)
	locationsAdded["all"] = allLocations
	for _, miner := range miners {
		if _, exists := locationsAdded[miner.Location]; !exists {
			locationsAdded[miner.Location] = Location{
				Name:     miner.Location,
				GeoCoord: miner.GeoCoord,
			}
			locations = append(locations, locationsAdded[miner.Location])
		}
	}
	return locations, nil
}
// stringInSlice reports whether str occurs in list.
// A nil or empty list always yields false.
func stringInSlice(str string, list []string) bool {
	for i := range list {
		if list[i] == str {
			return true
		}
	}
	return false
}
// CollectStopwords returns the stopword list configured for the given
// location and source, always seeded with "http".
//
// Each database row may hold a comma-separated list of words; rows are
// lower-cased and stripped of spaces before splitting.
// NOTE(review): the row set is never explicitly closed and its final Err()
// is not checked — confirm QueryStopwordsFor's contract.
func CollectStopwords(location string, source string) (stopwords []string) {
	rows, err := QueryStopwordsFor(location, source)
	checkErr(err)
	stopwords = []string{"http"}
	for rows.Next() {
		var raw string
		checkErr(rows.Scan(&raw))
		// Normalize: lower-case and drop embedded spaces, then split the CSV.
		cleaned := strings.Replace(strings.ToLower(raw), " ", "", -1)
		stopwords = append(stopwords, strings.Split(cleaned, ",")...)
	}
	return stopwords
}
// WordCountRootCollection aggregates term counts for a location/source over
// the window [fromParam, toParam] (timestamps in "200601021504" format; the
// window defaults to the last 24 hours), split into `interval` equal time
// buckets, and returns up to `limit` distinct-velocity terms ordered by
// velocity (as produced by sortedWordCountKeys).
//
// interval must be >= 1: it is used as a divisor and as the series length.
// Panics raised by the helpers (checkErr) are recovered and converted into
// the returned collectionErr.
//
// BUG FIX: the original built date-parse errors with fmt.Errorf but
// discarded them (go vet: unused result), silently continuing with zero
// times; they are now returned. It also indexed serieses[key][interval-2]
// unconditionally, panicking for interval == 1; that case is handled.
func WordCountRootCollection(location string, source string, fromParam string, toParam string, interval int, limit int) (sortedCounts WordCounts, collectionErr error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			collectionErr, ok = r.(error)
			if !ok {
				collectionErr = fmt.Errorf("WordCountRootCollection: %v", r)
			}
		}
	}()
	wordCounts := WordCounts{}
	// "all" is the synthetic catch-all location; the queries expect "".
	if location == "all" {
		location = ""
	}
	t := time.Now()
	if fromParam == "" {
		from := t.Add(-24 * time.Hour)
		fromParam = from.Format("200601021504")
	}
	if toParam == "" {
		toParam = t.Format("200601021504")
	}
	fromTime, err := time.Parse("200601021504", fromParam)
	if err != nil {
		return nil, fmt.Errorf("invalid from date: %v", err)
	}
	toTime, err := time.Parse("200601021504", toParam)
	if err != nil {
		return nil, fmt.Errorf("invalid to date: %v", err)
	}
	// Width of a single bucket.
	duration := toTime.Sub(fromTime) / time.Duration(interval)
	stopwords := CollectStopwords(location, source)
	for i := 0; i < interval; i++ {
		// Current bucket window is [fromParam, toParam].
		toTime = fromTime.Add(duration)
		toParam = toTime.Format("200601021504")
		rows, err := QueryTerms(source, location, "", fromParam, toParam)
		checkErr(err)
		for rows.Next() {
			var uid int
			var postid int
			var term string
			var wordcount int
			var posted time.Time
			var termLocation string
			var locationHash int
			var termSource string
			err := rows.Scan(&uid, &postid, &term, &wordcount, &posted, &termLocation, &locationHash, &termSource)
			checkErr(err)
			wordCount := WordCount{
				Term:        term,
				Occurrences: wordcount,
				Series:      []int{wordcount},
				Sequence:    i,
			}
			// Drop markup-ish tokens and configured stopwords.
			if !strings.ContainsAny(term, "<>[]/:;()=\"") && !stringInSlice(term, stopwords) {
				wordCounts = append(wordCounts, wordCount)
			}
		}
		// Slide the window to the next bucket.
		fromTime = fromTime.Add(duration)
		fromParam = fromTime.Format("200601021504")
	}
	totalCounts := map[string]int{}
	serieses := map[string][]int{}
	for _, wordcount := range wordCounts {
		if _, ok := serieses[wordcount.Term]; !ok {
			serieses[wordcount.Term] = make([]int, interval)
		}
	}
	for _, wordcount := range wordCounts {
		totalCounts[wordcount.Term] += wordcount.Occurrences
		// NOTE(review): this counts rows (+1) per bucket, not occurrences —
		// presumably "number of posts mentioning the term"; confirm intent.
		serieses[wordcount.Term][wordcount.Sequence]++
	}
	velocityCounts := map[string]WordCount{}
	// For ordering by velocity: relative growth of the last bucket versus the
	// previous one (or the raw last-bucket count when there is no usable
	// previous value).
	for key := range totalCounts {
		if totalCounts[key] <= 1 {
			continue
		}
		last := float64(serieses[key][interval-1])
		velocity := 0.0
		if interval < 2 || serieses[key][interval-2] == 0 {
			velocity = last
		} else {
			prev := float64(serieses[key][interval-2])
			velocity = (last - prev) / prev
		}
		velocityCounts[key] = WordCount{
			Term:        key,
			Occurrences: totalCounts[key],
			Series:      serieses[key],
			Velocity:    velocity,
		}
	}
	rankings := map[float64]int{}
	sortedCounts = WordCounts{}
	for _, res := range sortedWordCountKeys(velocityCounts) {
		if _, ok := rankings[velocityCounts[res].Velocity]; ok {
			rankings[velocityCounts[res].Velocity]++
		} else {
			rankings[velocityCounts[res].Velocity] = 1
		}
		// Stop once more than `limit` distinct velocity values were seen.
		if len(rankings) > limit {
			break
		}
		sortedCounts = append(sortedCounts, velocityCounts[res])
	}
	// For ordering by occurrances
	/*
		sortedCounts = WordCounts {}
		for _, res := range sortedKeys(totalCounts) {
			if limit == 0 || len(sortedCounts) < int(limit) {
				if totalCounts[res] > 1 {
					// Calculate the velocity
					seriesAverage := float64(totalCounts[res]) / float64(interval)
					//fmt.Println("Total:", totalCounts[res], "interval:", interval)
					//fmt.Println("Average:", seriesAverage)
					sortedCounts = append(sortedCounts, WordCount {
						Term: res,
						Occurrences: totalCounts[res],
						Series: serieses[res],
						Velocity: float64(serieses[res][interval - 1]) / seriesAverage,
					})
				}
			}
		}
	*/
	return
}
// MinersCollection loads every configured miner from the database.
// On query failure it returns the empty (non-nil) miners slice together with
// the error, matching the original behavior.
//
// Improvement: the original used an empty `if err != nil {} else { ... }`
// branch; rewritten as an early return (idiomatic, vet/staticcheck clean).
// NOTE(review): rows is never closed and rows.Err() is not checked — if
// QueryMiners returns *sql.Rows, add `defer rows.Close()`; confirm the type.
func MinersCollection() (miners Miners, err error) {
	miners = Miners{}
	rows, err := QueryMiners()
	if err != nil {
		return
	}
	for rows.Next() {
		var uid int
		var name string
		var source string
		var location string
		var url string
		var geoCoord Point
		var locationHash int
		var stopwords string
		// checkErr panics on scan failure (recovered by callers that do so).
		checkErr(rows.Scan(&uid, &name, &source, &location, &url, &geoCoord, &locationHash, &stopwords))
		miners = append(miners, Miner{
			Uid:       uid,
			Name:      name,
			Source:    source,
			Location:  location,
			GeoCoord:  geoCoord,
			Url:       url,
			Stopwords: stopwords,
		})
	}
	return
}
// TrendsCollection builds a TermPackage for a single term: its per-bucket
// occurrence series, a per-source breakdown, the posts it appeared in, and
// co-occurring ("related") terms, over the window [fromParam, toParam]
// (timestamps formatted "200601021504"; defaults to the last 24 hours)
// split into `interval` equal buckets.
//
// NOTE(review): velocityInterval and minimumVelocity are never used in this
// body — confirm whether they can be dropped from the signature.
func TrendsCollection(source string, location string, term string, fromParam string, toParam string, interval int, velocityInterval float64, minimumVelocity float64) TermPackage {
	// "all" is the synthetic catch-all location; the queries expect "".
	if location == "all" {
		location = ""
	}
	t := time.Now()
	if fromParam == "" {
		from := t.Add(-24 * time.Hour)
		fromParam = from.Format("200601021504")
	}
	if toParam == "" {
		toParam = t.Format("200601021504")
	}
	fromTime, err := time.Parse("200601021504", fromParam)
	if err != nil {
		// BUG(review): this fmt.Errorf result is discarded, so a bad date
		// silently yields the zero time. Unchanged here because the function
		// has no error return; should be surfaced to the caller.
		fmt.Errorf("invalid from date: %v", err)
	}
	toTime, err := time.Parse("200601021504", toParam)
	if err != nil {
		// BUG(review): same as above — the error is built and dropped.
		fmt.Errorf("invalid to date: %v", err)
	}
	// Width of a single bucket.
	duration := toTime.Sub(fromTime)
	duration = duration / time.Duration(interval)
	termPackage := TermPackage {
		Term: term,
		Series: make([]int, interval),
		Sources: make([]Source, 0),
		SourceTypes: make([]SourceType, 0),
	}
	related := map[string]int {}          // co-occurring term -> summed wordcount
	totalOccurrences := 0                 // term occurrences across all buckets
	sourceSerieses := map[string][]int {} // source name -> per-bucket counts
	sourceURIsAdded := map[string]bool {} // dedup set for termPackage.Sources
	for i := 0; i < interval; i++ {
		// Advance the window to bucket i: [fromParam, toParam].
		toTime = fromTime.Add(duration)
		toParam = toTime.Format("200601021504")
		rows, err := QueryTerms(source, location, term, fromParam, toParam)
		// NOTE(review): query errors are silently swallowed (empty branch)
		// and rows is never closed — confirm QueryTerms' contract.
		if err != nil {
		} else {
			for rows.Next() {
				var uid int
				var postid int
				var term string
				var wordcount int
				var posted time.Time
				var location string
				var locationHash int
				var source string
				err := rows.Scan(&uid, &postid, &term, &wordcount, &posted, &location, &locationHash, &source)
				checkErr(err)
				// Accumulate this bucket's count for the term.
				termPackage.Series[i] = termPackage.Series[i] + wordcount
				totalOccurrences = totalOccurrences + wordcount
				if _, ok := sourceSerieses[source]; ok {
				} else {
					sourceSerieses[source] = make([]int, int(interval))
				}
				sourceSerieses[source][i] = sourceSerieses[source][i] + wordcount
				// Fetch the post this occurrence came from.
				postRows := QueryPosts(fmt.Sprintf(" WHERE uid=%d", postid))
				for postRows.Next() {
					var thisPostuid int
					var mined time.Time
					var postPosted time.Time
					var sourceURI string
					var postLocation string
					var postSource string
					var postLocationHash int
					err = postRows.Scan(&thisPostuid, &mined, &postPosted, &sourceURI, &postLocation, &postSource, &postLocationHash)
					checkErr(err)
					// Record each distinct source URI only once.
					if _, ok := sourceURIsAdded[sourceURI]; ok {
					} else {
						sourceURIsAdded[sourceURI] = true
						source := Source {
							Source: postSource,
							Location: postLocation,
							SourceURI: sourceURI,
							Posted: postPosted,
							Mined: mined,
						}
						termPackage.Sources = append(termPackage.Sources, source)
					}
					// Every term sharing the post counts toward "related".
					termsRows := QueryTermsForPost(thisPostuid)
					for termsRows.Next() {
						var wcuid int
						var wcpostid int
						var wcTerm string
						var wordcount int
						var wcPosted time.Time
						var wcLocation string
						var wcLocationHash int
						var wcSource string
						err := termsRows.Scan(&wcuid, &wcpostid, &wcTerm, &wordcount, &wcPosted, &wcLocation, &wcLocationHash, &wcSource)
						checkErr(err)
						if _, ok := related[wcTerm]; ok {
							related[wcTerm] += wordcount
						} else {
							related[wcTerm] = wordcount
						}
					}
				}
			}
		}
		// TODO: Need to sort the related terms by velocity
		// Slide the window forward one bucket.
		fromTime = fromTime.Add(duration)
		fromParam = fromTime.Format("200601021504")
	}
	// Flatten the per-source series into the response type.
	for key, value := range sourceSerieses {
		termPackage.SourceTypes = append(termPackage.SourceTypes, SourceType {
			Name: key,
			Series: value,
		})
	}
	for _, res := range sortedKeys(related) {
		// Exclude the term we are finding related terms for
		if term != res {
			termPackage.Related = append(termPackage.Related, Related {
				Term: res,
				Occurrences: related[res],
			})
		}
	}
	// Calculate the velocity
	// (last bucket's count relative to the average bucket count).
	seriesAverage := float64(totalOccurrences) / float64(interval)
	if seriesAverage != 0 {
		termPackage.Velocity = float64(termPackage.Series[interval - 1]) / seriesAverage
	}
	/*
		fmt.Println("Term:", termPackage.Term)
		fmt.Println("Series:", termPackage.Series)
		fmt.Println("SourceTypes:", termPackage.SourceTypes)
		fmt.Println("Related:", termPackage.Related)
		fmt.Println("Sources:", termPackage.Sources)
		fmt.Println(related)
		fmt.Println(termPackage)
	*/
	return termPackage
}
// 本文件由gen_static_data_go生成
// 请遵照提示添加修改!!!
package sd
import "encoding/json"
import "fmt"
import "log"
import "path/filepath"
import "github.com/tealeg/xlsx"
import "github.com/trist725/mgsu/util"
//////////////////////////////////////////////////////////////////////////////////////////////////
// TODO 添加扩展import代码
//import_extend_begin
//import_extend_end
//////////////////////////////////////////////////////////////////////////////////////////////////
// Shop is one row of the shop configuration sheet; fields are populated from
// xlsx columns via the excel_column/excel_name tags (see load).
type Shop struct {
	ID int64 `excel_column:"0" excel_name:"id"` // unique row id
	Des string `excel_column:"2" excel_name:"des"` // description
	Content []int `excel_column:"3" excel_name:"content"` // content payload
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add extended struct fields here (generator anchor below)
	//struct_extend_begin
	//struct_extend_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
}
// NewShop returns a zero-valued Shop; custom initialization goes between the
// generator anchors.
func NewShop() *Shop {
	sd := &Shop{}
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add constructor extension code here (generator anchor below)
	//struct_new_begin
	//struct_new_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
	return sd
}

// String renders the row as JSON; the marshal error is ignored (an empty
// string is returned on failure).
func (sd Shop) String() string {
	ba, _ := json.Marshal(sd)
	return string(ba)
}

// Clone returns a deep copy: the shallow value copy is fixed up by
// duplicating the Content slice so the clone shares no backing array with
// the original.
func (sd Shop) Clone() *Shop {
	n := NewShop()
	*n = sd
	n.Content = make([]int, len(sd.Content))
	copy(n.Content, sd.Content)
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add Clone extension code here (generator anchor below)
	//struct_clone_begin
	//struct_clone_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
	return n
}

// load fills the struct from one xlsx row using the excel_* field tags.
func (sd *Shop) load(row *xlsx.Row) error {
	return util.DeserializeStructFromXlsxRow(sd, row)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// ShopManager owns all loaded Shop rows, indexed both by load order
// (dataArray) and by ID (dataMap).
type ShopManager struct {
	dataArray []*Shop          // rows in sheet order
	dataMap map[int64]*Shop    // rows keyed by Shop.ID
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add extended manager fields here (generator anchor below)
	//manager_extend_begin
	//manager_extend_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
}

// newShopManager returns an empty manager ready for Load.
func newShopManager() *ShopManager {
	mgr := &ShopManager{
		dataArray: []*Shop{},
		dataMap: make(map[int64]*Shop),
	}
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add manager constructor extension code here (generator anchor below)
	//manager_new_begin
	//manager_new_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
	return mgr
}
// Load reads the "data" sheet of the given xlsx file into the manager.
// The first three rows are treated as headers; rows whose first cell starts
// with '#' or whose ID is 0 are skipped. It returns false if the file/sheet
// is unusable or any row fails to parse or validate (remaining rows are
// still processed on per-row failures).
func (mgr *ShopManager) Load(excelFilePath string) (success bool) {
	success = true
	absExcelFilePath, err := filepath.Abs(excelFilePath)
	if err != nil {
		log.Printf("获取 %s 的绝对路径失败, %s", excelFilePath, err)
		return false
	}
	xl, err := xlsx.OpenFile(absExcelFilePath)
	if err != nil {
		log.Printf("打开 %s 失败, %s\n", excelFilePath, err)
		return false
	}
	if len(xl.Sheets) == 0 {
		log.Printf("%s 没有分页可加载\n", excelFilePath)
		return false
	}
	dataSheet, ok := xl.Sheet["data"]
	if !ok {
		log.Printf("%s 没有data分页\n", excelFilePath)
		return false
	}
	if len(dataSheet.Rows) < 3 {
		log.Printf("%s 数据少于3行\n", excelFilePath)
		return false
	}
	// Data starts at row index 3 (rows 0-2 are headers).
	for i := 3; i < len(dataSheet.Rows); i++ {
		row := dataSheet.Rows[i]
		if len(row.Cells) <= 0 {
			continue
		}
		firstColumn := row.Cells[0]
		firstComment := firstColumn.String()
		if firstComment != "" {
			if firstComment[0] == '#' {
				// Skip rows commented out with a leading '#'.
				continue
			}
		}
		sd := NewShop()
		err = sd.load(row)
		if err != nil {
			log.Printf("%s 加载第%d行失败, %s\n", excelFilePath, i+1, err)
			success = false
			continue
		}
		// Rows without an id are treated as blanks and skipped.
		if sd.ID == 0 {
			continue
		}
		//////////////////////////////////////////////////////////////////////////////////////////////////
		// TODO: add per-row load extension code here (generator anchor below)
		//struct_load_begin
		//struct_load_end
		//////////////////////////////////////////////////////////////////////////////////////////////////
		if err := mgr.check(excelFilePath, i+1, sd); err != nil {
			log.Println(err)
			success = false
			continue
		}
		mgr.dataArray = append(mgr.dataArray, sd)
		mgr.dataMap[sd.ID] = sd
		//////////////////////////////////////////////////////////////////////////////////////////////////
		// TODO: add manager load extension code here (generator anchor below)
		//manager_load_begin
		//manager_load_end
		//////////////////////////////////////////////////////////////////////////////////////////////////
	}
	return
}
// Size returns the number of loaded rows.
func (mgr ShopManager) Size() int {
	return len(mgr.dataArray)
}

// Get returns a Clone of the row with the given id, or nil if absent.
// Callers may mutate the returned copy freely.
func (mgr ShopManager) Get(id int64) *Shop {
	sd, ok := mgr.dataMap[id]
	if !ok {
		return nil
	}
	return sd.Clone()
}

// Each visits every row in load order, passing a Clone to f; return false
// from f to stop early.
func (mgr ShopManager) Each(f func(sd *Shop) bool) {
	for _, sd := range mgr.dataArray {
		if !f(sd.Clone()) {
			break
		}
	}
}

// each is the internal variant of Each that passes the shared row pointer
// (no Clone) — callbacks must not mutate the row.
func (mgr *ShopManager) each(f func(sd *Shop) bool) {
	for _, sd := range mgr.dataArray {
		if !f(sd) {
			break
		}
	}
}

// findIf returns the first row matching f, sharing the internal pointer
// (internal use only — do not mutate the result).
func (mgr ShopManager) findIf(f func(sd *Shop) bool) *Shop {
	for _, sd := range mgr.dataArray {
		if f(sd) {
			return sd
		}
	}
	return nil
}

// FindIf returns a Clone of the first row matching f, or nil if none match.
func (mgr ShopManager) FindIf(f func(sd *Shop) bool) *Shop {
	for _, sd := range mgr.dataArray {
		n := sd.Clone()
		if f(n) {
			return n
		}
	}
	return nil
}
// check validates one parsed row before insertion; currently it only rejects
// duplicate ids. Extra validation goes between the generator anchors.
func (mgr ShopManager) check(excelFilePath string, row int, sd *Shop) error {
	if _, ok := mgr.dataMap[sd.ID]; ok {
		return fmt.Errorf("%s 第%d行的id重复", excelFilePath, row)
	}
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add validation extension code here (generator anchor below)
	//check_begin
	//check_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
	return nil
}

// AfterLoadAll is a post-load hook invoked once every sheet has been loaded;
// extend it between the generator anchors.
func (mgr *ShopManager) AfterLoadAll(excelFilePath string) (success bool) {
	success = true
	//////////////////////////////////////////////////////////////////////////////////////////////////
	// TODO: add post-load extension code here (generator anchor below)
	//after_load_all_begin
	//after_load_all_end
	//////////////////////////////////////////////////////////////////////////////////////////////////
	return
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// TODO 添加扩展代码
//extend_begin
//extend_end
//////////////////////////////////////////////////////////////////////////////////////////////////
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/local/policyutil/fixtures"
"chromiumos/tast/testing"
)
// init registers the DeviceMinimumVersionNotifications test with the tast
// framework, running against the enrolled fake-DMS fixture.
func init() {
	testing.AddTest(&testing.Test{
		Func: DeviceMinimumVersionNotifications,
		LacrosStatus: testing.LacrosVariantNeeded,
		Desc: "Notifications of DeviceMinimumVersion policy when device has reached auto update expiration",
		Contacts: []string{
			"snijhara@google.com", // Test author
			"chromeos-commercial-remote-management@google.com",
		},
		Attr: []string{"group:commercial_limited"},
		SoftwareDeps: []string{"chrome"},
		Fixture: fixture.FakeDMSEnrolled,
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.DeviceMinimumVersion{}, pci.VerifiedFunctionalityJS),
		},
	})
}
// DeviceMinimumVersionNotifications verifies that when auto-update
// expiration is simulated (via --aue-reached-for-update-required-test) and
// DeviceMinimumVersion demands an unreachable version, the "update required"
// notification appears and the end-of-life banner is shown on
// chrome://management.
func DeviceMinimumVersionNotifications(ctx context.Context, s *testing.State) {
	fdms := s.FixtValue().(*fakedms.FakeDMS)
	// Start a Chrome instance to fetch policies from the FakeDMS.
	cr, err := chrome.New(ctx,
		chrome.FakeLogin(chrome.Creds{User: fixtures.Username, Pass: fixtures.Password}),
		chrome.DMSPolicy(fdms.URL),
		chrome.KeepEnrollment(),
		chrome.ExtraArgs("--aue-reached-for-update-required-test"))
	if err != nil {
		s.Fatal("Chrome login failed: ", err)
	}
	defer cr.Close(ctx)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to get test API connection: ", err)
	}
	defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_update_required_notification")
	// Create and update DeviceMinimumVersion policy.
	// "99999999" is intentionally higher than any real milestone so the
	// requirement can never be satisfied.
	policyValue := policy.DeviceMinimumVersion{
		Val: &policy.DeviceMinimumVersionValue{
			Requirements: []*policy.DeviceMinimumVersionValueRequirements{
				{
					AueWarningPeriod: 2,
					ChromeosVersion: "99999999",
					WarningPeriod: 1,
				},
			},
		},
	}
	if err := policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{&policyValue}); err != nil {
		s.Fatal("Failed to update policies: ", err)
	}
	// Check update required notification in case of auto update expiration is visible.
	const notificationWaitTime = 10 * time.Second
	const notificationID = "policy.update_required" // Hardcoded in Chrome.
	_, err = ash.WaitForNotification(ctx, tconn, notificationWaitTime, ash.WaitIDContains(notificationID))
	if err != nil {
		s.Error("Failed to find update required notification: ", err)
	}
	// Check update required banner in case of auto update expiration is visible on the Chrome management page.
	conn, err := cr.NewConn(ctx, "chrome://management/")
	if err != nil {
		s.Fatal("Failed to open management page: ", err)
	}
	defer conn.Close()
	// The banner element must exist and must not carry the [hidden] attribute.
	script := "document.querySelector('management-ui').shadowRoot.querySelector('.eol-section') && !document.querySelector('management-ui').shadowRoot.querySelector('.eol-section[hidden]')"
	if err := conn.WaitForExprFailOnErr(ctx, script); err != nil {
		s.Error("Failed to verify update required end-of-life banner on management page: ", err)
	}
}
|
package main
import "fmt"
// main demonstrates producer/consumer over a buffered channel: a goroutine
// sends 15000 ints and then closes the channel; the main goroutine drains it
// with range, which terminates once the channel is closed and empty.
func main() {
	fmt.Println("test")
	ch := make(chan int, 1)
	go func() {
		// close runs after "CLOSED" is printed, signalling the range below.
		defer close(ch)
		for i := 0; i < 15000; i++ {
			fmt.Println("To channel:", i)
			ch <- i
		}
		fmt.Println("CLOSED")
	}()
	for v := range ch {
		fmt.Println("\tFrom channel:", v)
	}
}
|
package Products
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"github.com/GAQF202/servidor-rest/Structs"
"github.com/GAQF202/servidor-rest/list"
)
// CartProducts accumulates the shopping-cart contents across requests.
// NOTE(review): package-level mutable state shared by all HTTP handlers —
// not safe under concurrent requests; confirm the single-client assumption.
var CartProducts []CarritoType

// Buscar_tienda is the JSON request body used to look up a store by
// department, name and rating.
type Buscar_tienda struct {
	Departamento string `json:"Departamento"`
	Nombre string `json:"Nombre"`
	Calificacion int `json:"Calificacion"`
}

// tienda holds the most recent store-lookup request (package-level state).
var tienda Buscar_tienda
//BUSQUEDA DE TIENDA PARA MOSTRAR LOS PRODUCTOS
// Tienda handles a store-lookup request: it decodes a Buscar_tienda JSON
// body, resolves the store's inventory (also caching the store position in
// PosicionTiendaActual), and writes the inventory back as JSON.
func Tienda(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// BUG FIX: the original printed this message and then kept
		// processing an empty body; the handler now stops early.
		fmt.Fprintf(w, "Inserte una tienda existente")
		return
	}
	w.Header().Set("Content-type", "application/json")
	w.WriteHeader(http.StatusCreated)
	// NOTE(review): the Unmarshal error is still ignored; a malformed body
	// silently reuses the previous package-level `tienda` value.
	json.Unmarshal([]byte(reqBody), &tienda)
	res := Get_Producto(tienda)
	PosicionTiendaActual = res
	// Write the JSON response.
	json.NewEncoder(w).Encode(res)
}
// TiendaEcontrada is a response wrapper for a found store.
// NOTE(review): the Productos element type is an empty struct and the type
// is not referenced in this file — confirm whether it is still needed.
type TiendaEcontrada struct {
	Productos []struct{}
}

// Get_Producto resolves the store's slot via list.Get_position (caching it
// in PosicionVectorActual) and returns the store's inventory.
func Get_Producto(tienda Buscar_tienda) list.InventoryType {
	Position := list.Get_position(tienda.Departamento, tienda.Nombre, tienda.Calificacion)
	PosicionVectorActual = Position
	return list.JsonInventory(tienda.Nombre, tienda.Calificacion, list.GlobalVector[Position], tienda.Departamento)
}
// carrito holds the most recently received cart payload (package-level state).
var carrito CarritoType
// Cobrar checks out the cart: it decodes the request body, subtracts the
// purchased quantities from store inventory via removeProd, then empties
// the cart.
func Cobrar(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// NOTE(review): the handler keeps running after this error message —
		// it probably should return here.
		fmt.Fprintf(w, "Inserte una tienda existente")
	}
	w.Header().Set("Content-type", "application/json")
	w.WriteHeader(http.StatusCreated)
	// NOTE(review): Unmarshal error ignored; `carrito` itself is unused
	// below — the subtraction operates on the accumulated CartProducts.
	json.Unmarshal([]byte(reqBody), &carrito)
	//fmt.Println(carrito)
	removeProd(CartProducts)
	// Once the purchase completes the cart is emptied.
	CartProducts = nil
}
// Comprados echoes the current cart contents back as JSON.
func Comprados(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// NOTE(review): same missing early return as in Cobrar.
		fmt.Fprintf(w, "Inserte una tienda existente")
	}
	w.Header().Set("Content-type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.Unmarshal([]byte(reqBody), &carrito)
	json.NewEncoder(w).Encode(CartProducts)
}
// removeProd subtracts every purchased product in the cart from its store's
// inventory (one list.Delete_product call per product).
func removeProd(inventory []CarritoType) {
	// Walk each cart entry (store + selected products).
	for i := 0; i < len(inventory); i++ {
		// Locate the store's slot in the global vector.
		Position := list.Get_position(inventory[i].Departamento, inventory[i].Tienda, inventory[i].Calificacion)
		for j := 0; j < len(inventory[i].Productos); j++ {
			p := inventory[i].Productos[j]
			// NOTE(review): positional struct literal — fragile if
			// Structs.Product ever gains or reorders fields.
			producto := Structs.Product{p.Nombre, p.Codigo, p.Descripcion, p.Precio, p.Cantidad, p.Imagen, p.Almacenamiento}
			list.Delete_product(inventory[i].Tienda, inventory[i].Calificacion, list.GlobalVector[Position], producto)
		}
	}
}
// elim holds the most recent delete-selection payload (package-level state).
var elim CarritoType
// Delete_Select receives the products the client wants removed from the
// cart and echoes the payload back.
// NOTE(review): the actual removal logic is commented out below, so this
// endpoint currently has no effect on CartProducts — confirm intent.
func Delete_Select(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// NOTE(review): missing early return after the error message.
		fmt.Fprintf(w, "Inserte una tienda existente")
	}
	w.Header().Set("Content-type", "application/json")
	w.WriteHeader(http.StatusCreated)
	json.Unmarshal([]byte(reqBody), &elim)
	json.NewEncoder(w).Encode(elim)
	/*for i := 0; i < len(CartProducts); i++ {
		for j := 0; j < len(elim.Productos); j++ {
			//BUSQUEDA DE TIENDA Y PRODUCTO
			if CartProducts[i].Tienda == elim.Tienda && CartProducts[i].Calificacion == elim.Calificacion && CartProducts[i].Departamento == elim.Departamento && CartProducts[i].Productos[j].Codigo == elim.Productos[j].Codigo {
				fmt.Println(CartProducts[i])
				CartProducts[i].Productos[j].Cantidad = 0
			}
		}
	}*/
}
// Ruta accumulates the station names of the computed route (global state;
// resetting it between computations is the caller's responsibility).
var Ruta []string
// CalcularRecorrido recursively builds a route visiting every stop in
// `paradas` starting from `inicio`, then appends the final leg to `fin` and
// the return leg back to `inicioAbs`, storing the station sequence in Ruta.
//
// NOTE(review): the original header said "shortest path", but the selection
// below keeps the candidate with the LARGEST PesoTotal (`<` comparison) —
// confirm whether this should be a minimum instead.
func CalcularRecorrido(grafo Graph, paradas []string, inicio string, inicioAbs string, fin string) {
	var paradasTemporal []ByWay
	// Collect a candidate path from `inicio` to every remaining stop.
	for _, est := range paradas {
		ParadaActual := grafo.GetPath(inicio, est)
		paradasTemporal = append(paradasTemporal, ParadaActual)
	}
	var paradaAInsertar ByWay
	// Keep the candidate with the greatest total weight (see NOTE above).
	for _, parada := range paradasTemporal {
		if paradaAInsertar.PesoTotal < parada.PesoTotal {
			paradaAInsertar = parada
		}
	}
	// Append the chosen leg's stations to the global route.
	for i := 0; i < len(paradaAInsertar.Estaciones); i++ {
		Ruta = append(Ruta, paradaAInsertar.Estaciones[i])
	}
	// Recurse while stops remain; otherwise close the loop via `fin` and
	// back to the absolute start.
	if len(paradas) != 0 {
		NuevoSlice := remove(paradas, 0)
		// NOTE(review): Estaciones[1] panics if the chosen path has fewer
		// than two stations — verify GetPath's guarantees.
		CalcularRecorrido(grafo, NuevoSlice, paradaAInsertar.Estaciones[1], inicioAbs, fin)
	} else {
		ParadaActual := grafo.GetPath(Ruta[len(Ruta)-1], fin)
		for i := 0; i < len(ParadaActual.Estaciones); i++ {
			Ruta = append(Ruta, ParadaActual.Estaciones[i])
		}
		Regreso := grafo.GetPath(fin, inicioAbs)
		for i := 0; i < len(Regreso.Estaciones); i++ {
			Ruta = append(Ruta, Regreso.Estaciones[i])
		}
	}
}
// remove deletes the element at index i in O(1) by swapping in the last
// element and truncating. Element order is NOT preserved, and the input
// slice's backing array is mutated.
func remove(s []string, i int) []string {
	last := len(s) - 1
	s[i] = s[last]
	return s[:last]
}
// MapaGlobal holds the full station graph used when rendering routes.
var MapaGlobal Structs.Rec
// RealizarRecorrido returns the computed route (Ruta) as JSON and then
// renders it with Graphviz via GraficarRuta.
func RealizarRecorrido(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Content-type", "application/json")
	w.WriteHeader(http.StatusCreated)
	// Write the JSON response.
	json.NewEncoder(w).Encode(Ruta)
	GraficarRuta()
}
// GraficarRuta builds a Graphviz (DOT) description of the whole map, then
// overlays the computed route's edges in red, and hands the result to
// ReporteRecorrido for rendering.
// NOTE(review): string += in a loop is O(n^2); a strings.Builder would be
// cheaper if the map grows large.
func GraficarRuta() {
	dot := ""
	// Emit every node and its weighted (undirected) edges.
	for i := 0; i < len(MapaGlobal.Nodos); i++ {
		dot += MapaGlobal.Nodos[i].Nombre + " [label=\"" + MapaGlobal.Nodos[i].Nombre + "\"];\n"
		for j := 0; j < len(MapaGlobal.Nodos[i].Enlaces); j++ {
			dot += MapaGlobal.Nodos[i].Nombre + " -- " + MapaGlobal.Nodos[i].Enlaces[j].Nombre + "[label = " + strconv.Itoa(MapaGlobal.Nodos[i].Enlaces[j].Distancia) + "];\n"
		}
	}
	// Overlay consecutive route segments in red, skipping self-loops.
	for i := 0; i < len(Ruta); i++ {
		if i < len(Ruta)-1 {
			if Ruta[i] != Ruta[i+1] {
				dot += Ruta[i] + "--" + Ruta[i+1] + "[color=\"#f02c2c\"];"
			}
		}
	}
	ReporteRecorrido(dot)
}
|
package main
import "fmt"
// person is a simple value type used to demonstrate Go method sets.
type person struct {
	first string
	last string
	age int
}
// human is satisfied by anything with a speak method. Because speak is
// declared with a pointer receiver below, only *person (not person)
// implements it.
type human interface {
	speak()
}
// speak prints the person's greeting ("Hello, my name is <first> <last>").
// Pointer receiver: this is what keeps plain person values out of the
// human interface's method set.
func (p *person) speak() {
	fmt.Printf("Hello, my name is %s %s\n", p.first, p.last)
}
// saySomething accepts any human and makes it speak.
func saySomething(h human) {
	h.speak()
}
// main shows that a person value does not satisfy human (speak has a
// pointer receiver) while &p1 does.
func main() {
	p1 := person{
		first: "Jonathan",
		last: "Thompson",
		age: 34,
	}
	// Direct method call works on an addressable value: the compiler
	// rewrites it as (&p1).speak().
	p1.speak()
	// saySomething(p1) Compiler error: Requires a pointer
	saySomething(&p1) // No compiler error
}
|
package utils
import (
"github.com/influxdata/influxdb/client/v2"
"log"
"sync/atomic"
"time"
"zpush/conf"
)
// ServerStats tracks process-wide message counters since startup.
// MsgIn/MsgOut are updated via sync/atomic from multiple goroutines.
// NOTE(review): on 32-bit platforms 64-bit atomics require 8-byte-aligned
// fields; verify the offsets of MsgIn/MsgOut after StartTime if 32-bit
// targets are supported.
type ServerStats struct {
	StartTime time.Time
	MsgIn uint64
	MsgOut uint64
}
// stats is the process-wide singleton returned by Stats.
var stats ServerStats
// RecordStart marks the server start time and launches the periodic
// reporting loop on its own goroutine.
func (s *ServerStats) RecordStart() {
	s.StartTime = time.Now()
	go s.ReportStats()
}
// RecordMsgIn atomically increments the inbound-message counter.
func (s *ServerStats) RecordMsgIn() {
	atomic.AddUint64(&s.MsgIn, 1)
}
// RecordMsgOut atomically increments the outbound-message counter.
func (s *ServerStats) RecordMsgOut() {
	atomic.AddUint64(&s.MsgOut, 1)
}
// Stats returns the package-level ServerStats singleton.
func Stats() *ServerStats {
	return &stats
}
// ReportStats pushes server stats to InfluxDB every 5 seconds. It never
// returns; run it on its own goroutine (see RecordStart).
//
// Improvement: the single-case for/select loop is replaced by the idiomatic
// `for range ticker.C`, and the ticker is (defensively) stopped.
func (s *ServerStats) ReportStats() {
	tick := time.NewTicker(time.Second * 5)
	defer tick.Stop() // unreachable today, but correct if the loop ever gains an exit
	for range tick.C {
		// Report does blocking network I/O; run it off the ticker loop so a
		// slow InfluxDB write cannot delay the next tick.
		go s.Report()
	}
}
// Report writes a single "zpush_msg" measurement point (msg_in / msg_out
// counters) to the configured InfluxDB database.
//
// BUG FIXES: the HTTP client was never closed (connection leak per call),
// and the counters were read with plain loads while other goroutines update
// them via atomic.AddUint64 (a data race under -race).
// NOTE(review): log.Fatal aborts the whole process on any reporting error;
// behavior kept from the original, but consider returning an error instead.
func (s *ServerStats) Report() {
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     conf.Config().Influxdb.Address,
		Username: conf.Config().Influxdb.Username,
		Password: conf.Config().Influxdb.Password,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Release idle connections when done (was leaked before).
	defer c.Close()
	// Create a new point batch
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  conf.Config().Influxdb.DB,
		Precision: "s",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Counters are written with atomic.AddUint64 elsewhere, so they must be
	// read atomically here too.
	fields := map[string]interface{}{
		"msg_in":  float32(atomic.LoadUint64(&s.MsgIn)),
		"msg_out": float32(atomic.LoadUint64(&s.MsgOut)),
	}
	pt, err := client.NewPoint("zpush_msg", nil, fields)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)
	// Write the batch
	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
	log.Println("report stats to influxdb success")
}
|
package collector
import (
"fmt"
"os"
"path"
"strings"
"testing"
"github.com/arunvelsriram/sftp-exporter/pkg/constants/viperkeys"
"github.com/arunvelsriram/sftp-exporter/pkg/internal/mocks"
"github.com/golang/mock/gomock"
"github.com/kr/fs"
"github.com/pkg/sftp"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/spf13/afero"
"github.com/spf13/viper"
"github.com/stretchr/testify/suite"
log "github.com/sirupsen/logrus"
)
// Refer: https://github.com/kr/fs/blob/main/filesystem.go
// memKrFs adapts an in-memory afero filesystem to the kr/fs FileSystem
// interface so fs.WalkFS can be driven by test fixtures.
type memKrFs struct {
	memFs afero.Fs
}
// ReadDir lists a directory. The magic path "/errorpath" (case-insensitive)
// always fails, letting tests exercise the walker's error handling.
func (m memKrFs) ReadDir(dirname string) ([]os.FileInfo, error) {
	if strings.EqualFold("/errorpath", dirname) {
		return nil, fmt.Errorf("error reading directory")
	}
	return afero.ReadDir(m.memFs, dirname)
}
// Lstat stats a file; plain Stat is used (the in-memory fixture has no
// symlinks to distinguish).
func (m memKrFs) Lstat(name string) (os.FileInfo, error) {
	return m.memFs.Stat(name)
}
// Join joins path elements with forward slashes.
func (m memKrFs) Join(elem ...string) string {
	return path.Join(elem...)
}
// SFTPCollectorSuite wires a mocked SFTP client into the collector under test.
type SFTPCollectorSuite struct {
	suite.Suite
	ctrl *gomock.Controller
	sftpClient *mocks.MockSFTPClient
	collector prometheus.Collector
}
// TestSFTPCollectorSuite is the go test entry point for the suite.
func TestSFTPCollectorSuite(t *testing.T) {
	suite.Run(t, new(SFTPCollectorSuite))
}
// SetupTest creates a fresh mock client and collector before every test.
func (s *SFTPCollectorSuite) SetupTest() {
	log.SetLevel(log.DebugLevel)
	s.ctrl = gomock.NewController(s.T())
	s.sftpClient = mocks.NewMockSFTPClient(s.ctrl)
	s.collector = NewSFTPCollector(s.sftpClient)
}
// TearDownTest verifies that all mock expectations were satisfied.
func (s *SFTPCollectorSuite) TearDownTest() {
	s.ctrl.Finish()
}
// TestSFTPCollectorDescribe checks that Describe publishes the five metric
// descriptors in a fixed order: up, fs total space, fs free space, object
// count, and object total size.
func (s *SFTPCollectorSuite) TestSFTPCollectorDescribe() {
	ch := make(chan *prometheus.Desc)
	// Describe blocks on the unbuffered channel, so it runs concurrently
	// while the assertions below receive one descriptor at a time.
	go s.collector.Describe(ch)
	up := <-ch
	s.Equal(`Desc{fqName: "sftp_up", help: "Tells if exporter is able to connect to SFTP", `+
		`constLabels: {}, variableLabels: []}`,
		up.String(),
	)
	fsTotalSpace := <-ch
	s.Equal(`Desc{fqName: "sftp_filesystem_total_space_bytes", `+
		`help: "Total space in the filesystem containing the path", constLabels: {}, variableLabels: [path]}`,
		fsTotalSpace.String(),
	)
	fsFreeSpace := <-ch
	s.Equal(`Desc{fqName: "sftp_filesystem_free_space_bytes", `+
		`help: "Free space in the filesystem containing the path", constLabels: {}, variableLabels: [path]}`,
		fsFreeSpace.String(),
	)
	objectCount := <-ch
	s.Equal(
		`Desc{fqName: "sftp_objects_available", `+
			`help: "Number of objects in the path", constLabels: {}, variableLabels: [path]}`,
		objectCount.String(),
	)
	objectSize := <-ch
	s.Equal(
		`Desc{fqName: "sftp_objects_total_size_bytes", `+
			`help: "Total size of all the objects in the path", constLabels: {}, variableLabels: [path]}`,
		objectSize.String(),
	)
}
// TestSFTPCollectorCollectShouldWriteUpMetric: with no paths configured and
// a successful connection, Collect emits sftp_up = 1.
func (s *SFTPCollectorSuite) TestSFTPCollectorCollectShouldWriteUpMetric() {
	viper.Set(viperkeys.SFTPPaths, []string{})
	s.sftpClient.EXPECT().Connect().Return(nil)
	s.sftpClient.EXPECT().Close().Return(nil)
	ch := make(chan prometheus.Metric)
	done := make(chan bool)
	go func() {
		s.collector.Collect(ch)
		done <- true
	}()
	up := <-ch
	metric := dto.Metric{}
	desc := up.Desc()
	_ = up.Write(&metric)
	s.Equal(`Desc{fqName: "sftp_up", help: "Tells if exporter is able to connect to SFTP", `+
		`constLabels: {}, variableLabels: []}`, desc.String())
	s.Equal(1.0, metric.GetGauge().GetValue())
	// Wait for Collect to finish so TearDownTest's expectation check is valid.
	<-done
}
// TestSFTPCollectorCollectShouldWriteUpMetricAndReturnIfClientCreationFails:
// when the SFTP connection cannot be established, Collect emits sftp_up = 0
// and stops (note: no Close expectation is registered, so Collect must not
// attempt to close a failed connection).
func (s *SFTPCollectorSuite) TestSFTPCollectorCollectShouldWriteUpMetricAndReturnIfClientCreationFails() {
	viper.Set(viperkeys.SFTPPaths, []string{})
	s.sftpClient.EXPECT().Connect().Return(fmt.Errorf("failed to connect to SFTP"))
	ch := make(chan prometheus.Metric)
	done := make(chan bool)
	go func() {
		s.collector.Collect(ch)
		done <- true
	}()
	up := <-ch
	metric := dto.Metric{}
	desc := up.Desc()
	_ = up.Write(&metric)
	s.Equal(`Desc{fqName: "sftp_up", help: "Tells if exporter is able to connect to SFTP", `+
		`constLabels: {}, variableLabels: []}`, desc.String())
	s.Equal(0.0, metric.GetGauge().GetValue())
	// Wait for Collect to finish before teardown verifies the mocks.
	<-done
}
// TestSFTPCollectorCollectShouldWriteFSMetrics verifies that Collect emits a
// total-space and a free-space gauge per configured path, derived from the
// path's StatVFS result (fragment size * blocks, fragment size * free blocks).
func (s *SFTPCollectorSuite) TestSFTPCollectorCollectShouldWriteFSMetrics() {
	viper.Set(viperkeys.SFTPPaths, []string{"/path0", "/path1"})
	memFs := afero.NewMemMapFs()
	_ = memFs.MkdirAll("/path0", 0755)
	_ = memFs.MkdirAll("/path1", 0755)
	path0Walker := fs.WalkFS("/path0", memKrFs{memFs: memFs})
	path1Walker := fs.WalkFS("/path1", memKrFs{memFs: memFs})
	s.sftpClient.EXPECT().Connect().Return(nil)
	s.sftpClient.EXPECT().StatVFS("/path0").Return(&sftp.StatVFS{Frsize: 10, Blocks: 1000, Bfree: 100}, nil)
	s.sftpClient.EXPECT().StatVFS("/path1").Return(&sftp.StatVFS{Frsize: 5, Blocks: 1000, Bfree: 500}, nil)
	s.sftpClient.EXPECT().Walk("/path0").Return(path0Walker)
	s.sftpClient.EXPECT().Walk("/path1").Return(path1Walker)
	s.sftpClient.EXPECT().Close()
	ch := make(chan prometheus.Metric)
	done := make(chan bool)
	go func() {
		s.collector.Collect(ch)
		done <- true
	}()
	// First metric on the channel is the "up" gauge; not under test here.
	<-ch
	// assertGauge checks one gauge metric's description, value, and path label.
	assertGauge := func(m prometheus.Metric, wantDesc string, wantValue float64, wantPath string) {
		metric := &dto.Metric{}
		_ = m.Write(metric)
		s.Equal(wantDesc, m.Desc().String())
		s.Equal(wantValue, metric.GetGauge().GetValue())
		s.Equal("path", metric.GetLabel()[0].GetName())
		s.Equal(wantPath, metric.GetLabel()[0].GetValue())
	}
	totalSpaceDesc := `Desc{fqName: "sftp_filesystem_total_space_bytes", help: "Total space in the filesystem containing the path", ` +
		`constLabels: {}, variableLabels: [path]}`
	freeSpaceDesc := `Desc{fqName: "sftp_filesystem_free_space_bytes", help: "Free space in the filesystem containing the path", ` +
		`constLabels: {}, variableLabels: [path]}`
	assertGauge(<-ch, totalSpaceDesc, 10000.0, "/path0") // Frsize 10 * Blocks 1000
	assertGauge(<-ch, freeSpaceDesc, 1000.0, "/path0")   // Frsize 10 * Bfree 100
	assertGauge(<-ch, totalSpaceDesc, 5000.0, "/path1")  // Frsize 5 * Blocks 1000
	assertGauge(<-ch, freeSpaceDesc, 2500.0, "/path1")   // Frsize 5 * Bfree 500
	// Drain the object-count/size metrics for both paths; not under test here.
	for i := 0; i < 4; i++ {
		<-ch
	}
	<-done
}
// TestSFTPCollectorCollectShouldNotWriteFSMetricsOnError verifies that when
// StatVFS fails for a path, Collect emits no filesystem-space gauges for it;
// the remaining metrics (up plus the object metrics for the path) must not
// carry a filesystem-space descriptor.
func (s *SFTPCollectorSuite) TestSFTPCollectorCollectShouldNotWriteFSMetricsOnError() {
	viper.Set(viperkeys.SFTPPaths, []string{"/path0"})
	memFs := afero.NewMemMapFs()
	_ = memFs.MkdirAll("/path0", 0755)
	path0Walker := fs.WalkFS("/path0", memKrFs{memFs: memFs})
	s.sftpClient.EXPECT().Connect().Return(nil)
	s.sftpClient.EXPECT().StatVFS("/path0").Return(nil, fmt.Errorf("failed to get VFS stats"))
	s.sftpClient.EXPECT().Walk("/path0").Return(path0Walker)
	s.sftpClient.EXPECT().Close()
	ch := make(chan prometheus.Metric)
	done := make(chan bool)
	go func() {
		s.collector.Collect(ch)
		done <- true
	}()
	// Exactly three metrics are expected; none may be a filesystem-space one.
	for i := 0; i < 3; i++ {
		m := <-ch
		s.NotContains(m.Desc().String(), "filesystem_total_space_bytes")
		s.NotContains(m.Desc().String(), "filesystem_free_space_bytes")
	}
	// Do not close(ch) here: only the sender (Collect) should close a channel,
	// and closing from the receiver would panic a late send instead of failing
	// the test cleanly.
	<-done
}
// TestSFTPCollectorCollectShouldWriteObjectMetrics verifies that Collect walks
// each configured path and emits an object-count gauge (regular files only;
// directories are excluded) and a total-size gauge per path.
func (s *SFTPCollectorSuite) TestSFTPCollectorCollectShouldWriteObjectMetrics() {
	viper.Set(viperkeys.SFTPPaths, []string{"/path0", "/path1"})
	memFs := afero.NewMemMapFs()
	_ = memFs.MkdirAll("/path0/1/a", 0755)
	_ = afero.WriteFile(memFs, "/path0/0.txt", []byte("0"), 0644)
	_ = afero.WriteFile(memFs, "/path0/1/1.txt", []byte("1"), 0644)
	_ = afero.WriteFile(memFs, "/path0/1/a/1a.txt", []byte("1a"), 0644)
	_ = memFs.MkdirAll("/path1/empty-dir", 0755)
	_ = afero.WriteFile(memFs, "/path1/1.txt", []byte("helloworld"), 0644)
	path0Walker := fs.WalkFS("/path0", memKrFs{memFs: memFs})
	path1Walker := fs.WalkFS("/path1", memKrFs{memFs: memFs})
	s.sftpClient.EXPECT().Connect().Return(nil)
	s.sftpClient.EXPECT().StatVFS("/path0").Return(&sftp.StatVFS{}, nil)
	s.sftpClient.EXPECT().StatVFS("/path1").Return(&sftp.StatVFS{}, nil)
	s.sftpClient.EXPECT().Walk("/path0").Return(path0Walker)
	s.sftpClient.EXPECT().Walk("/path1").Return(path1Walker)
	s.sftpClient.EXPECT().Close()
	ch := make(chan prometheus.Metric)
	done := make(chan bool)
	go func() {
		s.collector.Collect(ch)
		done <- true
	}()
	// Drain the "up" gauge and the four filesystem-space metrics (two paths
	// x total/free); they are not under test here.
	for i := 0; i < 5; i++ {
		<-ch
	}
	// assertGauge checks one gauge metric's description, value, and path label.
	assertGauge := func(m prometheus.Metric, wantDesc string, wantValue float64, wantPath string) {
		metric := &dto.Metric{}
		_ = m.Write(metric)
		s.Equal(wantDesc, m.Desc().String())
		s.Equal(wantValue, metric.GetGauge().GetValue())
		s.Equal("path", metric.GetLabel()[0].GetName())
		s.Equal(wantPath, metric.GetLabel()[0].GetValue())
	}
	countDesc := `Desc{fqName: "sftp_objects_available", help: "Number of objects in the path", ` +
		`constLabels: {}, variableLabels: [path]}`
	sizeDesc := `Desc{fqName: "sftp_objects_total_size_bytes", help: "Total size of all the objects in the path", ` +
		`constLabels: {}, variableLabels: [path]}`
	assertGauge(<-ch, countDesc, 3.0, "/path0") // 0.txt, 1/1.txt, 1/a/1a.txt
	assertGauge(<-ch, sizeDesc, 4.0, "/path0")  // "0" + "1" + "1a" = 4 bytes
	assertGauge(<-ch, countDesc, 1.0, "/path1") // 1.txt only; empty-dir not counted
	assertGauge(<-ch, sizeDesc, 10.0, "/path1") // "helloworld" = 10 bytes
	<-done
}
// TestSFTPCollectorCollectShouldNotWriteObjectMetricsOnError verifies that
// when walking a path fails, Collect emits no object metrics for it; the
// remaining metrics (up plus the filesystem-space pair) must not carry an
// object-metric descriptor.
func (s *SFTPCollectorSuite) TestSFTPCollectorCollectShouldNotWriteObjectMetricsOnError() {
	viper.Set(viperkeys.SFTPPaths, []string{"/errorpath"})
	memFs := afero.NewMemMapFs()
	_ = memFs.MkdirAll("/errorpath", 0755)
	// Mode 0000 is presumably what makes the walk fail on this file —
	// NOTE(review): confirm MemMapFs actually enforces permissions here.
	_ = afero.WriteFile(memFs, "/errorpath/file.txt", []byte("helloworld"), 0000)
	walker := fs.WalkFS("/errorpath", memKrFs{memFs: memFs})
	s.sftpClient.EXPECT().Connect().Return(nil)
	s.sftpClient.EXPECT().StatVFS("/errorpath").Return(&sftp.StatVFS{}, nil)
	s.sftpClient.EXPECT().Walk("/errorpath").Return(walker)
	s.sftpClient.EXPECT().Close()
	ch := make(chan prometheus.Metric)
	done := make(chan bool)
	go func() {
		s.collector.Collect(ch)
		done <- true
	}()
	// Exactly three metrics are expected; none may be an object metric.
	for i := 0; i < 3; i++ {
		m := <-ch
		s.NotContains(m.Desc().String(), "objects_available")
		s.NotContains(m.Desc().String(), "objects_total_size_bytes")
	}
	// Do not close(ch) here: only the sender (Collect) should close a channel,
	// and closing from the receiver would panic a late send instead of failing
	// the test cleanly.
	<-done
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.