text stringlengths 11 4.05M |
|---|
package image
import (
"io"
"os"
"time"
"github.com/dnephin/dobi/tasks/context"
docker "github.com/fsouza/go-dockerclient"
)
// RunPull builds or pulls an image if it is out of date.
// It returns true when a pull was performed, false when the configured
// pull interval has not elapsed yet.
func RunPull(ctx *context.ExecuteContext, t *Task, _ bool) (bool, error) {
	record, err := getImageRecord(recordPath(ctx, t.config))
	switch {
	// BUG FIX: the error must be inspected before record is used. The
	// original consulted record.LastPull first, reading a zero-value
	// record when the load had failed and skipping the warning.
	case err != nil:
		// A missing/unreadable record is not fatal: treat the image as
		// never pulled and fall through to the pull below.
		t.logger().Warnf("Failed to get image record: %s", err)
	case !t.config.Pull.Required(record.LastPull):
		t.logger().Debugf("Pull not required")
		return false, nil
	}
	pullTag := func(tag string) error {
		return pullImage(ctx, t, tag)
	}
	if err := t.ForEachTag(ctx, pullTag); err != nil {
		return false, err
	}
	image, err := GetImage(ctx, t.config)
	if err != nil {
		return false, err
	}
	// Record the pull time and resulting image ID; failing to persist the
	// record only costs an extra pull later, so it is just a warning.
	record = imageModifiedRecord{LastPull: now(), ImageID: image.ID}
	if err := updateImageRecord(recordPath(ctx, t.config), record); err != nil {
		t.logger().Warnf("Failed to update image record: %s", err)
	}
	t.logger().Info("Pulled")
	return true, nil
}
func now() *time.Time {
now := time.Now()
return &now
}
// pullImage pulls a single tag of the task's image, streaming the raw
// JSON progress output to stdout.
func pullImage(ctx *context.ExecuteContext, t *Task, imageTag string) error {
	repo, tag := docker.ParseRepositoryTag(imageTag)
	registry := parseAuthRepo(t.config.Image)
	return Stream(os.Stdout, func(out io.Writer) error {
		opts := docker.PullImageOptions{
			Repository:    repo,
			Tag:           tag,
			OutputStream:  out,
			RawJSONStream: true,
			// TODO: timeout
		}
		return ctx.Client.PullImage(opts, ctx.GetAuthConfig(registry))
	})
}
|
package main
import (
"github.com/gorilla/websocket"
"log"
"time"
)
const (
	// writeWait is the time allowed to write a message to the peer.
	writeWait = 10 * time.Second
	// pongWait is the time allowed to read the next pong message from the peer.
	pongWait = 60 * time.Second
	// pingPeriod is the interval between pings sent to the peer. It must be
	// less than pongWait so a pong always arrives before the read deadline.
	pingPeriod = (pongWait * 9) / 10
	// maxMessageSize is the maximum message size (bytes) allowed from the peer.
	maxMessageSize = 512
)
// Message framing bytes. NOTE(review): neither is referenced in this
// file; presumably used elsewhere in the package — confirm before removing.
var (
	newline = []byte{'\n'}
	space   = []byte{' '}
)
// upgrader upgrades HTTP requests to WebSocket connections with fixed
// 1 KiB read/write buffers.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}
// Guest is one WebSocket participant in a Room. Outbound messages are
// queued on send and written by writeSocket; inbound messages are read
// by readSocket and forwarded to the room.
type Guest struct {
	id   string          // guest identifier
	room *Room           // the room this guest is registered with
	conn *websocket.Conn // underlying WebSocket connection
	send chan []byte     // buffered channel of outbound messages
}
// readSocket pumps messages from the WebSocket connection to the room's
// broadcast channel. It enforces the read limit and keeps the read
// deadline alive via the pong handler; on any read error it unregisters
// the guest and closes the connection.
func (g *Guest) readSocket() {
	defer func() {
		g.room.unregister <- g
		g.conn.Close()
	}()
	g.conn.SetReadLimit(maxMessageSize)
	g.conn.SetReadDeadline(time.Now().Add(pongWait))
	g.conn.SetPongHandler(func(string) error {
		// Each pong extends the read deadline, so a live peer never times out.
		g.conn.SetReadDeadline(time.Now().Add(pongWait))
		return nil
	})
	for {
		_, message, err := g.conn.ReadMessage()
		if err != nil {
			// Normal closes (going away / abnormal) are expected; only log others.
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				log.Printf("error: %v", err)
			}
			break
		}
		g.room.broadcast <- message
	}
}
// writeSocket pumps messages from the send channel to the WebSocket
// connection and pings the peer every pingPeriod to keep it alive.
// It is the sole writer on the connection; it returns (closing the
// connection) on any write failure or when the room closes send.
func (g *Guest) writeSocket() {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		g.conn.Close()
	}()
	for {
		select {
		case message, ok := <-g.send:
			g.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if !ok {
				// room closed the channel
				g.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			// TODO: look into how much overhead there is to decode messages
			// since we just pass them on, we really don't need to decode
			// at all.. unless we are gonna hold pub keys and drop unknown
			// senders
			w, err := g.conn.NextWriter(websocket.TextMessage) // TODO: BinaryMessage?
			if err != nil {
				return
			}
			w.Write(message)
			// n := len(g.send)
			// for i := 0; i < n; i++ {
			// w.Write(<-g.send)
			// }
			if err := w.Close(); err != nil {
				return
			}
		case <-ticker.C:
			g.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if err := g.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}
|
package trea
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00400102 is the XML document wrapper for the
// trea.004.001.02 (CreateNonDeliverableForwardValuation) message.
type Document00400102 struct {
	XMLName xml.Name                                 `xml:"urn:iso:std:iso:20022:tech:xsd:trea.004.001.02 Document"`
	Message *CreateNonDeliverableForwardValuationV02 `xml:"CretNDFValtnV02"`
}
// AddMessage allocates the document's message payload and returns it.
func (d *Document00400102) AddMessage() *CreateNonDeliverableForwardValuationV02 {
	msg := new(CreateNonDeliverableForwardValuationV02)
	d.Message = msg
	return msg
}
// Scope
// The CreateNonDeliverableForwardValuation message is sent by a participant to a central system or to a counterparty to notify the valuation of a non deliverable trade.
// Usage
// The two trading parties will both send similar notifications to the central settlement system and the central settlement system will send notifications to both.
type CreateNonDeliverableForwardValuationV02 struct {
	// Provides identification and date of the valuation of the non deliverable trade which is created.
	TradeInformation *iso20022.TradeAgreement1 `xml:"TradInf"`
	// Specifies the trading side of the valuation of the non deliverable trade which is created.
	TradingSideIdentification *iso20022.TradePartyIdentification3 `xml:"TradgSdId"`
	// Specifies the counterparty of the valuation of the non deliverable trade which is created.
	CounterpartySideIdentification *iso20022.TradePartyIdentification3 `xml:"CtrPtySdId"`
	// Specifies the amounts of the valuation of the non deliverable trade which is created.
	TradeAmounts *iso20022.AmountsAndValueDate1 `xml:"TradAmts"`
	// Specifies the valuation information of the valuation of the non deliverable trade which is created.
	ValuationInformation *iso20022.ValuationData2 `xml:"ValtnInf"`
	// Specifies the valuation rate of the valuation of the non deliverable trade which is created.
	ValuationRate *iso20022.AgreedRate1 `xml:"ValtnRate"`
}
// AddTradeInformation allocates and returns the TradeInformation field.
func (c *CreateNonDeliverableForwardValuationV02) AddTradeInformation() *iso20022.TradeAgreement1 {
	info := new(iso20022.TradeAgreement1)
	c.TradeInformation = info
	return info
}
// AddTradingSideIdentification allocates and returns the TradingSideIdentification field.
func (c *CreateNonDeliverableForwardValuationV02) AddTradingSideIdentification() *iso20022.TradePartyIdentification3 {
	id := new(iso20022.TradePartyIdentification3)
	c.TradingSideIdentification = id
	return id
}
// AddCounterpartySideIdentification allocates and returns the CounterpartySideIdentification field.
func (c *CreateNonDeliverableForwardValuationV02) AddCounterpartySideIdentification() *iso20022.TradePartyIdentification3 {
	id := new(iso20022.TradePartyIdentification3)
	c.CounterpartySideIdentification = id
	return id
}
// AddTradeAmounts allocates and returns the TradeAmounts field.
func (c *CreateNonDeliverableForwardValuationV02) AddTradeAmounts() *iso20022.AmountsAndValueDate1 {
	amts := new(iso20022.AmountsAndValueDate1)
	c.TradeAmounts = amts
	return amts
}
// AddValuationInformation allocates and returns the ValuationInformation field.
func (c *CreateNonDeliverableForwardValuationV02) AddValuationInformation() *iso20022.ValuationData2 {
	vi := new(iso20022.ValuationData2)
	c.ValuationInformation = vi
	return vi
}
// AddValuationRate allocates and returns the ValuationRate field.
func (c *CreateNonDeliverableForwardValuationV02) AddValuationRate() *iso20022.AgreedRate1 {
	rate := new(iso20022.AgreedRate1)
	c.ValuationRate = rate
	return rate
}
|
// Package codecs contains types that relate to brewnet codecs. They
// can be used in a response structure to help the codec understand
// what it should be building - for example, including a Link type in
// the response will allow all brewnet codecs to format the link how
// they need to support their specific MIME type.
package codecs
// A Link is a type that contains a link to another resource.
type Link struct {
	// Location is the location of the linked resource.
	Location string
	// Relationship is an identifier describing how this link relates
	// to the type that contains it. The relationship should be all
	// lower case and have words separated with "-" characters.
	Relationship string
	// Title is a human-readable description of the resource linked
	// to and how it relates to the type that contains this link. If
	// Title is empty, it can be generated using the Relationship
	// value, replacing "-" characters with " " characters and
	// converting the first word or all words (as needed) to title
	// case.
	Title string
	// Distance is a value between 0 and 1 that represents how distant
	// the relationship between the containing type and the linked
	// type is. If the linked type should always be embedded in the
	// containing type's data, the distance should be 0. If the
	// linked type should always be a simple link or button, never a
	// dropdown or embedded element, the distance should be 1. It's
	// up to the UI to decide what the values in-between mean (i.e. a
	// value of 0.8 might result in a slide-in element in a
	// high-resolution web browser, but a link to another page in a
	// phone app).
	Distance float32
}
// An Image is a type that contains a link to an image. It embeds Link,
// so all Link fields apply to the image resource as well.
type Image struct {
	Link
	// MIME is the MIME type of the linked image.
	MIME string
}
// A Title includes a string of characters and a rank.
type Title struct {
	// Value should be the title's string value.
	Value string
	// Rank should represent the title's importance to the containing
	// context. A rank of 0 will be emphasized more than a rank of 1.
	Rank int
}
|
//多个服务器和客户端。客户端向服务器发出不同的请求
//一个中心节点用于审批节点的加入离开和网络的维护
//根据报文头来分类处理不同的消息(handle),用json来序列化结构体
//0.简单字符串[default]
//1.请求加入:id listenport————添加表单并转发(dial)&&把表单整体传递给新节点(datatrans)&&通知节点已加入网络[1]
// 可能的错误:id重复,服务器不存在
//2.请求离开:id listenport————删除表单并转发[2]
//3.来自转发的加入/离开请求:id listenport————仅添加/删除表单[3,4]
//4.连通确认[5]
//5.复杂的高级消息:id num1 num2 time etc.[6,7...]
//另:
//反馈确认函数(check)
//超时处理函数(time)
//心跳检查函数(heartbeating)
//错误检查函数(checkerr)
//建立本地数据库(mysql)
package main
import (
"encoding/json"
"fmt"
"net"
"os"
)
// Info describes a node: its id and the host (listen port) it serves on.
type Info struct {
	Id   string
	Host string
}

// trade is a high-level message describing a transfer between two nodes.
type trade struct {
	From     string
	To       string
	Quantity int
	Sort     string
}

// clidata holds per-client data: the client host and two numeric fields.
type clidata struct {
	Host   string
	First  int
	Second int
}

// message is the JSON envelope exchanged between nodes. Kind selects the
// handler ("join", "leave", "receive", ...); Verifykey marks an original
// request (true) versus a forwarded one (false); Peerinfo carries the
// full peer table when a topology snapshot is transferred.
type message struct {
	Kind      string
	Verifykey bool
	Jlmessage *Info
	Peerinfo  map[string]Info
	Tradeinfo *trade
}
// var i bool
// address is this node's listen port and id its identifier (both read
// from stdin in main). InfoMap is the peer table keyed by host/port;
// CliMap stores per-client data.
var address, id string
var InfoMap map[string]Info
var CliMap map[string]clidata
//错误检查函数
func checkErr(err error) {
if err != nil {
fmt.Println(err)
}
}
// dial broadcasts the raw message c to every peer in InfoMap over a
// fresh TCP connection, skipping this node itself.
func dial(c []byte) {
	for _, con := range InfoMap { // forward according to the peer table
		// TODO (translated): return a delivery count to compare with the
		// map size, to confirm the message reached every peer?
		if con.Host == address {
			// skip this node itself
			continue
		}
		tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:"+con.Host)
		conn, err := net.DialTCP("tcp", nil, tcpAddr) // open the connection
		if err != nil {
			fmt.Println("err", err)
			// NOTE(review): terminating the whole process (and with exit
			// status 0) when a single peer is unreachable looks unintended
			// — confirm whether skipping the peer was meant instead.
			os.Exit(0)
		}
		conn.Write(c)
		conn.Close()
	}
}
//申请函数,向中心节点发去报文,返回一个bool参数
// func request(data []byte, long int) (i bool) {
// tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:10000")
// conn, err := net.DialTCP("tcp", nil, tcpAddr)
// conn.Write(data)
// buff := make([]byte, 128)
// for {
// j, err := conn.Read(buff)
// if err != nil {
// ch <- 1
// break
// }
// if string(buff) == "yes" {
// i = true
// break
// }
// if string(buff) == "no" {
// i = false
// break
// }
// }
// conn.Close()
// return i
// }
// handle decodes the first `long` bytes of data as a message and
// dispatches on its Kind:
//   - "join":   Verifykey=true is an original request — record the peer,
//     forward the (now Verifykey=false) message to all known peers, then
//     send the full peer table back to the joiner as a "receive" message.
//     Verifykey=false is a forwarded notification — just record the peer.
//   - "receive": replace the local peer table with the received snapshot.
//   - "leave":  mirror of "join": remove the peer and forward if original.
func handle(data []byte, long int) {
	v := &message{}
	err := json.Unmarshal(data[:long], v)
	checkErr(err)
	// join request
	if v.Kind == "join" {
		// i=request(data,long)
		if v.Verifykey == true {
			InfoMap[v.Jlmessage.Host] = Info{v.Jlmessage.Id, v.Jlmessage.Host}
			v.Verifykey = false
			// mark as forwarded before relaying to the other peers
			b, _ := json.Marshal(v)
			dial(b)
			tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:"+v.Jlmessage.Host)
			conn, err := net.DialTCP("tcp", nil, tcpAddr)
			// open a direct connection back to the joining node
			checkErr(err)
			v.Kind = "receive"
			v.Peerinfo = InfoMap
			c, _ := json.Marshal(v)
			conn.Write(c)
			conn.Close()
		}
		// NOTE(review): Verifykey was just set to false above, so an
		// original join falls through into this branch too (harmless
		// re-insert, but confirm it is intended).
		if v.Verifykey == false {
			InfoMap[v.Jlmessage.Host] = Info{v.Jlmessage.Id, v.Jlmessage.Host}
			fmt.Println(v.Jlmessage.Id + "joined the web and map updated")
		}
	}
	// accept a peer-table snapshot
	if v.Kind == "receive" {
		b, _ := json.Marshal(v.Peerinfo)
		err := json.Unmarshal(b, &InfoMap)
		fmt.Println("get the map")
		checkErr(err)
	}
	if v.Kind == "leave" {
		// i=request(data,long)
		if v.Verifykey == true {
			delete(InfoMap, v.Jlmessage.Host)
			fmt.Println(v.Jlmessage.Id + "want to leave")
			v.Verifykey = false
			c, _ := json.Marshal(v)
			dial(c)
		}
		if v.Verifykey == false {
			delete(InfoMap, v.Jlmessage.Host)
			fmt.Println(v.Jlmessage.Id + "leaved the net and map updated")
		}
	}
}
// chat is the per-connection receive loop. It reads packets from the
// connection and passes them to handle, acknowledging each with
// "finish". Messages from the central node (127.0.0.1:10000) are
// ignored here. The connection is closed when the peer disconnects.
func chat(tcpConn *net.TCPConn) {
	// identify the remote peer (also used to detect the central node)
	ipStr := tcpConn.RemoteAddr().String()
	// cleanup on disconnect
	defer func() {
		fmt.Println("disconnected :" + ipStr)
		tcpConn.Close()
	}()
	for {
		data := make([]byte, 10000)
		total, err := tcpConn.Read(data)
		if err != nil {
			break
		}
		if tcpConn.RemoteAddr().String() != "127.0.0.1:10000" {
			handle(data, total)
			e := []byte("finish")
			tcpConn.Write(e)
		}
	}
}
// main reads this node's listen port and id from stdin, seeds the peer
// table with itself, then accepts connections forever, handling each on
// its own goroutine.
func main() {
	// read node configuration from stdin
	fmt.Println("Input your listen port")
	fmt.Scanln(&address)
	fmt.Println("Input your id")
	fmt.Scanln(&id)
	tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:"+address)
	checkErr(err)
	tcpListener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		// without a listener the node cannot participate at all
		fmt.Println(err)
		os.Exit(1)
	}
	// initialise the peer table with this node, keyed by listen port
	InfoMap = map[string]Info{
		address: {id, address},
	}
	// accept loop
	for {
		tcpConn, err := tcpListener.AcceptTCP()
		if err != nil {
			fmt.Println(err)
			continue
		}
		// BUG FIX: the original deferred tcpConn.Close() inside this loop;
		// those defers only run when main returns, accumulating one per
		// connection. chat() already closes the connection in its own defer.
		fmt.Println("连接的客服端信息:", tcpConn.RemoteAddr().String())
		go chat(tcpConn)
	}
}
|
package main
import (
"errors"
"fmt"
)
// operate is a binary integer operation.
type operate func(int, int) int

// oper is the default operate implementation: integer addition.
func oper(x, y int) int {
	return y + x
}

// calculate applies op to x and y. It guards against a nil op and
// reports it as an error instead of panicking.
func calculate(x, y int, op operate) (result int, err error) {
	if op == nil {
		err = errors.New("op is nil")
		return
	}
	result = op(x, y)
	return
}
// main demonstrates calculate with the addition operation.
func main() {
	sum, _ := calculate(1, 2, oper)
	fmt.Println(sum)
}
|
package chunksmapper
import (
"github.com/AppImageCrafters/libzsync-go/chunks"
"github.com/stretchr/testify/assert"
"testing"
)
func TestFileChunksMapper_GetMissingChunks(t *testing.T) {
mapper := ChunksMapper{
fileSize: 12,
chunksMap: make(map[int64]chunks.ChunkInfo),
}
chunkList := []chunks.ChunkInfo{
chunks.ChunkInfo{TargetOffset: 2, Size: 2},
chunks.ChunkInfo{TargetOffset: 4, Size: 2},
chunks.ChunkInfo{TargetOffset: 8, Size: 2},
}
for _, chunk := range chunkList {
mapper.chunksMap[chunk.TargetOffset] = chunk
}
result := mapper.GetMissingChunks()
expected := []chunks.ChunkInfo{
{TargetOffset: 0, SourceOffset: 0, Size: 2},
{TargetOffset: 6, SourceOffset: 6, Size: 2},
{TargetOffset: 10, SourceOffset: 10, Size: 2},
}
assert.Equal(t, expected, result)
}
|
package parquet_test
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"github.com/segmentio/parquet-go"
)
// Example writes a small address book to a temporary parquet file and
// reads it back, printing the first owner.
func Example() {
	// parquet-go uses the same struct-tag definition style as JSON and XML
	type Contact struct {
		Name string `parquet:"name"`
		// "zstd" specifies the compression for this column
		PhoneNumber string `parquet:"phoneNumber,optional,zstd"`
	}
	type AddressBook struct {
		Owner             string    `parquet:"owner,zstd"`
		OwnerPhoneNumbers []string  `parquet:"ownerPhoneNumbers,gzip"`
		Contacts          []Contact `parquet:"contacts"`
	}
	// BUG FIX: the original discarded the TempFile error; a failure there
	// would surface later as a confusing nil-file crash.
	f, err := ioutil.TempFile("", "parquet-example-")
	if err != nil {
		log.Fatal(err)
	}
	writer := parquet.NewWriter(f)
	rows := []AddressBook{
		{Owner: "UserA", Contacts: []Contact{
			{Name: "Alice", PhoneNumber: "+15505551234"},
			{Name: "Bob"},
		}},
		// Add more rows here.
	}
	for _, row := range rows {
		if err := writer.Write(row); err != nil {
			log.Fatal(err)
		}
	}
	// Close flushes buffered row groups; its error must not be ignored or
	// the file may be truncated.
	if err := writer.Close(); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
	// Now, we can read from the file.
	rf, err := os.Open(f.Name())
	if err != nil {
		log.Fatal(err)
	}
	pf := parquet.NewReader(rf)
	addrs := make([]AddressBook, 0)
	for {
		var addr AddressBook
		err := pf.Read(&addr)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		addrs = append(addrs, addr)
	}
	fmt.Println(addrs[0].Owner)
	// Output: UserA
}
|
package example
import (
	"errors"
	"fmt"

	"github.com/alehano/gobootstrap/models"
	"github.com/alehano/gobootstrap/sys/db/postgres"
	"github.com/jmoiron/sqlx"
)
// NewPostgresStorage returns a Storage implementation backed by the
// shared Postgres connection, operating on the package's example table.
func NewPostgresStorage() Storage {
	return postgresStorage{
		db:    postgres.GetDB(),
		table: tableName,
	}
}
const (
	// tableName is the Postgres table used by this storage.
	tableName = "example"
)
// postgresStorage implements Storage on top of a sqlx Postgres handle.
type postgresStorage struct {
	db    *sqlx.DB // shared database connection
	table string   // target table name
}
// DBInit creates the example table. MustExec panics on SQL errors, so
// reaching the return statement means the DDL succeeded.
func (s postgresStorage) DBInit() error {
	// BUG FIX: Postgres placeholders ($1) bind values, not identifiers, so
	// "CREATE TABLE $1" is a syntax error. The table name is a package
	// constant (not user input), so direct interpolation is safe here.
	s.db.MustExec(fmt.Sprintf(`
	CREATE TABLE %s (
	id SERIAL PRIMARY KEY,
	title VARCHAR(100) NOT NULL,
	updated_at TIMESTAMP,
	created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
	);`, s.table))
	// BUG FIX: the original unconditionally returned a "TODO" error even
	// after executing the DDL, making every DBInit call report failure.
	return nil
}
// Create inserts item and returns the id of the new row.
func (s postgresStorage) Create(item ExampleModel) (int, error) {
	// BUG FIX: $1 cannot bind a table name in Postgres, and the pq driver
	// does not support LastInsertId. Interpolate the constant table name
	// and fetch the generated id with RETURNING instead.
	var newID int
	err := s.db.QueryRow(
		fmt.Sprintf("INSERT INTO %s (title, updated_at) VALUES ($1, NOW()) RETURNING id", s.table),
		item.Title,
	).Scan(&newID)
	if err != nil {
		return 0, models.WrapSqlErr(err)
	}
	return newID, nil
}
// Get fetches the row with the given id.
func (s postgresStorage) Get(id int) (ExampleModel, error) {
	res := ExampleModel{}
	// BUG FIX: $1 cannot bind a table name; interpolate the constant name
	// and keep the placeholder only for the id value.
	err := s.db.Get(&res, fmt.Sprintf("SELECT * FROM %s WHERE id=$1", s.table), id)
	return res, models.WrapSqlErr(err)
}
// Update is not implemented yet; it always returns a wrapped "TODO" error.
func (s postgresStorage) Update(item ExampleModel) error {
	return models.WrapSqlErr(errors.New("TODO"))
}
// Delete is not implemented yet; it always returns a wrapped "TODO" error.
func (s postgresStorage) Delete(id int) error {
	return models.WrapSqlErr(errors.New("TODO"))
}
|
package functions
import (
"os"
"github.com/mikerybka/github"
)
// UpdateWebmachinedevFrontend regenerates the Vercel and JavaScript
// sources for every known function and pushes the files to the
// mikerybka/webmachine.dev repository's main branch on GitHub.
func UpdateWebmachinedevFrontend() error {
	functions, err := AllFunctions()
	if err != nil {
		return err
	}
	// BUG FIX: the original declared `var files map[string]string` (nil);
	// the first assignment below would panic. The map must be allocated.
	files := make(map[string]string)
	// TODO regenerate /go.mod and /go.sum
	for _, function := range functions {
		vercelSrc, err := GenerateVercelFunction(function)
		if err != nil {
			return err
		}
		files["api/"+function.ID+"/index.go"] = vercelSrc
		jsSrc, err := GenerateJavascriptFunction(function)
		if err != nil {
			return err
		}
		files["functions/"+function.ID+"/index.js"] = jsSrc
	}
	// TODO
	// foreach type
	// generate /pages/:type/index.js
	// generate /pages/:type/[id].js
	// generate /pages/:type/new.js
	// generate /pages/:type/[id]/edit.js ???
	return github.WriteFiles("mikerybka", "webmachine.dev", "main", files, "automated update", os.Getenv("GITHUB_KEY"))
}
|
/*
Copyright © 2022 NAME HERE <EMAIL ADDRESS>
*/
package cmd
import (
"YNM3000/code/core"
"YNM3000/code/logger"
"YNM3000/code/utils"
"os"
"path"
"github.com/spf13/cobra"
)
// scanCmd represents the scan command; its work is done in runScan.
var scanCmd = &cobra.Command{
	Use:   "scan",
	Short: "A brief description of your command",
	Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
	Run: runScan,
}
// init registers the scan command and its workflow flags on the root
// command. The flags are persistent, so subcommands inherit them.
func init() {
	// --flow selects the named workflow (default "general")
	rootCmd.PersistentFlags().StringVar(&options.Scan.Flow, "flow", "general", "指定workflow")
	// --flowPath overrides the directory workflows are loaded from
	rootCmd.PersistentFlags().StringVar(&options.Scan.FlowFolder, "flowPath", "", "指定workflow的目录")
	rootCmd.AddCommand(scanCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// scanCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// scanCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
// runScan prints the configured targets and then scans each one in turn.
func runScan(_ *cobra.Command, _ []string) {
	initScan()
	// list the targets before scanning
	logger.Info("输入的目标如下: ")
	for input := range options.Inputs {
		logger.Println(input)
	}
	for input := range options.Inputs {
		logger.Info("开始扫描: ", input)
		core.Run(input, options)
	}
}
// initScan resolves the workflow folder: when --flowPath is not given it
// defaults to <root>/workflow and the process exits if that folder is
// missing.
func initScan() {
	// default the workflow folder when not provided on the command line
	if options.Scan.FlowFolder == "" {
		options.Scan.FlowFolder = path.Join(options.Paths.Root, "workflow")
		if !utils.FolderExists(options.Scan.FlowFolder) {
			logger.Info("workflow目录不存在")
			os.Exit(1)
		}
	}
}
|
package main
import "fmt"
// strStr returns the index of the first occurrence of needle in
// haystack, or -1 if needle is not a substring. An empty needle matches
// at index 0, mirroring strings.Index.
func strStr(haystack string, needle string) int {
	findlen, inlen := len(needle), len(haystack)
	switch {
	case findlen == 0:
		return 0
	// BUG FIX: the original guard was `inlen > findlen: return -1`,
	// which rejected every haystack LONGER than the needle — almost all
	// valid searches. A match is impossible only when the haystack is
	// shorter than the needle.
	case inlen < findlen:
		return -1
	case findlen == inlen:
		if haystack == needle {
			return 0
		}
		return -1
	}
	// Slide a window of len(needle) across haystack, comparing bytewise.
	// (The original also had unreachable re-checks of findlen/inlen == 0
	// after the switch; they are removed.)
	to := inlen - findlen
	for i := 0; i <= to; i++ {
		j := 0
		for ; j < findlen; j++ {
			if haystack[i+j] != needle[j] {
				break
			}
		}
		if j == findlen {
			return i
		}
	}
	return -1
}
/**
* Example 1:
* Input: haystack = "hello", needle = "ll"
* Output: 2
*
* Example 2:
* Input: haystack = "aaaaa", needle = "bba"
* Output: -1
*/
// main exercises strStr on a single-character match.
func main() {
	// fmt.Println(strStr("hello", "ll"))
	// fmt.Println(strStr("aaaaa", "bba"))
	idx := strStr("a", "a")
	fmt.Println(idx)
}
|
package ffprobe
import (
"errors"
"log"
"os"
"reflect"
"strings"
"testing"
)
// init silences the package logger during tests by pointing it at the
// OS null device.
func init() {
	devnull, e := os.Create(os.DevNull)
	if e != nil {
		panic(e)
	}
	logi = log.New(devnull, "", log.Lshortfile|log.Ltime)
}
// TestConfInputs checks that the default prober config maps the mac
// audio ("a") and video ("v") inputs to the avfoundation format.
func TestConfInputs(t *testing.T) {
	pc := NewProber()
	cases := []struct {
		platform, input string
		wantFormat      string
	}{
		{"mac", "a", "avfoundation"},
		{"mac", "v", "avfoundation"},
	}
	for _, tc := range cases {
		got := pc.config.Inputs[tc.platform][tc.input]
		if !reflect.DeepEqual(got.F, tc.wantFormat) {
			t.Errorf("conf input got %#v, want %#v", got, tc.wantFormat)
		}
	}
}
// TestConfInputCmds checks that inputCmd renders the expected ffmpeg
// "-f ... -i ..." arguments for mac audio and video UI inputs.
func TestConfInputCmds(t *testing.T) {
	pc := NewProber()
	tests := []struct {
		plt  string
		uiip UIInput
		err  error
		want string
	}{
		{"mac", UIInput{Type: Audio}, nil, "-f avfoundation -i none:0"},
		{"mac", UIInput{Type: Video}, nil, "-f avfoundation -i 0:none"},
	}
	for _, tt := range tests {
		inps, err := pc.inputCmd(pc.config.Inputs[tt.plt], tt.uiip)
		// the joined command only needs to contain the expected fragment
		if got := strings.Join(inps, " "); !strings.Contains(got, tt.want) || !reflect.DeepEqual(err, tt.err) {
			t.Errorf("conf input got %#v, should contain %#v", got, tt.want)
		}
	}
}
// TestContainer checks that presetCmd renders the expected encoder
// arguments for known presets and errors for unknown preset names.
func TestContainer(t *testing.T) {
	pc := NewProber()
	opts.VidPath = ""
	tests := []struct {
		name  string
		avidx int
		err   error
		want  string
	}{
		{"vp9-default", 0, nil, "-map 0:v -c:v vp9"},
		{"vp9-default", 0, nil, " 0_0.webm"},
		{"vp9-default", 0, nil, "-threads 8"},
		{"opus-default", 0, nil, "-map 0:a -c:a libopus 0_0.opus"},
		{"wrongpreset", 1, errors.New("unknown preset wrongpreset"), ""},
	}
	for _, tt := range tests {
		cmds, err := pc.presetCmd(tt.name, tt.avidx)
		got := strings.Join(cmds, " ")
		if !strings.Contains(got, tt.want) || !reflect.DeepEqual(err, tt.err) {
			t.Errorf("container cmd %s, got (%#v, %#v) want %#v", tt.name, got, err, tt.want)
		}
	}
}
// testInputs builds a UIInput for each preset name in ps.
func testInputs(pc ProberCommon, ps ...string) []UIInput {
	var ips []UIInput
	for _, preset := range ps {
		ips = append(ips, pc.config.newInput(preset, pc))
	}
	return ips
}
// td is one test datum: the command-builder name to exercise and a
// fragment its joined output must contain.
type td struct {
	fname string
	want  string
}
// testOptions returns the table for TestCmds: for each Options (input
// preset combination) the list of command builders and the fragment each
// built command must contain.
func testOptions(pc ProberCommon) map[*Options][]td {
	return map[*Options][]td{
		&Options{UIInputs: testInputs(pc, "opus-default")}: []td{
			{"getRecCmd", "-map 0:a -c:a aac"},
			{"getMuxCommand", "-i 0.opus -map 0:a -c copy 20"},
			{"getConcatCmd", "[0:a:0][1:a:0]concat=n=2:v=0:a=1[out]"},
			{"getConcatCmd", "-i 0_0.aac -i 1_0.aac"},
			{"getConcatCmd", "libopus 0.opus"},
		},
		&Options{UIInputs: testInputs(pc, "vp9-default")}: []td{
			{"getRecCmd", "-c:v libx264"},
			{"getRecCmd", "-crf 0"},
			{"getMuxCommand", "-i 0.webm -map 0:v -c copy "},
			{"getConcatCmd", "-i 0_0.mkv"},
			{"getConcatCmd", "-c:v vp9"},
			{"getConcatCmd", "[0:v:0][1:v:0]concat=n=2:v=1:a=0[out]"},
		},
		&Options{UIInputs: testInputs(pc, "opus-default", "vp9-default")}: []td{
			{"getRecCmd", "-c:a aac 1_0.aac"},
			{"getRecCmd", "1_1.mkv"},
			{"getMuxCommand", "-i 0.opus -i 1.webm -map 0:a -map 1:v -c copy "},
			{"getConcatCmd", "libopus 0.opus"},
			{"mkFiles", "0_0.aac 1_0.aac 0.opus 0_1.mkv 1_1.mkv 1.webm"},
		},
	}
}
// TestCmds drives the command builders (record, mux, concat, temp-file
// bookkeeping) over the testOptions table and checks each produced
// command contains the expected fragment.
func TestCmds(t *testing.T) {
	pc := NewProber()
	opts.VidPath = ""
	pc.config.resumeCount = 1
	topts := testOptions(pc)
	for opt, tt := range topts {
		for _, d := range tt {
			t.Run(d.fname, func(t *testing.T) {
				var cmds []string
				var err error
				switch d.fname {
				case "getRecCmd":
					cmds, err = pc.getRecCmd("mac", *opt)
				case "getMuxCommand":
					cmds, err = pc.getMuxCommand(*opt)
				case "getConcatCmd":
					cmds, err = pc.getConcatCmd(*opt, 0)
				case "mkFiles":
					// exercise two rounds so tmpFiles accumulates both sets
					// NOTE(review): the first call's error is overwritten by
					// the second before being checked — confirm intended.
					pc.config.tmpFiles = nil
					_, err = pc.getConcatCmd(*opt, 0)
					_, err = pc.getConcatCmd(*opt, 1)
					cmds = pc.config.tmpFiles
				}
				if err != nil {
					t.Error(err)
				}
				if got := strings.Join(cmds, " "); !strings.Contains(got, d.want) {
					t.Errorf("%s(%v)\n got %#v\n should contain %#v", d.fname, *opt, got, d.want)
				}
			})
		}
	}
}
|
package laws
import (
"fmt"
"os"
"path/filepath"
"regexp"
)
// A Law describes the expected values of a file header in regular expressions.
// NOTE(review): regexp.Regexp is stored by value; the package docs
// recommend *regexp.Regexp — confirm the copy is intentional.
type Law struct {
	Expected regexp.Regexp
}
// RetrieveFrom extracts a Law from filepath.
// If filepath is empty, it attempts to find a Law file at
// the current working directory or, if not found, returns an error.
// Otherwise, an error is returned if filepath can not be opened, is of
// the wrong file type, or contains invalid data.
func RetrieveFrom(filepath string) (law Law, err error) {
	if filepath == "" {
		fmt.Print("searching for the law...")
		filepath, err = findFile()
		if err != nil {
			fmt.Print("\n")
			return law, err
		}
		fmt.Printf("found %v\n", filepath)
	}
	file, err := os.Open(filepath)
	// BUG FIX: the original deferred file.Close() before checking err,
	// deferring a Close on a nil *os.File when Open failed. Check first,
	// then defer.
	if err != nil {
		return law, err
	}
	defer file.Close()
	return retrieveLaw(file)
}
// findFile searches the current working directory for a file matching
// "law.*" and returns the first match.
func findFile() (string, error) {
	// BUG FIX: the original discarded the errors from Getwd and Glob;
	// both are now propagated instead of silently producing no matches.
	cwd, err := os.Getwd()
	if err != nil {
		return "", err
	}
	matches, err := filepath.Glob(filepath.Join(cwd, "law.*"))
	if err != nil {
		return "", err
	}
	if len(matches) == 0 {
		return "", fmt.Errorf(
			`unable to find any "law" file in the current directory` + "\n" +
				`enter "lawyer help law" for more information`)
	}
	return matches[0], nil
}
// retrieveLaw dispatches on the law file's extension and parses it.
// Only YAML (.yml / .yaml) files are currently supported.
func retrieveLaw(file *os.File) (Law, error) {
	switch ext := filepath.Ext(file.Name()); ext {
	case ".yml", ".yaml":
		return retrieveFromYaml(file)
	default:
		return Law{}, fmt.Errorf(
			`law file format "%v" not supported`+"\n"+
				`enter "lawyer help law" for more information`, ext)
	}
}
|
package swarm_test
import (
"context"
"testing"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
pstore "gx/ipfs/QmQFFp4ntkd4C14sP3FaH9WJyBuetuGUVo6dShNHvnoEvC/go-libp2p-peerstore"
testutil "gx/ipfs/QmVnJMgafh5MBYiyqbvDtoCL8pcQvbEGD2k9o9GFpBWPzY/go-testutil"
)
// TestDialBadAddrs checks that the swarm refuses to dial peers whose
// only known addresses are undialable (link-local IPv6, utp).
func TestDialBadAddrs(t *testing.T) {
	// m parses a multiaddr or fails the test immediately
	m := func(s string) ma.Multiaddr {
		maddr, err := ma.NewMultiaddr(s)
		if err != nil {
			t.Fatal(err)
		}
		return maddr
	}
	ctx := context.Background()
	s := makeSwarms(ctx, t, 1)[0]
	// test registers a random peer at address a and expects DialPeer to fail
	test := func(a ma.Multiaddr) {
		p := testutil.RandPeerIDFatal(t)
		s.Peerstore().AddAddr(p, a, pstore.PermanentAddrTTL)
		if _, err := s.DialPeer(ctx, p); err == nil {
			t.Errorf("swarm should not dial: %s", p)
		}
	}
	test(m("/ip6/fe80::1"))               // link local
	test(m("/ip6/fe80::100"))             // link local
	test(m("/ip4/127.0.0.1/udp/1234/utp")) // utp
}
|
package client
import (
"encoding/json"
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
"github.com/taglme/nfc-goclient/pkg/models"
)
// upgrader upgrades test HTTP connections to WebSocket with default settings.
var upgrader = websocket.Upgrader{}
// echo is a test WebSocket handler that repeatedly pushes a fixed,
// well-formed EventResource JSON message to the client until a write
// fails (i.e. the client disconnects).
func echo(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer c.Close()
	for {
		//_, _, err := c.ReadMessage()
		//if err != nil {
		//	log.Printf(err.Error())
		//	break
		//}
		resp, err := json.Marshal(models.EventResource{
			EventID:     "123",
			Name:        models.EventNameAdapterDiscovery.String(),
			AdapterID:   "123",
			AdapterName: "aname",
			Data:        nil,
			CreatedAt:   "2006-01-02T15:04:05Z",
		})
		if err != nil {
			log.Fatal("Can't marshall test model", err)
		}
		err = c.WriteMessage(websocket.TextMessage, resp)
		if err != nil {
			break
		}
	}
}
// echoForErr is a test WebSocket handler that pushes a payload that is
// NOT valid EventResource JSON, to exercise the client's error path.
func echoForErr(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer c.Close()
	for {
		//_, _, err := c.ReadMessage()
		//if err != nil {
		//	log.Printf(err.Error())
		//	break
		//}
		err = c.WriteMessage(websocket.TextMessage, []byte("Different model"))
		if err != nil {
			break
		}
	}
}
// echoLocales is a test WebSocket handler that reads incoming messages
// (e.g. locale-change requests) without replying, logging the last
// message when the read loop ends.
func echoLocales(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer c.Close()
	for {
		_, msg, err := c.ReadMessage()
		if err != nil {
			// BUG FIX: the original used the received message as the
			// Printf FORMAT string (log.Printf(string(msg), err.Error())),
			// so any '%' verbs in the payload corrupted the log output.
			log.Printf("%s: %s", msg, err.Error())
			break
		}
		//err = c.WriteMessage(websocket.TextMessage, []byte("Different model"))
		//if err != nil {
		//	break
		//}
	}
}
// TestWsService_ConnString verifies the websocket endpoint path derived
// from the base URL.
func TestWsService_ConnString(t *testing.T) {
	svc := newWsService("url")
	assert.Equal(t, "url/ws", svc.ConnString())
}
// TestWsService_Connect verifies the Connect/Disconnect lifecycle:
// IsConnected is false before Connect, true after, and false again
// after Disconnect.
func TestWsService_Connect(t *testing.T) {
	s := httptest.NewServer(http.HandlerFunc(echo))
	defer s.Close()
	// rewrite the test server's http:// URL into a ws:// URL
	u := "ws" + strings.TrimPrefix(s.URL, "http")
	// Connect to the server
	ws := newWsService(u)
	assert.Equal(t, false, ws.IsConnected())
	err := ws.Connect()
	assert.Nil(t, err)
	assert.Equal(t, true, ws.IsConnected())
	err = ws.Disconnect()
	assert.Nil(t, err)
	assert.Equal(t, false, ws.IsConnected())
	//ws, _, err := websocket.DefaultDialer.Dial(u, nil)
	//if err != nil {
	//	t.Fatalf("%v", err)
	//}
	//defer ws.Close()
	//
	//// Send message to server, read response and check to see if it's what we expect.
	//for i := 0; i < 10; i++ {
	//	if err := ws.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
	//		t.Fatalf("%v", err)
	//	}
	//	_, p, err := ws.ReadMessage()
	//	if err != nil {
	//		t.Fatalf("%v", err)
	//	}
	//	if string(p) != "hello" {
	//		t.Fatalf("bad message")
	//	}
	//}
}
// TestWsService_OnEvent verifies that an event handler registered via
// OnEvent receives the events pushed by the echo test server.
func TestWsService_OnEvent(t *testing.T) {
	s := httptest.NewServer(http.HandlerFunc(echo))
	defer s.Close()
	u := "ws" + strings.TrimPrefix(s.URL, "http")
	// Connect to the server
	ws := newWsService(u)
	err := ws.Connect()
	if err != nil {
		log.Fatal("Can't connect to test server")
	}
	a := 0
	l := func(e models.Event) {
		// change value to validate if handler is working
		assert.Equal(t, models.EventNameAdapterDiscovery, e.Name)
		a++
	}
	ws.OnEvent(l)
	// sleep to let handler work
	time.Sleep(time.Second)
	assert.Less(t, 0, a)
}
// TestWsService_OnError verifies that an error handler registered via
// OnError fires when the server pushes a payload that is not a valid
// event resource.
func TestWsService_OnError(t *testing.T) {
	s := httptest.NewServer(http.HandlerFunc(echoForErr))
	defer s.Close()
	u := "ws" + strings.TrimPrefix(s.URL, "http")
	// Connect to the server
	ws := newWsService(u)
	err := ws.Connect()
	if err != nil {
		log.Fatal("Can't connect to test server")
	}
	a := 0
	l := func(e error) {
		// change value to validate if handler is working
		//assert.Equal(t, models.EventNameAdapterDiscovery, e.Name)
		assert.EqualError(t, e, "Can't unmarshall event resource: invalid character 'D' looking for beginning of value")
		a++
	}
	ws.OnError(l)
	// sleep to let handler work
	time.Sleep(time.Second)
	assert.Less(t, 0, a)
}
// TestWsService_SetLocale verifies that SetLocale fails before Connect
// and succeeds once a connection is established.
func TestWsService_SetLocale(t *testing.T) {
	s := httptest.NewServer(http.HandlerFunc(echoLocales))
	defer s.Close()
	u := "ws" + strings.TrimPrefix(s.URL, "http")
	// Connect to the server
	ws := newWsService(u)
	err := ws.SetLocale("en")
	assert.EqualError(t, err, "Can't set locale. Connection were not initialized")
	err = ws.Connect()
	if err != nil {
		log.Fatal("Can't connect to test server")
	}
	err = ws.SetLocale("en")
	assert.Nil(t, err)
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
"strings"
)
// hightemp pairs a prefecture name with its occurrence count.
type hightemp struct {
	pref  string
	count int
}
// main counts occurrences of each prefecture (first tab-separated
// column) in chap-02/hightemp.txt and prints them ordered by count
// descending, ties broken by prefecture name ascending.
func main() {
	file, err := os.Open("./chap-02/hightemp.txt")
	// BUG FIX: the original ignored the Open error and scanned a nil file.
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	counts := map[string]int{}
	sc := bufio.NewScanner(file)
	for sc.Scan() {
		// a missing key reads as zero, so no existence check is needed
		pref := strings.Split(sc.Text(), "\t")[0]
		counts[pref]++
	}
	h := make([]hightemp, 0, len(counts))
	for pref, n := range counts {
		h = append(h, hightemp{pref, n})
	}
	// BUG FIX: the original sorted twice with the NON-stable sort.Slice
	// (by pref, then by count), so the name order within equal counts was
	// not guaranteed. A single comparator with an explicit tie-break is
	// deterministic.
	sort.Slice(h, func(i, j int) bool {
		if h[i].count != h[j].count {
			return h[i].count > h[j].count
		}
		return h[i].pref < h[j].pref
	})
	fmt.Println(h)
}
|
package venom
import (
"os"
"strings"
"github.com/fsamin/go-dump"
)
// preserveCase controls whether dumped keys keep their original casing
// ("ON") or are lower-cased. It is read from VENOM_PRESERVE_CASE; unset
// and "AUTO" both default to "ON".
var preserveCase string

func init() {
	preserveCase = os.Getenv("VENOM_PRESERVE_CASE")
	if preserveCase == "" || preserveCase == "AUTO" {
		preserveCase = "ON"
	}
}
// DumpWithPrefix dumps va as a map[string]interface{}, prefixing every
// generated key with prefix.
func DumpWithPrefix(va interface{}, prefix string) (map[string]interface{}, error) {
	e := dump.NewDefaultEncoder()
	// include length/type metadata and expand nested structures
	e.ExtraFields.Len = true
	e.ExtraFields.Type = true
	e.ExtraFields.DetailedStruct = true
	e.ExtraFields.DetailedMap = true
	e.ExtraFields.DetailedArray = true
	e.Prefix = prefix
	// TODO venom >= v1.2 update the PreserveCase behaviour
	if preserveCase == "ON" {
		e.ExtraFields.UseJSONTag = true
		e.Formatters = []dump.KeyFormatterFunc{WithFormatterLowerFirstKey()}
	} else {
		e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
	}
	return e.ToMap(va)
}
// Dump dumps va as a map[string]interface{}.
func Dump(va interface{}) (map[string]interface{}, error) {
	e := dump.NewDefaultEncoder()
	// include length/type metadata and expand nested structures
	e.ExtraFields.Len = true
	e.ExtraFields.Type = true
	e.ExtraFields.DetailedStruct = true
	e.ExtraFields.DetailedMap = true
	e.ExtraFields.DetailedArray = true
	// TODO venom >= v1.2 update the PreserveCase behaviour
	if preserveCase == "ON" {
		e.ExtraFields.UseJSONTag = true
		e.Formatters = []dump.KeyFormatterFunc{WithFormatterLowerFirstKey()}
	} else {
		e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
	}
	return e.ToMap(va)
}
// DumpString dumps va as a map[string]string. Unless preserveCase is
// "ON", keys are lower-cased.
func DumpString(va interface{}) (map[string]string, error) {
	e := dump.NewDefaultEncoder()
	// include length/type metadata and expand nested structures
	e.ExtraFields.Len = true
	e.ExtraFields.Type = true
	e.ExtraFields.DetailedStruct = true
	e.ExtraFields.DetailedMap = true
	e.ExtraFields.DetailedArray = true
	// TODO venom >= v1.2 update the PreserveCase behaviour
	if preserveCase == "ON" {
		e.ExtraFields.UseJSONTag = true
		e.Formatters = []dump.KeyFormatterFunc{WithFormatterLowerFirstKey()}
	} else {
		e.Formatters = []dump.KeyFormatterFunc{dump.WithDefaultLowerCaseFormatter()}
	}
	return e.ToStringMap(va)
}
// DumpStringPreserveCase dumps va as a map[string]string without
// applying any key formatter (keys keep the encoder's default casing).
func DumpStringPreserveCase(va interface{}) (map[string]string, error) {
	e := dump.NewDefaultEncoder()
	// include length/type metadata and expand nested structures
	e.ExtraFields.Len = true
	e.ExtraFields.Type = true
	e.ExtraFields.DetailedStruct = true
	e.ExtraFields.DetailedMap = true
	e.ExtraFields.DetailedArray = true
	if preserveCase == "ON" {
		e.ExtraFields.UseJSONTag = true
	}
	return e.ToStringMap(va)
}
// WithFormatterLowerFirstKey returns a key formatter that lower-cases
// keys at nesting level 0 only; deeper keys keep the default formatting.
func WithFormatterLowerFirstKey() dump.KeyFormatterFunc {
	f := dump.WithDefaultFormatter()
	return func(s string, level int) string {
		if level == 0 {
			return strings.ToLower(f(s, level))
		}
		return f(s, level)
	}
}
|
package shakespeare
import (
"context"
"net/http"
)
// baseURL is the root endpoint of the funtranslations REST API.
const baseURL = "https://api.funtranslations.com"

// Service converts plain text into Shakespeare-style English.
type Service interface {
	ConvertText(ctx context.Context, text string) (string, error)
}

// service is the default Service implementation backed by an HTTP client.
type service struct {
	client *http.Client
}

// New returns a Service that talks to the funtranslations API.
// NOTE(review): http.DefaultClient has no timeout — a request can hang
// indefinitely; confirm whether a dedicated client with Timeout is wanted.
func New() Service {
	return &service{
		client: http.DefaultClient,
	}
}
|
package main
import (
"context"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/pingcap/log"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/sessionctx/variable"
"go.uber.org/zap"
)
// logger is the audit log sink, wired by initLog.
var logger *zap.Logger

// initLog creates the audit logger writing to tidb-audit.log in the
// same directory as TiDB's slow-query log, reusing TiDB's max-size
// rotation setting.
func initLog() (err error) {
	var filename = "tidb-audit.log" // TODO: Tweak log configuration by TiDB start arguments.
	tidbConf := config.GetGlobalConfig()
	// Place the audit log next to the slow-query log.
	baseFolder := filepath.Dir(tidbConf.Log.SlowQueryFile)
	conf := &log.Config{
		Level:  "info",
		Format: "text",
		File: log.FileLogConfig{
			MaxSize:  tidbConf.Log.File.MaxSize,
			Filename: filepath.Join(baseFolder, filename),
		},
	}
	logger, _, err = log.InitLogger(conf)
	if err != nil {
		return
	}
	return
}
// logRecord accumulates zap fields (one audit row) until log() emits them.
type logRecord struct {
	columns []zap.Field
}

// logTrait is the fluent builder for one audit row: header columns come
// from auditLog, connection or statement columns are appended via
// addConn/addGeneral, and log() writes the row.
type logTrait interface {
	addConn(connInfo *variable.ConnectionInfo, reason string) logTrait
	addGeneral(info *variable.ConnectionInfo, names map[string]struct{}, names2 []string, normalized string, sctx *variable.SessionVars, cmd string) logTrait
	log()
}
// auditLog starts a new audit row pre-populated with the header columns:
// a fresh audit ID, timestamp, event class/subclass, status code, and
// the time elapsed since the "ExecStartTime" context value (microseconds;
// zero when the value is absent).
func auditLog(ctx context.Context, eventClass, eventSubClass string, statusCode int) logTrait {
	now, id := auditID()
	l := &logRecord{}
	costTime := time.Duration(0)
	// Guard the type assertion: the original asserted start.(time.Time)
	// unchecked and would panic if a caller stored a non-time value.
	if start, ok := ctx.Value("ExecStartTime").(time.Time); ok {
		costTime = time.Since(start)
	}
	l.columns = append(l.columns, []zap.Field{
		zap.String("ID", id),
		zap.Time("TIMESTAMP", now),
		zap.String("EVENT_CLASS", eventClass),
		zap.String("EVENT_SUBCLASS", eventSubClass),
		zap.Int("STATUS_CODE", statusCode),
		zap.Float64("COST_TIME", float64(costTime)/float64(time.Microsecond)),
	}...)
	return l
}
// addConn appends connection-event columns (connect/disconnect audit
// rows). Statement-specific columns are filled with empty placeholders.
func (l *logRecord) addConn(connInfo *variable.ConnectionInfo, reason string) logTrait {
	l.fields([]zap.Field{
		zap.String("HOST", connInfo.Host),
		zap.String("CLIENT_IP", connInfo.ClientIP),
		zap.String("USER", connInfo.User),
		zap.Strings("DATABASES", []string{connInfo.DB}),
		zap.Strings("TABLES", []string{}),
		zap.String("SQL_TEXT", ""),
		zap.Uint64("ROWS", 0),
		zap.String("CLIENT_PORT", connInfo.ClientPort),
		zap.Uint32("CONNECTION_ID", connInfo.ConnectionID),
		zap.String("CONNECTION_TYPE", connInfo.ConnectionType),
		zap.Int("SERVER_ID", connInfo.ServerID),
		zap.Int("SERVER_PORT", connInfo.ServerPort),
		zap.Float64("DURATION", connInfo.Duration),
		zap.String("SERVER_OS_LOGIN_USER", connInfo.ServerOSLoginUser),
		zap.String("OS_VERSION", connInfo.OSVersion),
		zap.String("CLIENT_VERSION", connInfo.ClientVersion),
		zap.String("SERVER_VERSION", connInfo.ServerVersion),
		zap.String("AUDIT_VERSION", ""),
		zap.String("SSL_VERSION", connInfo.SSLVersion),
		zap.Int("PID", connInfo.PID),
		zap.String("Reason", reason),
	})
	return l
}

// addGeneral appends statement-event columns: the databases and tables
// touched, the normalized SQL text, affected-row count and command name.
func (l *logRecord) addGeneral(connInfo *variable.ConnectionInfo, dbNames map[string]struct{}, tableNames []string, normalized string, sctx *variable.SessionVars, cmd string) logTrait {
	l.fields([]zap.Field{
		zap.String("HOST", connInfo.Host),
		zap.String("CLIENT_IP", connInfo.ClientIP),
		zap.String("USER", connInfo.User),
		zap.Strings("DATABASES", setToSlice(dbNames)),
		zap.Strings("TABLES", tableNames),
		zap.String("SQL_TEXT", normalized),
		zap.Uint64("ROWS", sctx.StmtCtx.AffectedRows()),
		zap.Uint32("CONNECTION_ID", connInfo.ConnectionID),
		zap.String("CLIENT_PORT", connInfo.ClientPort),
		zap.Int("PID", connInfo.PID),
		zap.String("COMMAND", cmd),
		zap.String("SQL_STATEMENTS", sctx.StmtCtx.StmtType),
	})
	return l
}
// setToSlice returns the keys of set as a slice. Ordering follows Go's
// randomized map iteration and is therefore unspecified.
func setToSlice(set map[string]struct{}) (slice []string) {
	slice = make([]string, 0, len(set))
	for key := range set {
		slice = append(slice, key)
	}
	return slice
}
// field appends a single column to the row and returns the record for chaining.
func (l *logRecord) field(f zap.Field) *logRecord {
	l.columns = append(l.columns, f)
	return l
}

// fields appends several columns at once and returns the record as a logTrait.
func (l *logRecord) fields(fs []zap.Field) logTrait {
	l.columns = append(l.columns, fs...)
	return l
}

// log emits the accumulated columns as one info-level audit row.
func (l *logRecord) log() {
	logger.Info("", l.columns...)
}
// last tracks, under its mutex, the unix-second timestamp of the most
// recently issued audit ID and the sequence number within that second.
var last struct {
	sync.Mutex
	ts  int64
	seq int
}

// auditID returns the current time and an audit ID built by
// concatenating the unix-second timestamp with a per-second sequence
// number. IDs are unique within one process; presumably collisions
// across restarts are acceptable for audit purposes — TODO confirm.
func auditID() (t time.Time, id string) {
	last.Lock()
	var seq int
	t = time.Now()
	newTs := t.UnixNano() / int64(time.Second)
	timeMoveBack := false
	if newTs > last.ts {
		// New second: restart the sequence.
		seq = 0
		last.seq = 0
		last.ts = newTs
	} else {
		// Same second, or the clock moved backwards: keep counting up.
		if newTs < last.ts {
			timeMoveBack = true
		}
		seq = last.seq
		last.seq++ // overflow be negative is ok for audit ID.
	}
	last.Unlock()
	if timeMoveBack {
		log.Warn("time is moving backwards")
	}
	id = strconv.FormatInt(newTs, 10) + strconv.Itoa(seq)
	return
}
|
// This documentation describes example APIs found under https://github.com/ribice/golang-swaggerui-example
//
// Schemes: http
// Version: 0.0.1
// Contact: Andriy Tymkiv <a.tymkiv99@gmail.com>
// Host: localhost/goswagg
//
// Consumes:
// - application/json
//
// Produces:
// - application/json
//
// Security:
// - bearer
//
// SecurityDefinitions:
// bearer:
// type: apiKey
// name: Authorization
// in: header
//
// swagger:meta
package main
import (
"crypto/sha1"
"flag"
"github.com/atymkiv/echo_frame_learning/blog/cmd/api/auth"
al "github.com/atymkiv/echo_frame_learning/blog/cmd/api/auth/logging"
gormmsqlA "github.com/atymkiv/echo_frame_learning/blog/cmd/api/auth/platform/gormsql"
at "github.com/atymkiv/echo_frame_learning/blog/cmd/api/auth/transport"
ps "github.com/atymkiv/echo_frame_learning/blog/cmd/api/post"
gormsqlP "github.com/atymkiv/echo_frame_learning/blog/cmd/api/post/platform/gormsql"
ptr "github.com/atymkiv/echo_frame_learning/blog/cmd/api/post/transport"
us "github.com/atymkiv/echo_frame_learning/blog/cmd/api/user"
"github.com/atymkiv/echo_frame_learning/blog/cmd/api/user/platform/gormsql"
ut "github.com/atymkiv/echo_frame_learning/blog/cmd/api/user/transport"
"github.com/atymkiv/echo_frame_learning/blog/pkg/middleware/jwt"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/config"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/gorm"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/grpc"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/messages"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/nats"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/secure"
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/server"
)
// main loads the configuration from the -p flag (defaulting to
// ./cmd/api/config.json) and starts the API server, panicking on any
// startup failure.
func main() {
	cfgPath := flag.String("p", "./cmd/api/config.json", "Path to config file")
	flag.Parse()
	cfg, err := config.Load(*cfgPath)
	checkErr(err)
	checkErr(Start(cfg))
}
// Start wires the database, auth/user/post services and HTTP transport
// together and runs the API server. All setup failures are returned to
// the caller; the original mixed `return err` with checkErr (which
// panics) inside a function that already returns error.
func Start(cfg *config.Configuration) error {
	db, err := gorm.New(&cfg.DB)
	if err != nil {
		return err
	}
	// NOTE(review): sha1 is a weak hash if this protects passwords — confirm.
	sec := secure.New(sha1.New())
	// Renamed from `jwt`, which shadowed the jwt package.
	tokenSvc := jwt.New(cfg.JWT.Secret, cfg.JWT.SigningAlgorithm, cfg.JWT.Duration)
	e := server.New()

	authDB := gormmsqlA.NewUser(db)
	at.NewHTTP(al.New(auth.New(authDB, tokenSvc, sec)), e)

	natsClient, err := nats.New(cfg.Nats)
	if err != nil {
		return err
	}
	messageService := messages.Create(natsClient)

	userDB := gormsql.NewUser(db)
	ut.NewHTTP(us.New(userDB, messageService), e)

	// Post routes require a valid JWT.
	v2 := e.Group("/post")
	v2.Use(tokenSvc.MWFunc())
	postDB := gormsqlP.NewPost(db)
	grpcClient, err := grpc.New(cfg.GRPC)
	if err != nil {
		return err
	}
	ptr.NewHTTP(ps.New(postDB, tokenSvc, grpcClient), v2)

	server.Start(e, &server.Config{
		Port:                cfg.Server.Port,
		ReadTimeoutSeconds:  cfg.Server.ReadTimeout,
		WriteTimeoutSeconds: cfg.Server.WriteTimeout,
		Debug:               cfg.Server.Debug,
	})
	return nil
}
func checkErr(err error) {
if err != nil {
panic(err.Error())
}
}
|
package entity
import (
"github.com/jinzhu/gorm"
//"time"
)
// UserAccount is the user-account record (GORM model) holding login
// credentials, contact details and payment identifiers.
type UserAccount struct {
	gorm.Model
	Account string
	Password string
	PermanentID string // stable ID used for all of this user's operations
	Name string
	BankCard string
	WeChat string
	Alipay string
	Telephone string
	Email string
}
|
package steps
import (
"context"
"github.com/chromedp/cdproto/cdp"
"github.com/chromedp/cdproto/network"
"github.com/chromedp/chromedp"
"github.com/pkg/errors"
"net/http"
"time"
)
// User abstracts a browser-driven test user: sign-in/out actions plus
// hooks for injecting the chromedp context, fake API and auth cookies.
type User interface {
	signIn(username string) error
	isSignedIn() bool
	signOut() error
	resetUser(fakeApi *FakeApi, ctx context.Context)
	setChromeCtx(ctx context.Context)
	setFakeApi(fakeApi *FakeApi)
	setAuthCookies()
}
func GenerateCookie(name, value, domain, path string, httpOnly bool) *http.Cookie {
if domain == "" {
domain = "localhost"
}
return &http.Cookie{
Name: name,
Value: value,
Path: path,
Domain: domain,
HttpOnly: httpOnly,
Secure: true,
MaxAge: 0,
SameSite: http.SameSiteStrictMode,
}
}
// SetCookies returns a chromedp Action that installs every cookie from
// cookies into the browser with a far-future (180 day) expiry.
func SetCookies(cookies []*http.Cookie) chromedp.Action {
	return chromedp.ActionFunc(func(ctx context.Context) error {
		expr := cdp.TimeSinceEpoch(time.Now().Add(180 * 24 * time.Hour))
		for _, cookie := range cookies {
			err := network.SetCookie(cookie.Name, cookie.Value).
				WithExpires(&expr).
				WithDomain(cookie.Domain).
				WithPath(cookie.Path).
				WithHTTPOnly(cookie.HttpOnly).
				WithSecure(cookie.Secure).
				Do(ctx)
			if err != nil {
				// Wrap the underlying chromedp error instead of discarding
				// it behind a bare "set cookie error" string.
				return errors.Wrapf(err, "set cookie %q", cookie.Name)
			}
		}
		return nil
	})
}
|
package main
import
// This is the graphics library we are going to use. It is called the
// Simple Direct Media Library. SDL for short. We need this to create the
// window and to provide the drawing functions we need.
"github.com/gophercoders/toolbox"
// These are the variables for the graphics library.
// They have to be outside of the main function because the functions at the
// end of the file need them.

// window is the window we are going to draw into.
var window toolbox.Window

// These variables are important. They are the width and height of the window.
// If you change these you will change the size of the image.
var windowWidth int
var windowHeight int
// main is the program's entry point: it sets up the graphics toolbox
// and window, initialises the game state, draws the first frame and
// then runs the game loop forever.
func main() {
	// ---- This is the start of Owen's graphics setup code ----
	// First we have to initialise the toolbox, before we can use it.
	toolbox.Initialise()
	// defer is a go keyword and a special feature.
	// This means that go will automatically call the function toolbox.Close() before
	// the program exits for us. We don't have to remember to put this at the end!
	defer toolbox.Close()
	// if you want to change these try 800 for the width and 600 for the height
	windowWidth = 1024
	windowHeight = 768
	// Now we have to create the window we want to use.
	// We need to tell the SDL library how big to make the window of the correct
	// size - that's what the bit in the brackets does
	window = toolbox.CreateWindow("Pong Game", windowWidth, windowHeight)
	// automatically destroy the window when the program finishes
	defer toolbox.DestroyWindow(window)
	// Set a black i.e. RGBA (0,0,0,0) background colour and clear the window
	toolbox.SetBackgroundColour(0, 0, 0)
	toolbox.ClearBackground()
	// ---- This is the end of Owen's graphics setup code ----
	// defer any cleanup actions
	defer cleanup()
	// initialise the games variables.
	initialise()
	// render everything initially so that we can see the game before it starts
	render()
	// now start the main game loop of the game.
	gameMainLoop()
}
// initialise sets the initial values of the game state variables.
// It must be called before the game's main loop starts. (Stub: not yet implemented.)
func initialise() {
}

// gameMainLoop controls the game. It performs three main tasks. The first task
// is to get the user's input. The second task is to update the game's state based
// on the user input and the rules of the game. The final task is to update, or
// render, the changes to the screen. It loops forever.
func gameMainLoop() {
	for {
		getInput()
		updateState()
		render()
	}
}

// cleanup is used to ensure that we free all resources before the program
// exits. (Stub: not yet implemented.)
func cleanup() {
}

// getInput gets the user's input and updates the game state variables that relate
// to the user's input, for example, the direction that the user wants to move their
// bat in. (Stub: not yet implemented.)
func getInput() {
}

// updateState updates the game state variables based on the user input and
// the rules of the game. (Stub: not yet implemented.)
func updateState() {
}

// render updates the screen by presenting the current frame.
func render() {
	// Present the window contents to the screen.
	toolbox.ShowWindow()
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package types
// RecvBlockType classifies where a received block came from.
type RecvBlockType int

const (
	// Typed constants: the original iota block left these as untyped
	// ints even though RecvBlockType was declared for them.
	RecvBlockTypeMined      RecvBlockType = iota // block mined locally
	RecvBlockTypeRemote                          // block received from a remote peer
	RecvBlockTypeReorganize                      // block replayed during a chain reorganization
)
// RecvWrap bundles a received block with a flag telling the miner
// whether to abandon its current work.
type RecvWrap struct {
	SendBreakWork bool
	Block         *Block
}
|
package main
//import "fmt"
//
//func main() {
//
// var x string =nil //Cannot use 'nil' as type string
// if x==nil { //Cannot convert 'nil' to type 'string'
// x="default"
// }
// fmt.Println(x)
//}
|
package main
var router = createMux()
|
package main
import "fmt"
// main demonstrates findKthLargest on the two LeetCode 215 examples
// (expected output: 5 and 4).
func main() {
	fmt.Println(findKthLargest([]int{
		3, 2, 1, 5, 6, 4,
	}, 2))
	fmt.Println(findKthLargest([]int{
		3, 2, 3, 1, 2, 4, 5, 5, 6,
	}, 4))
}
// findKthLargest returns the k-th largest element of nums using an
// in-place quickselect. nums is reordered as a side effect. The answer
// ends up at index len(nums)-k, its position in fully sorted order.
func findKthLargest(nums []int, k int) int {
	target := len(nums) - k
	var sel func(lo, hi int)
	sel = func(lo, hi int) {
		if lo >= hi {
			return
		}
		i, j := lo, hi
		pivot := nums[j]
		// Alternating two-pointer partition around pivot.
		for i < j {
			for i < j && nums[i] <= pivot {
				i++
			}
			nums[i], nums[j] = nums[j], nums[i]
			for i < j && nums[j] >= pivot {
				j--
			}
			nums[i], nums[j] = nums[j], nums[i]
		}
		// Recurse only into the side containing the target index.
		switch {
		case i < target:
			sel(i+1, hi)
		case i > target:
			sel(lo, i-1)
		}
	}
	sel(0, len(nums)-1)
	return nums[target]
}
|
package lib
import (
"io"
"time"
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/format"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/proto/objectserver"
)
// addObjects services one AddObjects RPC: it decodes AddObjectRequest
// messages until EOF or a request with Length < 1, passes each object's
// data to adder, and streams back an AddObjectResponse per object. On a
// per-object failure the error is reported to the client in the
// response and the loop stops; the RPC itself still returns nil.
// Throughput statistics are logged at the end.
func addObjects(conn *srpc.Conn, decoder srpc.Decoder, encoder srpc.Encoder,
	adder ObjectAdder, logger log.Logger) error {
	defer conn.Flush()
	logger.Printf("AddObjects(%s) starting\n", conn.RemoteAddr())
	numAdded := 0
	numObj := 0
	startTime := time.Now()
	var bytesAdded, bytesReceived uint64
	for ; ; numObj++ {
		var request objectserver.AddObjectRequest
		var response objectserver.AddObjectResponse
		if err := decoder.Decode(&request); err != nil {
			// EOF marks a clean end of the request stream.
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				break
			}
			return errors.New("error decoding: " + err.Error())
		}
		// A zero/negative length acts as an explicit end-of-stream marker.
		if request.Length < 1 {
			break
		}
		var err error
		response.Hash, response.Added, err =
			adder.AddObject(conn, request.Length, request.ExpectedHash)
		response.ErrorString = errors.ErrorToString(err)
		if err := encoder.Encode(response); err != nil {
			return errors.New("error encoding: " + err.Error())
		}
		if response.ErrorString != "" {
			// NOTE(review): the per-object error is sent to the client and
			// the RPC returns nil — presumably deliberate; confirm.
			logger.Printf(
				"AddObjects(): failed, %d of %d so far are new objects: %s",
				numAdded, numObj+1, response.ErrorString)
			return nil
		}
		bytesReceived += request.Length
		if response.Added {
			bytesAdded += request.Length
			numAdded++
		}
	}
	duration := time.Since(startTime)
	speed := uint64(float64(bytesReceived) / duration.Seconds())
	logger.Printf(
		"AddObjects(): %d (%s) of %d (%s) in %s (%s/s) are new objects",
		numAdded, format.FormatBytes(bytesAdded),
		numObj, format.FormatBytes(bytesReceived),
		format.Duration(duration), format.FormatBytes(speed))
	return nil
}
|
package v1
import (
"context"
"github.com/gin-gonic/gin"
"github.com/jmoiron/sqlx"
"github.com/moyrne/tebot/internal/analyze"
"github.com/moyrne/tebot/internal/database"
"github.com/moyrne/tebot/internal/logs"
"github.com/moyrne/tebot/internal/models"
"github.com/moyrne/tebot/internal/service/commands"
"github.com/pkg/errors"
"net/http"
"time"
)
// CqHTTP handles HTTP callbacks posted by a go-cqhttp instance.
type CqHTTP struct{}

// go-cqhttp post_type / message_type discriminator values.
const (
	PTMessage = "message"    // post_type: an actual chat message
	PTEvent   = "meta_event" // post_type: meta event (e.g. heartbeat)
	MTPrivate = "private"    // message_type: direct message
	MTGroup   = "group"      // message_type: group chat message
)
// HTTP is the go-cqhttp event webhook: it persists each incoming
// message, dispatches it to the private/group analyzer, stores the
// reply, and returns it as JSON.
// Event docs: https://github.com/ishkong/go-cqhttp-docs/tree/main/docs/event
func (h CqHTTP) HTTP(c *gin.Context) {
	var params QMessage
	if err := c.BindJSON(&params); err != nil {
		// TODO log error
		logs.Info("cqhttp", "unmarshal error", err)
		return
	}
	if params.PostType == PTEvent {
		// Ignore heartbeat meta events.
		commands.CQHeartBeat()
		return
	}
	if params.PostType != PTMessage {
		// Ignore anything that is not a chat message.
		logs.Error("unknown params", "params", params)
		return
	}
	qmModel := params.Model()
	// Serve first: a failed insert is logged and otherwise ignored.
	err := database.NewTransaction(c.Request.Context(), func(ctx context.Context, tx *sqlx.Tx) error {
		return qmModel.Insert(c.Request.Context(), tx)
	})
	if err != nil {
		logs.Error("insert CqHTTP params failed", "error", err)
	}
	// User filter: drop messages from banned users.
	if qmModel.QUser.Ban {
		logs.Info("quser has been band", "user", qmModel.QUser)
		return
	}
	var reply Reply
	switch params.MessageType {
	case MTPrivate:
		reply, err = h.private(c, qmModel)
	case MTGroup:
		reply, err = h.group(c, qmModel)
	default:
		// log error
		logs.Error("unsupported message_type", "type", params.MessageType)
		return
	}
	// No matching analyzer rule: reply with nothing.
	if errors.Is(err, analyze.ErrNotMatch) {
		return
	}
	if err != nil {
		logs.Info("get reply", "error", err)
		return
	}
	if err = database.NewTransaction(c.Request.Context(), func(ctx context.Context, tx *sqlx.Tx) error {
		qmModel.Reply = reply.Reply
		return qmModel.SetReply(ctx, tx)
	}); err != nil {
		logs.Error("update cqhttp params failed", "error", err)
	}
	// Wait 3 seconds before replying.
	time.Sleep(time.Second * 3)
	logs.Info("reply", "content", reply)
	c.JSON(http.StatusOK, reply)
}
// private handles a direct message: run it through the analyzer and
// wrap the answer in a Reply.
func (h CqHTTP) private(c *gin.Context, params *models.QMessage) (Reply, error) {
	reply, err := analyze.Analyze(c.Request.Context(), analyze.Params{QUID: params.UserID, Message: params.Message})
	if err != nil {
		return Reply{}, err
	}
	return Reply{Reply: reply}, nil
}
// group handles a group-chat message: the group must exist in the
// database, then the message is analyzed and the reply @-mentions the
// sender.
func (h CqHTTP) group(c *gin.Context, params *models.QMessage) (Reply, error) {
	// Reject messages from groups that are not registered.
	if err := database.NewTransaction(c.Request.Context(), func(ctx context.Context, tx *sqlx.Tx) error {
		_, err := models.GetQGroupByQGID(ctx, tx, params.GroupID)
		return err
	}); err != nil {
		return Reply{}, err
	}
	reply, err := analyze.Analyze(c.Request.Context(), analyze.Params{QUID: params.UserID, Message: params.Message})
	if err != nil {
		return Reply{}, err
	}
	return Reply{Reply: reply, ATSender: true}, nil
}
|
package leetcode
import "fmt"
// main demonstrates countSmaller on the LeetCode 315 example.
// NOTE(review): func main in package leetcode will not run as a binary —
// presumably kept only as an example; confirm.
func main(){
	fmt.Printf("%v", countSmaller([]int{5,2,6,1}))
}

// countSmaller returns, for each nums[i], how many elements to its right
// are smaller. It scans right-to-left, maintaining a sorted buffer of
// the already-seen suffix; the insertion index of nums[i] is the count.
func countSmaller(nums []int) []int {
	counts := make([]int, len(nums))
	sorts := make([]int, len(nums))
	for i := len(nums)-1; i >= 0; i--{
		counts[i] = find(sorts, nums[i], len(nums)-i-1)
	}
	return counts
}

// find inserts val into the first l sorted entries of sorts (shifting
// larger values right) and returns its insertion index, i.e. the number
// of entries strictly smaller than val.
func find(sorts []int, val ,l int) int {
	if l == 0 {
		sorts[0] = val
		return 0
	}
	res := divide(sorts, val, 0, l-1)
	// Shift the tail right to make room at the insertion point.
	for i := l; i > res; i--{
		sorts[i] = sorts[i-1]
	}
	sorts[res] = val
	return res
}
// divide returns the lower-bound insertion index for val within
// sorts[left..right]: the first index whose value is >= val, or
// right+1 when every value is smaller.
func divide(sorts []int, val int, left, right int) int {
	for left < right {
		mid := (left + right) / 2
		if sorts[mid] >= val {
			if mid == left {
				return left
			}
			right = mid - 1
		} else {
			left = mid + 1
		}
	}
	if sorts[left] >= val {
		return left
	}
	return left + 1
}
|
package nats
import (
"context"
"errors"
"sync"
pubsub "github.com/zhangce1999/pubsub/interface"
)
// Sentinel errors returned by the nats pubsub adapter.
var (
	errInvalidTopic        = errors.New("[error]: invalid topic")
	errInvalidChannel      = errors.New("[error]: invalid channel")
	errInvalidConnection   = errors.New("[error]: invalid connection")
	errInvalidBroker       = errors.New("[error]: invalid broker")
	errInvalidPublisher    = errors.New("[error]: invalid publisher")
	errInvalidSubscriber   = errors.New("[error]: invalid subscriber")
	errInvalidMultiPublish = errors.New("[error]: multiPublish error")
	errEmptyData           = errors.New("[error]: empty data")
)

// Compile-time check that *Publisher satisfies pubsub.Publisher.
var _ pubsub.Publisher = &Publisher{}

// Publisher publishes messages for a single topic.
type Publisher struct {
	// rw is a mutual-exclusion lock (sync.Mutex, not an RWMutex despite
	// the original comment) guarding the fields below.
	rw *sync.Mutex
	Topic string
	MsgsNum int
	Opts *pubsub.PublisherOptions
}
// Publish streams values from in to topic via broker b, reporting
// failures on errChan.
// NOTE(review): declared without a body — Go requires an implementation
// to be supplied elsewhere (assembly or generated code); confirm this
// stub set is intentional.
func (p *Publisher) Publish(ctx context.Context, b pubsub.Broker, topic string, in chan string, errChan chan error)

// PublishRequest is like Publish but sets a reply subject on each
// message. Declared without a body (see note on Publish).
func (p *Publisher) PublishRequest(ctx context.Context, b pubsub.Broker, topic string, reply string, in chan string, errChan chan error)

// Flush blocks until buffered messages are processed. Declared without
// a body (see note on Publish).
func (p *Publisher) Flush() error

// Close releases the publisher's resources. Declared without a body
// (see note on Publish).
func (p *Publisher) Close()
|
package main
import (
"fmt"
"github.com/jackytck/projecteuler/tools"
)
// solve answers Project Euler 58: starting from the 3x3 spiral (side 3,
// 3 diagonal primes out of corners 3,5,7,9), it grows the spiral two
// units per ring, testing the three new non-square corners for
// primality, and returns the first side length where the prime ratio
// along both diagonals drops to 10% or below. A spiral of side n has
// 2n-1 diagonal values, so the loop runs while 10*prime > 2*side-1.
func solve() int {
	side := 3
	prime := 3
	corner := 9
	for 10*prime > 2*side-1 {
		side += 2
		step := side - 1
		// Only the first three corners of a ring can be prime; the fourth
		// is side*side, a perfect square.
		for i := 0; i < 3; i++ {
			corner += step
			if tools.IsPrime(corner) {
				prime++
			}
		}
		corner += step
	}
	return side
}
// main prints the answer to Project Euler problem 58.
func main() {
	fmt.Println(solve())
}
// What is the side length of the square spiral for which the ratio of primes
// along both diagonals first falls below 10%?
|
package data_structures
import "fmt"
// BinarySearchTree is an unbalanced binary search tree of ints.
type BinarySearchTree struct {
	Root *BstNode
}

// BstNode is a single tree node; values < Value go left, others right.
type BstNode struct {
	Value int
	LeftNode *BstNode
	RightNode *BstNode
}
// GetBst returns a new, empty binary search tree.
func GetBst() *BinarySearchTree {
	return new(BinarySearchTree)
}
// Print writes value to stdout; it ignores the receiver and exists as a
// ready-made callback for Traverse.
func (bst *BinarySearchTree) Print(value int){
	fmt.Println(value)
}
// Traverse walks the subtree rooted at node in-order, invoking callback
// on every value. Fix: the recursive calls previously passed bst.Print
// instead of callback, so any custom callback was only applied at the
// top level and children were always printed.
func (bst *BinarySearchTree) Traverse(node *BstNode, callback func(val int)) {
	if node == nil {
		return
	}
	bst.Traverse(node.LeftNode, callback)
	callback(node.Value)
	bst.Traverse(node.RightNode, callback)
}
// Add inserts value into the tree. Values equal to an existing node go
// to its right subtree. The node literal is built once instead of the
// original's duplicated root/new-node construction.
func (bst *BinarySearchTree) Add(value int) {
	node := &BstNode{Value: value}
	if bst.Root == nil {
		bst.Root = node
		return
	}
	current := bst.Root
	for {
		if value < current.Value {
			if current.LeftNode == nil {
				current.LeftNode = node
				return
			}
			current = current.LeftNode
		} else {
			if current.RightNode == nil {
				current.RightNode = node
				return
			}
			current = current.RightNode
		}
	}
}
|
package gotest
import "testing"
// TestBasic is a placeholder harness check: the condition is
// tautological (grade is always "D"), so it can never fail and only
// proves the test runner executes.
func TestBasic(test *testing.T) {
	grade := "D"
	if grade != "D" {
		test.Error("Test Case failed.")
	}
}
|
package config
import (
"testing"
)
// Test_NewSpeConfig round-trips the spec config: load it from
// ../static/cuttle.yaml, log it, then marshal it back out to
// ../static/cuttle1.yaml. It depends on those fixture paths existing
// relative to the test working directory.
func Test_NewSpeConfig(t *testing.T) {
	t.Log("Start to init…")
	speConfig, err := NewSpeConfig("../static/cuttle.yaml")
	if err != nil {
		t.Errorf("Failed to init,err=%s", err)
	}
	t.Logf("%+v", speConfig)
	t.Log("End Init!!!")
	err = speConfig.Marshal("../static/cuttle1.yaml")
	if err != nil {
		t.Errorf("Failed to Marshal,err=%s", err)
	}
}
|
package logger
import (
"errors"
"os"
"path"
"path/filepath"
"syscall"
"github.com/sirupsen/logrus"
)
// Log is the info-level logger; Init points it at ../logs/info.log.
var Log = logrus.New()

// LogError is the error-level logger; Init points it at ../logs/error.log.
var LogError = logrus.New()

// Debug debug logger
var Debug = Log.Debug

// Debugf debug formatting logger
var Debugf = Log.Debugf

// Info info logger
var Info = Log.Info

// Infof info formatting logger
var Infof = Log.Infof

// Warn warning logger
var Warn = Log.Warn

// Warnf warning formatting logger
var Warnf = Log.Warnf

// Error error logger
var Error = LogError.Error

// Errorf error formatting logger
var Errorf = LogError.Errorf

// Fatal fatal logger
var Fatal = LogError.Fatal

// Fatalf fatal formatting logger
var Fatalf = LogError.Fatalf
// Init re-creates the two package loggers and attaches their log files.
// On failure the logger keeps its default stderr output and the error
// is reported via LogError.
// NOTE(review): reassigning Log/LogError here does NOT update the
// package-level convenience aliases (Debug, Info, …) captured above,
// which still point at the original logger instances — confirm intended.
func Init() {
	Log = logrus.New()
	LogError = logrus.New()
	err := initLogger(Log, "../logs/info.log")
	if err != nil {
		LogError.Error(err)
		LogError.Info("failed to log to file, using default stderr")
	}
	err = initLogger(LogError, "../logs/error.log")
	if err != nil {
		LogError.Error(err)
		LogError.Info("failed to log to file, using default stderr")
	}
}
// initLogger ensures logPath's directory and file exist and points
// logger's output at the file (opened for append).
//
// Fixes over the original: (1) after the second os.Stat the code
// checked err2 instead of err, so a Stat failure fell through with a
// stale stat value; (2) a non-IsExist MkdirAll error was silently
// ignored; (3) the IsExist branch returned nil early without ever
// attaching the log file.
func initLogger(logger *logrus.Logger, logPath string) error {
	// Temporarily clear the umask so the modes below apply exactly.
	mask := syscall.Umask(0)
	defer syscall.Umask(mask)
	// create dir
	logPath, _ = filepath.Abs(logPath)
	logDir := path.Dir(logPath)
	if err := os.MkdirAll(logDir, 0755); err != nil {
		if !os.IsExist(err) {
			return err
		}
		// A path component already exists; it must be a directory.
		stat, statErr := os.Stat(logDir)
		if statErr != nil {
			return statErr
		}
		if !stat.IsDir() {
			return errors.New("path exists but is not a directory")
		}
	}
	// create file
	stat, err := os.Stat(logPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		if _, err = os.Create(logPath); err != nil {
			return err
		}
		stat, err = os.Stat(logPath)
		if err != nil { // original mistakenly re-checked err2 here
			return err
		}
	}
	logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		return err
	}
	if !stat.IsDir() {
		logger.Out = logFile
	}
	return nil
}
|
//+build test
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package daemonset
import (
"context"
"encoding/json"
"log"
"os/exec"
"time"
"github.com/Azure/aks-engine/test/e2e/kubernetes/pod"
"github.com/Azure/aks-engine/test/e2e/kubernetes/util"
"github.com/pkg/errors"
)
// Daemonset is used to parse data from kubectl get daemonsets
type Daemonset struct {
	Metadata Metadata `json:"metadata"`
	Spec Spec `json:"spec"`
	Status Status `json:"status"`
}

// Metadata holds information like name, createdat, labels, and namespace
type Metadata struct {
	CreatedAt time.Time `json:"creationTimestamp"`
	Labels map[string]string `json:"labels"`
	Name string `json:"name"`
	Namespace string `json:"namespace"`
}

// Spec holds the daemonset's pod template.
type Spec struct {
	Template Template `json:"template"`
}

// Template is used for fetching the daemonset spec -> containers
type Template struct {
	TemplateSpec TemplateSpec `json:"spec"`
}

// TemplateSpec holds the list of containers for a daemonset
type TemplateSpec struct {
	Containers []Container `json:"containers"`
}

// Container holds information like image
type Container struct {
	Image string `json:"image"`
}

// Status holds the daemonset scheduling counters
// (current/desired/available/ready pod counts).
type Status struct {
	CurrentNumberScheduled int `json:"currentNumberScheduled"`
	DesiredNumberScheduled int `json:"desiredNumberScheduled"`
	NumberAvailable int `json:"numberAvailable"`
	NumberReady int `json:"numberReady"`
}

// List holds all daemonsets returned from doing a kubectl get daemonsets
type List struct {
	Daemonsets []Daemonset `json:"items"`
}

// GetResult is a return struct for GetAsync
type GetResult struct {
	ds *Daemonset
	err error
}
// Delete will delete a Daemonset in a given namespace, retrying the
// kubectl invocation up to retries times and returning the last error.
func (d *Daemonset) Delete(retries int) error {
	var zeroValueDuration time.Duration
	var kubectlOutput []byte
	var kubectlError error
	for i := 0; i < retries; i++ {
		cmd := exec.Command("k", "delete", "daemonset", "-n", d.Metadata.Namespace, d.Metadata.Name)
		kubectlOutput, kubectlError = util.RunAndLogCommand(cmd, zeroValueDuration)
		if kubectlError != nil {
			// Fix: name and namespace were passed in the wrong order for
			// this format string ("DaemonSet %s in namespace %s").
			log.Printf("Error while trying to delete DaemonSet %s in namespace %s:%s\n", d.Metadata.Name, d.Metadata.Namespace, string(kubectlOutput))
			continue
		}
		break
	}
	return kubectlError
}
// CreateDaemonsetDeleteIfExists will create a daemonset, deleting any pre-existing
// daemonset with the same name + namespace first, then waiting until no pods
// matching labelKey=labelVal remain before applying the manifest.
func CreateDaemonsetDeleteIfExists(filename, name, namespace, labelKey, labelVal string, sleep, timeout time.Duration) (*Daemonset, error) {
	d, err := Get(name, namespace, 3)
	if err == nil {
		log.Printf("daemonset %s in namespace %s already exists, will delete\n", name, namespace)
		err = d.Delete(3)
		if err != nil {
			log.Printf("unable to delete daemonset %s in namespace %s\n", name, namespace)
			return nil, err
		}
	}
	// Wait for the old daemonset's pods (max 0 running) to disappear.
	_, err = pod.WaitForMaxRunningByLabelWithRetry(0, labelKey, labelVal, namespace, 500*time.Millisecond, timeout)
	if err != nil {
		return nil, err
	}
	return CreateDaemonsetFromFileWithRetry(filename, name, namespace, sleep, timeout)
}
// CreateDaemonsetFromFile applies a Daemonset manifest from filename and
// fetches the resulting object (with retry). The original comment said
// "Pod"; this operates on daemonsets.
func CreateDaemonsetFromFile(filename, name, namespace string, sleep, timeout time.Duration) (*Daemonset, error) {
	cmd := exec.Command("k", "apply", "-f", filename)
	util.PrintCommand(cmd)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Printf("Error trying to create Daemonset %s:%s\n", name, string(out))
		return nil, err
	}
	d, err := GetWithRetry(name, namespace, sleep, timeout)
	if err != nil {
		log.Printf("Error while trying to fetch Daemonset %s:%s\n", name, err)
		return nil, err
	}
	return d, nil
}
// CreateDaemonsetFromFileAsync wraps CreateDaemonsetFromFile with a struct
// response for goroutine + channel usage.
func CreateDaemonsetFromFileAsync(filename, name, namespace string, sleep, timeout time.Duration) GetResult {
	ds, err := CreateDaemonsetFromFile(filename, name, namespace, sleep, timeout)
	return GetResult{
		ds: ds,
		err: err,
	}
}
// CreateDaemonsetFromFileWithRetry will kubectl apply a Daemonset from
// file, retrying until it succeeds or timeout elapses; on timeout the
// most recent error is returned.
func CreateDaemonsetFromFileWithRetry(filename, name, namespace string, sleep, timeout time.Duration) (*Daemonset, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetResult)
	var mostRecentCreateDaemonsetFromFileWithRetryError error
	var ds *Daemonset
	go func() {
		for {
			result := CreateDaemonsetFromFileAsync(filename, name, namespace, sleep, timeout)
			// Select on the send as well as ctx.Done(): the original sent
			// unconditionally, leaking this goroutine blocked on ch once
			// the receiver returned on timeout.
			select {
			case ch <- result:
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentCreateDaemonsetFromFileWithRetryError = result.err
			ds = result.ds
			if mostRecentCreateDaemonsetFromFileWithRetryError == nil {
				if ds != nil {
					return ds, nil
				}
			}
		case <-ctx.Done():
			return ds, errors.Errorf("CreateDaemonsetFromFileWithRetry timed out: %s\n", mostRecentCreateDaemonsetFromFileWithRetryError)
		}
	}
}
// Get will return a daemonset with a given name and namespace, retrying
// the kubectl call up to retries times with a 3s pause between attempts.
// Fixes over the original: the loop now stops on the first success
// (previously it re-ran kubectl for every retry regardless), and no
// sleep happens after the final attempt.
func Get(dsName, namespace string, retries int) (*Daemonset, error) {
	ds := Daemonset{}
	var out []byte
	var err error
	for i := 0; i < retries; i++ {
		cmd := exec.Command("k", "get", "daemonsets", dsName, "-n", namespace, "-o", "json")
		out, err = cmd.CombinedOutput()
		if err == nil {
			if jsonErr := json.Unmarshal(out, &ds); jsonErr != nil {
				log.Printf("Error unmarshalling pods json:%s\n", jsonErr)
				err = jsonErr
			} else {
				break // success — stop retrying
			}
		}
		if i+1 < retries {
			time.Sleep(3 * time.Second)
		}
	}
	return &ds, err
}
// GetAsync wraps Get (single attempt) with a struct response for
// goroutine + channel usage.
func GetAsync(dsName, namespace string) GetResult {
	ds, err := Get(dsName, namespace, 1)
	return GetResult{
		ds: ds,
		err: err,
	}
}
// GetWithRetry gets a daemonset, polling until success or until timeout
// elapses; on timeout the most recent error is returned.
func GetWithRetry(dsName, namespace string, sleep, timeout time.Duration) (*Daemonset, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetResult)
	var mostRecentGetWithRetryError error
	var ds *Daemonset
	go func() {
		for {
			result := GetAsync(dsName, namespace)
			// Select on the send as well as ctx.Done(): the original sent
			// unconditionally, leaking this goroutine blocked on ch once
			// the receiver returned on timeout.
			select {
			case ch <- result:
			case <-ctx.Done():
				return
			}
			time.Sleep(sleep)
		}
	}()
	for {
		select {
		case result := <-ch:
			mostRecentGetWithRetryError = result.err
			ds = result.ds
			if mostRecentGetWithRetryError == nil {
				if ds != nil {
					return ds, nil
				}
			}
		case <-ctx.Done():
			return nil, errors.Errorf("GetWithRetry timed out: %s\n", mostRecentGetWithRetryError)
		}
	}
}
|
package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"path"
"sort"
"strconv"
"strings"
"time"
yaml "gopkg.in/yaml.v2"
)
const (
	OK = "200 OK" // expected http.Response.Status for successful calls
	HTTP = "http://"
	HTTPS = "https://"
)

// ElasticSearch caches cluster node and index metadata for one cluster URL.
type ElasticSearch struct {
	nodes []Node
	indices []Index
	url string
}

// Node mirrors the "fs" section of a nodes-stats API response.
type Node struct {
	FileSystem FileSystem `json:"fs"`
}

// FileSystem wraps the aggregate filesystem totals of a node.
type FileSystem struct {
	Total FileSystemTotal `json:"total"`
}

// FileSystemTotal holds a node's disk capacity figures in bytes.
type FileSystemTotal struct {
	TotalInBytes int64 `json:"total_in_bytes"`
	FreeInBytes int64 `json:"free_in_bytes"`
	AvailableInBytes int64 `json:"available_in_bytes"`
}

// Index is one index's settings subset: creation time and name.
type Index struct {
	CreationDate int64 `json:"creation_date"`
	ProvidedName string `json:"provided_name"`
}

// Indices implements sort.Interface ordering indices oldest-first by
// creation date.
type Indices []Index

func (indices Indices) Len() int { return len(indices) }
func (indices Indices) Swap(i, j int) { indices[i], indices[j] = indices[j], indices[i] }
func (indices Indices) Less(i, j int) bool { return indices[i].CreationDate < indices[j].CreationDate }
// getURLWhitShemaString normalizes url to the plain http:// scheme: an
// https:// prefix is rewritten to http://, and a missing scheme is
// prepended. (Name kept for compatibility; note the downgrade from
// https is intentional in the original.)
func getURLWhitShemaString(url string) string {
	switch {
	case strings.HasPrefix(url, HTTP):
		return url
	case strings.HasPrefix(url, HTTPS):
		return HTTP + strings.TrimPrefix(url, HTTPS)
	default:
		return HTTP + url
	}
}
func setHeaders(request *http.Request, headers map[string][]string) {
for header, values := range headers {
for _, value := range values {
request.Header.Set(header, value)
}
}
}
func setForm(request *http.Request, form map[string]string) {
for key, value := range form {
request.Form.Add(key, value)
}
}
// MakeHTTPRequest performs one HTTP request against url (scheme normalized to
// plain http) with the optional body, headers and form values, and returns the
// response status line and body.
func MakeHTTPRequest(method, url string, body []byte, headers map[string][]string, form map[string]string) (string, []byte, error) {
	url = getURLWhitShemaString(url)
	req, err := http.NewRequest(method, url, bytes.NewBuffer(body))
	if err != nil {
		return "", nil, err
	}
	setHeaders(req, headers)
	setForm(req, form)
	// Always bound the request: the previous zero-value client would wait
	// forever on a hung Elasticsearch node.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return "", nil, err
	}
	defer resp.Body.Close()
	responseBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return resp.Status, nil, err
	}
	return resp.Status, responseBody, nil
}
// MakeGetHTTPrequest issues a GET request with no body, headers or form data.
func MakeGetHTTPrequest(url string) (string, []byte, error) {
	return MakeHTTPRequest("GET", url, nil, nil, nil)
}
// MakeDeleteHTTPrequest issues a DELETE request with no body, headers or form data.
func MakeDeleteHTTPrequest(url string) (string, []byte, error) {
	return MakeHTTPRequest("DELETE", url, nil, nil, nil)
}
// GetIndices returns a defensive copy of the cached index list so callers
// cannot mutate the receiver's internal state.
func (e *ElasticSearch) GetIndices() []Index {
	out := make([]Index, len(e.indices))
	copy(out, e.indices)
	return out
}
// GetNode returns the cached node at the given position, or an error when the
// position falls outside the cached node list.
func (e *ElasticSearch) GetNode(index int) (Node, error) {
	// Guard clause instead of the original nested happy path.
	if index < 0 || index >= len(e.nodes) {
		return Node{}, errors.New("index out of bound")
	}
	return e.nodes[index], nil
}
// DeleteIndex removes the named index from the cluster via the REST API and
// logs the deletion. A non-200 response becomes an error carrying the status
// line and response body.
func (e *ElasticSearch) DeleteIndex(name string) error {
	url := path.Join(e.url, "/"+name)
	status, body, err := MakeDeleteHTTPrequest(url)
	if err != nil {
		return err
	}
	if status != OK {
		// Fixed: the newline was misspelled "/n" and the message lacked a
		// space after "index".
		return errors.New("delete index " + name + " status: " + status + "\n" + string(body))
	}
	log.Printf("Index %s has been deleted!\n", name)
	return nil
}
// getIndicesNames asks the cluster for its index catalog (/_cat/indices) and
// returns just the index names.
func (e *ElasticSearch) getIndicesNames() ([]string, error) {
	url := path.Join(e.url, "/_cat/indices?format=json")
	status, body, err := MakeGetHTTPrequest(url)
	if err != nil {
		return nil, err
	}
	if status != OK {
		return nil, errors.New("Return status from getIndicesNames: " + status)
	}
	// Indices mirrors the row layout of the /_cat/indices JSON response;
	// only the Index column is used below.
	type Indices []struct {
		Health       string `json:"health"`
		Status       string `json:"status"`
		Index        string `json:"index"`
		UUID         string `json:"uuid"`
		Pri          string `json:"pri"`
		Rep          string `json:"rep"`
		DocsCount    string `json:"docs.count"`
		DocsDeleted  string `json:"docs.deleted"`
		StoreSize    string `json:"store.size"`
		PriStoreSize string `json:"pri.store.size"`
	}
	indices := new(Indices)
	if err := json.Unmarshal(body, indices); err != nil {
		// Typo fix in the message: "unmurshaling" -> "unmarshaling".
		return nil, errors.New("In getIndicesNames there was a problem with unmarshaling response body: " + err.Error())
	}
	indicesNames := make([]string, 0, len(*indices))
	for _, index := range *indices {
		indicesNames = append(indicesNames, index.Index)
	}
	return indicesNames, nil
}
// getIndex fetches the settings of a single index and extracts its provided
// name and creation date.
func (e *ElasticSearch) getIndex(indexName string) (*Index, error) {
	url := path.Join(e.url, "/"+indexName+"/_settings")
	status, body, err := MakeGetHTTPrequest(url)
	if err != nil {
		return nil, err
	}
	if status != OK {
		// Message fixed: it used to claim "extactNode".
		return nil, errors.New("Return status from getIndex: " + status)
	}
	data := make(map[string]interface{})
	if err := json.Unmarshal(body, &data); err != nil {
		return nil, err
	}
	// Descend one level at a time. The previous chained type assertions had
	// an "ok" check only on the last step, so a response with an unexpected
	// shape panicked instead of returning an error.
	indexEntry, ok := data[indexName].(map[string]interface{})
	if !ok {
		return nil, errors.New("can't extract raw index map for index: " + indexName)
	}
	settings, ok := indexEntry["settings"].(map[string]interface{})
	if !ok {
		return nil, errors.New("can't extract raw index map for index: " + indexName)
	}
	rawIndexMap, ok := settings["index"].(map[string]interface{})
	if !ok {
		return nil, errors.New("can't extract raw index map for index: " + indexName)
	}
	return getIndexFromRawMap(rawIndexMap)
}
// getIndexFromRawMap builds an Index from the decoded "index" settings map,
// validating that both the provided name and the creation date are present.
// The creation date arrives as a decimal string and is parsed to int64.
func getIndexFromRawMap(rawIndexMap map[string]interface{}) (*Index, error) {
	if rawIndexMap == nil {
		return nil, errors.New("Nil rawIndexMap")
	}
	name, ok := rawIndexMap["provided_name"].(string)
	if !ok {
		return nil, errors.New("can't extract provided name from index")
	}
	rawDate, ok := rawIndexMap["creation_date"].(string)
	if !ok {
		return nil, errors.New("can't extract creating date from index: " + name)
	}
	created, err := strconv.ParseInt(rawDate, 10, 64)
	if err != nil {
		return nil, err
	}
	index := &Index{ProvidedName: name, CreationDate: created}
	return index, nil
}
// ExtractIndices refreshes the receiver's cached index list by fetching every
// index name from the cluster and then loading each index's settings.
func (e *ElasticSearch) ExtractIndices() error {
	names, err := e.getIndicesNames()
	if err != nil {
		return err
	}
	// Reset the cache before filling it, exactly like the original.
	e.indices = make([]Index, 0, len(names))
	for _, name := range names {
		idx, err := e.getIndex(name)
		if err != nil {
			return err
		}
		e.indices = append(e.indices, *idx)
	}
	return nil
}
// Init points the client at the given Elasticsearch API address, expected as
// "host:port" without a scheme (the scheme is added per request).
func (e *ElasticSearch) Init(url string) {
	e.url = url
}
// ExtractNodes refreshes the receiver's cached node list from the cluster's
// file-system statistics endpoint (/_nodes/stats/fs).
func (e *ElasticSearch) ExtractNodes() error {
	url := path.Join(e.url, "/_nodes/stats/fs")
	status, body, err := MakeGetHTTPrequest(url)
	if err != nil {
		return err
	}
	if status != OK {
		// Message fixed: it used to claim "extactNode".
		return errors.New("Return status from ExtractNodes: " + status)
	}
	data := make(map[string]interface{})
	if err := json.Unmarshal(body, &data); err != nil {
		// Typo fixes: "extractNode"/"unmurshaling".
		return errors.New("In ExtractNodes there was a problem with unmarshaling response body: " + err.Error())
	}
	nodes, ok := data["nodes"].(map[string]interface{})
	if !ok {
		return errors.New("Error extracting raw nodes")
	}
	e.nodes = make([]Node, 0, len(nodes))
	for rawNodeName, rawNodeInterface := range nodes {
		rawNodeMap, ok := rawNodeInterface.(map[string]interface{})
		if !ok {
			return errors.New("Can't extract raw node map for " + rawNodeName)
		}
		node, err := getNode(rawNodeMap)
		if err != nil {
			return err
		}
		e.nodes = append(e.nodes, *node)
	}
	return nil
}
// getNode builds a Node from a decoded node-stats entry by extracting its
// "fs" (file system) section.
func getNode(rawNodeMap map[string]interface{}) (*Node, error) {
	fsMap, ok := rawNodeMap["fs"].(map[string]interface{})
	if !ok {
		return nil, errors.New("Can't extract file system for node")
	}
	fs, err := getFileSystem(fsMap)
	if err != nil {
		return nil, err
	}
	node := &Node{FileSystem: *fs}
	return node, nil
}
// getFileSystem builds a FileSystem from a decoded "fs" section by extracting
// its "total" sub-section.
func getFileSystem(rawFileSystem map[string]interface{}) (*FileSystem, error) {
	rawFileSystemTotalMap, ok := rawFileSystem["total"].(map[string]interface{})
	if !ok {
		// Typo fix: the message used to read "tatal".
		return nil, errors.New("Can't extract total from file system")
	}
	total, err := getFileSystemTotal(rawFileSystemTotalMap)
	if err != nil {
		return nil, err
	}
	return &FileSystem{
		Total: *total,
	}, nil
}
// getFileSystemTotal extracts the three disk-space counters from a decoded
// "total" section. JSON numbers decode as float64, so each value is asserted
// as float64 and then truncated to int64.
func getFileSystemTotal(rawFileSystemTotalMap map[string]interface{}) (*FileSystemTotal, error) {
	// One helper instead of three copy-pasted assertion blocks.
	extract := func(key, label string) (int64, error) {
		v, ok := rawFileSystemTotalMap[key].(float64)
		if !ok {
			return 0, errors.New("Can't extract " + label + " for node")
		}
		return int64(v), nil
	}
	total, err := extract("total_in_bytes", "total in bytes")
	if err != nil {
		return nil, err
	}
	free, err := extract("free_in_bytes", "free in bytes")
	if err != nil {
		return nil, err
	}
	avail, err := extract("available_in_bytes", "available in bytes")
	if err != nil {
		return nil, err
	}
	return &FileSystemTotal{
		TotalInBytes:     total,
		FreeInBytes:      free,
		AvailableInBytes: avail,
	}, nil
}
// getBytesLeft refreshes node statistics and reports the available disk space
// (in bytes) of the first node.
func getBytesLeft(cluster *ElasticSearch) (int64, error) {
	// The original discarded this error and then reported a misleading
	// "no node avialable" instead.
	if err := cluster.ExtractNodes(); err != nil {
		return 0, err
	}
	node, err := cluster.GetNode(0)
	if err != nil {
		return 0, errors.New("no node available")
	}
	return node.FileSystem.Total.AvailableInBytes, nil
}
// removeOldestIndex refreshes the index list, sorts it by creation date and
// deletes the oldest index.
func removeOldestIndex(cluster *ElasticSearch) error {
	// The original discarded this error and then worked on stale (or empty)
	// cached data.
	if err := cluster.ExtractIndices(); err != nil {
		return err
	}
	indices := cluster.GetIndices()
	if len(indices) == 0 {
		return errors.New("there are no indices")
	}
	sort.Sort(Indices(indices))
	return cluster.DeleteIndex(indices[0].ProvidedName)
}
// convertIntervaceKeyMapToStringKeyOne rebuilds a map keyed by arbitrary
// values as a map keyed by each key's default string representation
// (fmt "%v" formatting). Values are carried over unchanged.
func convertIntervaceKeyMapToStringKeyOne(data map[interface{}]interface{}) map[string]interface{} {
	converted := make(map[string]interface{}, len(data))
	for k, v := range data {
		converted[fmt.Sprintf("%v", k)] = v
	}
	return converted
}
// converSliceOfInterfacesToSliceOfStrings renders every element of the slice
// with fmt's default "%v" formatting, preserving order and length.
func converSliceOfInterfacesToSliceOfStrings(slice []interface{}) []string {
	out := make([]string, len(slice))
	for i := range slice {
		out[i] = fmt.Sprintf("%v", slice[i])
	}
	return out
}
// client:
// hosts:
// - elasticsearch-logging.garden.svc
// port: 9200
// getEsApiFromConf reads the curator YAML config (see the example above) and
// returns "host:port" built from the first entry of client.hosts and
// client.port.
func getEsApiFromConf(filename string) (string, error) {
	yamlFile, err := ioutil.ReadFile(filename)
	if err != nil {
		return "", err
	}
	data := make(map[string]interface{})
	if err := yaml.Unmarshal(yamlFile, data); err != nil {
		return "", err
	}
	// yaml.v2 decodes nested maps with interface{} keys; convert to string
	// keys before use.
	rawClient, ok := data["client"].(map[interface{}]interface{})
	if !ok {
		return "", errors.New("can't find client section in config file")
	}
	client := convertIntervaceKeyMapToStringKeyOne(rawClient)
	rawHosts, ok := client["hosts"].([]interface{})
	if !ok {
		return "", errors.New("can't find hosts section in client section in config file")
	}
	hosts := converSliceOfInterfacesToSliceOfStrings(rawHosts)
	if len(hosts) < 1 {
		return "", errors.New("empty hosts section in client section in config file")
	}
	rawPort, ok := client["port"].(int)
	if !ok {
		return "", errors.New("can't find port section in client section in config file")
	}
	// rawPort is already an int; the old int(rawPort) conversion was a no-op.
	return hosts[0] + ":" + strconv.Itoa(rawPort), nil
}
// main resolves the Elasticsearch API address (flag, then config file, then a
// localhost fallback) and deletes the oldest index repeatedly until the
// available disk space rises above the configured threshold.
func main() {
	diskSpaceThreshold := flag.Int64("disk_threshold", 100000000, "The minimum maximum disk space left before deletion of the index")
	esAPI := flag.String("es_api", "", "The elasticsearch API")
	config := flag.String("config", "/etc/config/config.yml", "The config file for the curator")
	flag.Parse()
	var err error
	if *esAPI == "" {
		// Config-file errors are deliberately non-fatal: the localhost
		// default below still applies.
		*esAPI, err = getEsApiFromConf(*config)
		if err != nil {
			log.Println(err.Error())
		}
		if *esAPI == "" {
			*esAPI = "localhost:9200"
		}
	}
	cluster := &ElasticSearch{}
	cluster.Init(*esAPI)
	bytesLeft, err := getBytesLeft(cluster)
	if err != nil {
		log.Println(err.Error())
		return
	}
	log.Printf("available bytes: %d", bytesLeft)
	// Delete the oldest index, pause so the cluster can settle, re-measure;
	// repeat until enough space is free.
	for bytesLeft < *diskSpaceThreshold {
		log.Printf("bytes left: %d\nNeed: %d\n", bytesLeft, *diskSpaceThreshold)
		err = removeOldestIndex(cluster)
		if err != nil {
			log.Println(err.Error())
			return
		}
		time.Sleep(time.Duration(5) * time.Second)
		bytesLeft, err = getBytesLeft(cluster)
		if err != nil {
			log.Println(err.Error())
			return
		}
	}
}
|
package v2
// NSQ producer/consumer helpers; callers are responsible for handling the
// produced and consumed payloads themselves.
import (
"github.com/nsqio/go-nsq"
"smallgamepk.qcwanwan.com/utils"
)
// Nsqer abstracts NSQ access: building a producer for an nsqd address, and
// running a blocking consumer that forwards messages into a channel.
type Nsqer interface {
	Producer(addr string)(*nsq.Producer,error)
	Customer(addr,topic,channel string,dat chan interface{})(error)
}
// Nsq is the default Nsqer implementation backed by github.com/nsqio/go-nsq.
type Nsq struct {}
// NewNsq returns a ready-to-use Nsqer.
func NewNsq() Nsqer{
	return &Nsq{}
}
/*
if err := producer.Publish("test", []byte("test message")); err != nil {
log.Fatal("publish error: " + err.Error())
}
*/
// Producer builds an NSQ producer connected to the nsqd at addr using the
// default client configuration. Errors are logged before being returned.
func (n *Nsq) Producer(addr string) (*nsq.Producer, error) {
	// Receiver renamed from "self" to the idiomatic short form.
	cfg := nsq.NewConfig()
	producer, err := nsq.NewProducer(addr, cfg)
	if err != nil {
		utils.Log(err)
		return nil, err
	}
	return producer, nil
}
// Customer runs a blocking NSQ consumer on the given topic/channel, forwarding
// every received message into data. It returns when the consumer is stopped,
// or immediately with an error if the consumer cannot be created or connected.
func (n *Nsq) Customer(addr, topic, channel string, data chan interface{}) error {
	cfg := nsq.NewConfig()
	consumer, err := nsq.NewConsumer(topic, channel, cfg)
	if err != nil {
		utils.Log(err)
		// Previously execution continued with a nil consumer and panicked
		// on the AddHandler call below.
		return err
	}
	// Forward every message to the caller's channel.
	consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {
		data <- message
		utils.Log(string(message.Body))
		return nil
	}))
	// Connect to a single nsqd instance.
	if err := consumer.ConnectToNSQD(addr); err != nil {
		utils.Log(err)
		// Previously the error was logged and the call blocked forever on
		// StopChan despite never being connected.
		return err
	}
	// Block until the consumer is stopped.
	<-consumer.StopChan
	return nil
}
|
package Solution
// TreeNode is a binary tree node holding an int value and links to its left
// and right children (nil when absent).
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
package chain
import "testing"
// TestHandlerChain_Handle wires three handlers into a chain
// (chain -> handlerA -> handlerB) and invokes Handle on a HandlerChain built
// from each fields combination, exercising successor delegation.
func TestHandlerChain_Handle(t *testing.T) {
	type fields struct {
		Handler Handler
		successor *HandlerChain
	}
	chain := NewHandlerA()
	handlerA := NewHandlerA()
	handlerB := NewHandlerB()
	chain.SetSuccessor(handlerA)
	handlerA.SetSuccessor(handlerB)
	tests := []struct {
		name string
		fields fields
	}{
		// TODO: Add test cases.
		{"case1", fields{chain, handlerA}},
		{"case2", fields{handlerA, handlerB}},
		{"case3", fields{handlerB, nil}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			h := &HandlerChain{
				Handler: tt.fields.Handler,
				successor: tt.fields.successor,
			}
			// Smoke test only: Handle should walk the chain without
			// panicking; no return value is asserted.
			h.Handle()
		})
	}
}
|
package main
// The packer takes all known game records and condenses them into a PackedChampionGameList.
// It outputs the PCGL, which is then used for searching in online queries. All of the
// game fields of the PCGL are in sorted order.
import (
gproto "code.google.com/p/goprotobuf/proto"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"libcleo"
"log"
"net/http"
"proto"
"regexp"
"strings"
"time"
)
// API_KEY is the Riot API key used for the static-data request in write_statics.
var API_KEY = flag.String("apikey", "", "Riot API key")
// RECORD_COUNT caps how many Mongo records are packed; 0 means no limit.
var RECORD_COUNT = flag.Int("records", 0, "Maximum number of records retrieved")
/**
 * StaticRequestInfo defines the data that should be extracted from the
 * JSON response that we get back from Riot's API.
 */
type StaticRequestInfo struct {
	// Data maps champion key to its static entry row.
	Data map[string]StaticEntry
}
/**
 * StaticEntry defines what a single entry in the output JSON looks
 * like. It also acts as the receiving structure for each row in the
 * parent StaticRequestInfo, but some of the fields are mutated from
 * the value that comes in.
 */
type StaticEntry struct {
	Id uint32 `json:"id"`
	Name string `json:"name"`
	Shortname string `json:"shortname"`
	Title string `json:"title"`
	Img string `json:"img"`
	Games uint32 `json:"games"`
}
// StaticOutputJSON is the top-level shape of the championList.json file
// consumed by frontends.
type StaticOutputJSON struct {
	// LastUpdated is the export time as a Unix timestamp (seconds).
	LastUpdated int64 `json:"lastUpdated"`
	// NumGames is the number of games included in this PCGL export.
	NumGames int `json:"numGames"`
	Champions []StaticEntry `json:"champions"`
}
/**
* This function generates static output that can be consumed by the
* frontend based on data compiled during the packing process. Additional
* metadata for each champion is also fetched from Riot and included in
* the output.
*
* This function currently writes out championList.json, a file that's
* consumed by frontends that includes a list of all champions and some
* metadata about them, including how many games are included in the
* PCGL for them.
*/
// write_statics fetches champion metadata from Riot, merges in per-champion
// game counts from the PCGL and writes the result to filename as JSON.
// All failures are logged and abort the export; the previous version silently
// ignored read/decode/encode/write errors and produced empty output.
func write_statics(filename string, pcgl libcleo.LivePCGL) {
	entries := StaticRequestInfo{}
	url := "https://na.api.pvp.net/api/lol/static-data/na/v1.2/champion?&api_key=%s"
	log.Println("Requesting latest champion data from Riot...")
	// Retrieve a list of all champions according to Riot, along with
	// some core info about each (name, title, etc)
	resp, err := http.Get(fmt.Sprintf(url, *API_KEY))
	if err != nil {
		log.Println("Error retrieving data:", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("Error reading response body:", err)
		return
	}
	if err := json.Unmarshal(body, &entries); err != nil {
		log.Println("Error decoding champion data:", err)
		return
	}
	outjson := StaticOutputJSON{}
	// Export the number of games in this pcgl export.
	outjson.NumGames = len(pcgl.All)
	outjson.LastUpdated = time.Now().Unix()
	outjson.Champions = make([]StaticEntry, 0, 200)
	// Remove non-alphanumeric characters. MustCompile is safe here: the
	// pattern is a valid compile-time constant.
	reg := regexp.MustCompile("[^A-Za-z0-9 ]+")
	for _, entry := range entries.Data {
		champ := libcleo.Rid2Cleo(entry.Id)
		clean_name := reg.ReplaceAllString(entry.Name, "")
		entry.Id = uint32(champ)
		// Shortname is the clean_name with spaces replaced with underscores (internally defined).
		entry.Shortname = strings.ToLower(strings.Replace(clean_name, " ", "_", -1))
		// Img path is the clean_name with spaces removed (defined by Riot).
		entry.Img = fmt.Sprintf("http://ddragon.leagueoflegends.com/cdn/4.9.1/img/champion/%s.png", strings.Replace(clean_name, " ", "", -1))
		entry.Games = uint32(len(pcgl.Champions[champ].Winning) + len(pcgl.Champions[champ].Losing))
		outjson.Champions = append(outjson.Champions, entry)
	}
	data, err := json.Marshal(outjson)
	if err != nil {
		log.Println("Error encoding static champion data:", err)
		return
	}
	if err := ioutil.WriteFile(filename, data, 0644); err != nil {
		log.Println("Error writing static champion file:", err)
		return
	}
	log.Printf("Written static champion file to %s", filename)
}
// main reads every packed game record from the local MongoDB instance,
// condenses the records into a PackedChampionGameList, writes it to all.pcgl
// and finally emits the static champion metadata file.
func main() {
	// TODO: Make this part optional via a command line flag.
	flag.Parse()
	if *API_KEY == "" {
		log.Fatal("You must provide an API key using the -apikey flag.")
	}
	pcgl := libcleo.LivePCGL{}
	pcgl.Champions = make(map[proto.ChampionType]libcleo.LivePCGLRecord)
	pcgl.All = make([]libcleo.GameId, 0, 100)
	// Read all records from Mongo.
	session, err := mgo.Dial("127.0.0.1:27017")
	if err != nil {
		// Previously this error was discarded, leading to a nil-pointer
		// panic on the first collection access when Mongo was unreachable.
		log.Fatal("Could not connect to MongoDB: ", err)
	}
	games_collection := session.DB("lolstat").C("games")
	defer session.Close()
	log.Println("Connection to MongoDB instance established.")
	gid_map := make(map[uint64]libcleo.GameId)
	var next_gid libcleo.GameId = 0
	// For each record:
	//   - Get all champions. For each champion:
	//     - If team won, add game id to pcgl.Champions[champion].Winning
	//     - If loss, add to .Losing
	//   - In all cases add to pcgl.All
	result := libcleo.RecordContainer{}
	query := games_collection.Find(bson.M{})
	result_iter := query.Iter()
	// Count is best-effort: it is only used for progress display.
	total_count, _ := query.Count()
	current := 1
	for result_iter.Next(&result) {
		fmt.Print(fmt.Sprintf("Packing %d of %d...", current, total_count), "\r")
		game := proto.GameRecord{}
		if err := gproto.Unmarshal(result.GameData, &game); err != nil {
			// Previously a corrupt record was processed as a zero-valued
			// game; skip it instead.
			log.Println("Skipping unreadable record:", err)
			continue
		}
		// Map game ID's to something much closer to zero (and tightly
		// packed). This will make it possible to work in 32-bit land
		// at serving time until we get beyond 4B games. That's far away.
		gid, exists := gid_map[*game.GameId]
		if exists {
			game.GameId = gproto.Uint64(uint64(gid))
		} else {
			gid_map[*game.GameId] = next_gid
			game.GameId = gproto.Uint64(uint64(next_gid))
			next_gid += 1
		}
		for _, team := range game.Teams {
			for _, player := range team.Players {
				_, exists := pcgl.Champions[*player.Champion]
				if !exists {
					pcgl.Champions[*player.Champion] = libcleo.LivePCGLRecord{}
				}
				// Copy this value out. We'll need to reassign a bit later once
				// the necessary modifications have been made.
				r := pcgl.Champions[*player.Champion]
				// If the team won, add this game to this champion's win
				// pool.
				if *team.Victory {
					r.Winning = append(pcgl.Champions[*player.Champion].Winning, libcleo.GameId(*game.GameId))
					// If they lost, add it to the loss pool.
				} else {
					r.Losing = append(pcgl.Champions[*player.Champion].Losing, libcleo.GameId(*game.GameId))
				}
				// Reassign to the master struct
				pcgl.Champions[*player.Champion] = r
			}
		}
		pcgl.All = append(pcgl.All, libcleo.GameId(*game.GameId))
		// Optional: once RECORD_COUNT records have been written, stop writing more. If this value
		// isn't provided then it defaults to zero, which will never be hit in this loop.
		if current == *RECORD_COUNT {
			break
		}
		current += 1
	}
	// Then convert into the serializable form.
	packed_pcgl := proto.PackedChampionGameList{}
	for k, v := range pcgl.Champions {
		record := proto.PackedChampionGameList_ChampionGameList{}
		record.Champion = k.Enum()
		// Copy over casted values. They're the same type but v.Winning
		// uses an aliased type and Go doesn't recognize that they're the
		// same...
		// TODO: is there a better (faster, more memory efficient) way
		// to do this without removing the type alias?
		for _, val := range v.Winning {
			record.Winning = append(record.Winning, uint32(val))
		}
		for _, val := range v.Losing {
			record.Losing = append(record.Losing, uint32(val))
		}
		packed_pcgl.Champions = append(packed_pcgl.Champions, &record)
	}
	for _, val := range pcgl.All {
		packed_pcgl.All = append(packed_pcgl.All, uint32(val))
	}
	data, err := gproto.Marshal(&packed_pcgl)
	if err != nil {
		log.Fatal("Could not serialize PCGL: ", err)
	}
	// Write to file.
	if err := ioutil.WriteFile("all.pcgl", data, 0644); err != nil {
		log.Fatal("Could not write PCGL file.")
	}
	log.Println(fmt.Sprintf("Successfully wrote %d records to all.pcgl.", len(packed_pcgl.All)))
	write_statics("html/static/data/metadata.json", pcgl)
}
|
// Copyright 2020 The Amadeus Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
/**********************************
*
*start of the kafka connect api
*
***********************************/
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "operator-sdk generate crds" to regenerate code after modifying this file
// KafkaConnectSpec defines the desired state of KafkaConnect
// this resource will create a Deployment and a Service by default
// if IngressSpec is given, then it will also create an ingress obj
type KafkaConnectSpec struct {
	// PodSpec use the default k8s.io/api/core/v1/PodSpec
	// for more information check https://godoc.org/k8s.io/api/core/v1#PodSpec
	PodSpec corev1.PodSpec `json:"podSpec"`
	// IngressSpec define how the ingress object should be created
	// special role should be given to the related service account.
	// if nil no ingress object will be created by operator, you can always create your own ingress
	IngressSpec *IngressSpec `json:"ingressSpec,omitempty"`
	// KafkaConnectorsSpec define the different connectors managed in this cluster
	KafkaConnectorsSpec *KafkaConnectorsSpec `json:"connectors,omitempty"`
	// ScaleStabilizationSec is the duration in sec between each scale action
	ScaleStabilizationSec *int64 `json:"scaleStabilizationSec,omitempty"`
	// KafkaConnectRestAPIPort tell which one is the port for rest api, if more than one port exist in pod spec
	KafkaConnectRestAPIPort int32 `json:"kafkaConnectRestAPIPort"`
}
// ConfigMap points to one item inside a Kubernetes ConfigMap that holds a
// connector configuration.
type ConfigMap struct {
	// Name is the ConfigMap object name.
	Name string `json:"name"`
	// Item is the key inside the ConfigMap whose value is the connector config.
	Item string `json:"item"`
}
// IngressSpec defines how we need to create ingress to expose the kafka connect rest api
type IngressSpec struct {
	// Style define the style of the ingress created, either a subDomain or a path
	Style *IngressStyle `json:"style,omitempty"`
	// ParentDomain is the parent domain from where the ingress will be created
	ParentDomain string `json:"parentDomain"`
}
// IngressStyle define the style of the ingress created, either a subDomain or a path
type IngressStyle string
const (
	// PathStyle will create ingress like parentDomain/kafkaconnectName
	PathStyle IngressStyle = "pathStyle"
	// DomainStyle will create ingress like kafkaconnectName.parentDomain
	DomainStyle IngressStyle = "domainStyle"
)
// KafkaConnectorsSpec define the list of the connector config and the number of task per pod
type KafkaConnectorsSpec struct {
	// Configs is the list of the kafka connector config
	Configs []KafkaConnectorConfig `json:"connectorConfigs,omitempty"`
	// TaskPerPod is the number of the task per pod, default: 1, if <=0 autoscaler will never work and initPodReplicas will be used to create deployment
	TaskPerPod *int32 `json:"taskPerPod,omitempty"`
	// InitDeploymentReplicas is the initial number of the pod if TaskPerPod is <=0
	InitDeploymentReplicas *int32 `json:"initPodReplicas,omitempty"`
	// KeepUnknownConnectors by default false, if false, it will delete all unknown connector in the cluster
	KeepUnknownConnectors bool `json:"keepUnknownConnectors,omitempty"`
	// KafkaBrokers is the broker address list of the kafka cluster
	KafkaBrokers string `json:"kafkaBrokers"`
}
// KafkaConnectorConfig define one connector will be created if it's different from the existing one
type KafkaConnectorConfig struct {
	// Name is the Name should be used as the connector
	Name string `json:"name"`
	// URL is the link where the connector config could be found
	URL *string `json:"url,omitempty"`
	// ConfigMap is the ConfigMap item where the connector config could be found
	ConfigMap *ConfigMap `json:"configMap,omitempty"`
	// TasksMax define number of task for connector it will override the value in config. the default value is the nb defined in the config
	TasksMax *int32 `json:"taskMax,omitempty"`
	// ExposeLagMetric tell if lag should be exposed or not
	ExposeLagMetric bool `json:"exposeLagMetric,omitempty"`
}
// KafkaConnectorStatus defines the observed state of each KafkaConnector
// +k8s:openapi-gen=true
type KafkaConnectorStatus struct {
	// Name of the connector this status describes.
	Name string `json:"name"`
	// TaskNb is the number of tasks currently assigned to the connector.
	TaskNb int32 `json:"taskNb"`
	// Error holds the last error reported for this connector, if any.
	Error string `json:"error,omitempty"`
}
// KafkaConnectStatus defines the observed state of KafkaConnect
// +k8s:openapi-gen=true
type KafkaConnectStatus struct {
	// KafkaConnectorStatus lists the per-connector statuses.
	KafkaConnectorStatus []KafkaConnectorStatus `json:"connectorStatus"`
	// PodNb is the current number of pods backing the connect cluster.
	PodNb int32 `json:"podNb"`
	// Updating reports whether a reconciliation is in progress.
	Updating bool `json:"updating"`
	// LastScaleTime records when the last scale action happened.
	LastScaleTime metav1.Time `json:"lastScaleTime"`
}
// KafkaConnect is the Schema for the kafkaconnects API
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=kafkaconnects,scope=Namespaced
// +genclient
type KafkaConnect struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec KafkaConnectSpec `json:"spec"`
	Status *KafkaConnectStatus `json:"status,omitempty"`
}
// KafkaConnectList contains a list of KafkaConnect
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type KafkaConnectList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []KafkaConnect `json:"items"`
}
/**********************************
*
*end of the kafka connect api
*
***********************************/
/**********************************
*
*start of the kafka connect scaler api
*
***********************************/
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// KafkaConnectAutoScalerSpec defines the desired state of KafkaConnectAutoScaler
// +k8s:openapi-gen=true
type KafkaConnectAutoScalerSpec struct {
	// kcScaleTargetRef points to the kafka connector to scale, and is used to the kafkaconnect for which metrics
	// should be collected, as well as to actually change the replica count.
	KafkaConnectorScaleTargetRef KafkaConnectorReference `json:"kcScaleTargetRef" protobuf:"bytes,1,opt,name=kcScaleTargetRef"`
	// minTasks is the lower limit for the number of replicas to which the autoscaler
	// can scale down. It defaults to 1 pod. minTasks is allowed to be 0 if the
	// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
	// metric is configured. Scaling is active as long as at least one metric value is
	// available.
	// +optional
	MinTasks *int32 `json:"minTasks,omitempty" protobuf:"varint,2,opt,name=minTasks"`
	// maxTasks is the upper limit for the number of replicas to which the autoscaler can scale up.
	// It cannot be less that minTasks.
	MaxTasks int32 `json:"maxTasks" protobuf:"varint,3,opt,name=maxTasks"`
	// metrics contains the specifications for which to use to calculate the
	// desired replica count (the maximum replica count across all metrics will
	// be used). The desired replica count is calculated multiplying the
	// ratio between the target value and the current value by the current
	// number of pods. Ergo, metrics used must decrease as the pod count is
	// increased, and vice-versa. See the individual metric source types for
	// more information about how each type of metric must respond.
	// If not set, the default metric will be set to 80% average CPU utilization.
	// +optional
	Metrics []autoscalingv2beta2.MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"`
}
// KafkaConnectorReference contains enough information to let you identify the referred kafka connector.
type KafkaConnectorReference struct {
	// Name is the name of the kafkaconnect CR that owns the connector.
	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
	// API version of the referent
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
	// KafkaConnectorName is the name of a specific kafka connector in the kafka connect resource object
	KafkaConnectorName string `json:"kafkaConnectorName"`
}
// KafkaConnectAutoScalerStatus defines the observed state of KafkaConnectAutoScaler;
// it embeds the upstream HorizontalPodAutoscaler status unchanged.
// +k8s:openapi-gen=true
type KafkaConnectAutoScalerStatus struct {
	autoscalingv2beta2.HorizontalPodAutoscalerStatus `json:",inline"`
}
// KafkaConnectAutoScaler is the Schema for the kafkaconnectautoscalers API
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=kafkaconnectautoscalers,scope=Namespaced
// +genclient
type KafkaConnectAutoScaler struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec KafkaConnectAutoScalerSpec `json:"spec,omitempty"`
	Status KafkaConnectAutoScalerStatus `json:"status,omitempty"`
}
// KafkaConnectAutoScalerList contains a list of KafkaConnectAutoScaler
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type KafkaConnectAutoScalerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []KafkaConnectAutoScaler `json:"items"`
}
/**********************************
*
*end of the kafka connect scaler api
*
***********************************/
// init registers both CRD types and their list types with the scheme builder
// so clients can encode and decode them.
func init() {
	SchemeBuilder.Register(&KafkaConnect{}, &KafkaConnectList{})
	SchemeBuilder.Register(&KafkaConnectAutoScaler{}, &KafkaConnectAutoScalerList{})
}
// CreateFakeKafkaConnect is the function to create a kafkaconnect object for
// unit tests only. It builds a minimal KafkaConnect CR with an ingress spec,
// one URL-based connector and a single container with resource limits/requests.
func CreateFakeKafkaConnect() *KafkaConnect {
	var (
		name      = "connector-elastic"
		namespace = "kafkaconnect"
		labels    = map[string]string{
			"app":  "kafkaconnect",
			"type": "elasticsearch-sink",
		}
		port            int32             = 8083
		style           IngressStyle      = "domainStyle"
		parentDomain                      = "apps-crc.testing"
		taskPerPod      int32             = 1
		connectorName                     = "connector-elastic"
		connectorURL                      = "https://raw.githubusercontent.com/amadeusitgroup/kubernetes-kafka-connect-operator/master/connector-examples/connector1.json"
		image                             = "test/kafkaconnectdockerimage:latest"
		imagePullPolicy corev1.PullPolicy = "IfNotPresent"
		// Quantities must be parsed from their string form. The previous
		// code assigned Quantity.Format (the notation enum, e.g.
		// "DecimalSI") the value strings, which produced zero-valued
		// quantities.
		resourceLimits corev1.ResourceList = corev1.ResourceList{
			"cpu":    resource.MustParse("200m"),
			"memory": resource.MustParse("300Mi"),
		}
		resourceRequests corev1.ResourceList = corev1.ResourceList{
			"cpu":    resource.MustParse("100m"),
			"memory": resource.MustParse("80Mi"),
		}
	)
	// Create a KafkaConnect object.
	kafkaconnect := &KafkaConnect{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: KafkaConnectSpec{
			KafkaConnectRestAPIPort: port,
			IngressSpec: &IngressSpec{
				Style:        &style,
				ParentDomain: parentDomain,
			},
			KafkaConnectorsSpec: &KafkaConnectorsSpec{
				Configs: []KafkaConnectorConfig{
					{
						Name: connectorName,
						URL:  &connectorURL,
					},
				},
				TaskPerPod: &taskPerPod,
			},
			PodSpec: corev1.PodSpec{
				Containers: []corev1.Container{
					{
						Name:            namespace,
						Image:           image,
						ImagePullPolicy: imagePullPolicy,
						Ports: []corev1.ContainerPort{
							{
								ContainerPort: port,
							},
						},
						Resources: corev1.ResourceRequirements{
							Limits:   resourceLimits,
							Requests: resourceRequests,
						},
					},
				},
			},
		},
	}
	return kafkaconnect
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/12/12 10:15 上午
# @File : lt_16_最接近的三数之和_test.go.go
# @Description :
# @Attention :
*/
package hot100
import (
"fmt"
"testing"
)
// Test_threeSumClosest exercises threeSumClosest with a small input slice and
// a target of 1, printing the resulting closest sum.
func Test_threeSumClosest(t *testing.T) {
	nums := []int{0, 2, 1, -3}
	fmt.Println(threeSumClosest(nums, 1))
}
// TestPrintArray prints the length of the slice produced by getArray.
func TestPrintArray(t *testing.T) {
	fmt.Println(len(getArray()))
}
// getArray is a stub that currently returns no data (a nil slice); len() of
// its result is therefore 0.
func getArray() []int {
	return nil
}
|
package proxy
import (
"context"
"net/http"
"github.com/pomerium/csrf"
"github.com/pomerium/datasource/pkg/directory"
"github.com/pomerium/pomerium/internal/encoding/jws"
"github.com/pomerium/pomerium/internal/handlers"
"github.com/pomerium/pomerium/internal/handlers/webauthn"
"github.com/pomerium/pomerium/internal/httputil"
"github.com/pomerium/pomerium/internal/sessions"
"github.com/pomerium/pomerium/internal/urlutil"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/grpc/session"
"github.com/pomerium/pomerium/pkg/grpc/user"
"github.com/pomerium/pomerium/pkg/webauthnutil"
)
// getSession loads the session with the given ID from the databroker. When
// that session impersonates another session, the impersonated session is
// returned instead and isImpersonated is true.
func (p *Proxy) getSession(ctx context.Context, sessionID string) (s *session.Session, isImpersonated bool, err error) {
	client := p.state.Load().dataBrokerClient
	s, err = session.Get(ctx, client, sessionID)
	if err != nil {
		// Return early instead of relying on the nil-safe proto getter to
		// skip the impersonation branch.
		return nil, false, err
	}
	if s.GetImpersonateSessionId() != "" {
		s, err = session.Get(ctx, client, s.GetImpersonateSessionId())
		return s, true, err
	}
	return s, false, nil
}
// getSessionState loads the raw session JWT from the request via the session
// store and verifies/decodes it into a sessions.State using the shared HS256
// key.
func (p *Proxy) getSessionState(r *http.Request) (sessions.State, error) {
	state := p.state.Load()
	rawJWT, err := state.sessionStore.LoadSession(r)
	if err != nil {
		return sessions.State{}, err
	}
	encoder, err := jws.NewHS256Signer(state.sharedKey)
	if err != nil {
		return sessions.State{}, err
	}
	var sessionState sessions.State
	// A JWT that fails signature or shape validation is treated as a client
	// error.
	if err := encoder.Unmarshal([]byte(rawJWT), &sessionState); err != nil {
		return sessions.State{}, httputil.NewError(http.StatusBadRequest, err)
	}
	return sessionState, nil
}
// getUser fetches the user record with the given ID from the databroker.
func (p *Proxy) getUser(ctx context.Context, userID string) (*user.User, error) {
	client := p.state.Load().dataBrokerClient
	return user.Get(ctx, client, userID)
}
// getUserInfoData assembles the data rendered on the user-info page.
// Session and user lookups are best-effort: on failure they fall back to
// stub records built from the known IDs rather than failing the request.
func (p *Proxy) getUserInfoData(r *http.Request) (handlers.UserInfoData, error) {
	opts := p.currentOptions.Load()
	st := p.state.Load()

	data := handlers.UserInfoData{
		CSRFToken:       csrf.Token(r),
		BrandingOptions: opts.BrandingOptions,
	}

	ss, err := p.getSessionState(r)
	if err != nil {
		return handlers.UserInfoData{}, err
	}

	if data.Session, data.IsImpersonated, err = p.getSession(r.Context(), ss.ID); err != nil {
		data.Session = &session.Session{Id: ss.ID}
	}
	if data.User, err = p.getUser(r.Context(), data.Session.GetUserId()); err != nil {
		data.User = &user.User{Id: data.Session.GetUserId()}
	}

	data.WebAuthnCreationOptions, data.WebAuthnRequestOptions, _ = p.webauthn.GetOptions(r)
	data.WebAuthnURL = urlutil.WebAuthnURL(r, urlutil.GetAbsoluteURL(r), st.sharedKey, r.URL.Query())
	p.fillEnterpriseUserInfoData(r.Context(), &data)
	return data, nil
}
// fillEnterpriseUserInfoData augments data with directory information when
// an enterprise deployment is detected. Detection is based on the presence
// of a "dashboard" config record in the databroker. All lookups here are
// best-effort: errors are deliberately ignored and missing records simply
// leave the corresponding fields unset.
func (p *Proxy) fillEnterpriseUserInfoData(ctx context.Context, data *handlers.UserInfoData) {
	client := p.state.Load().dataBrokerClient
	// Presence of this record is the enterprise marker.
	res, _ := client.Get(ctx, &databroker.GetRequest{Type: "type.googleapis.com/pomerium.config.Config", Id: "dashboard"})
	data.IsEnterprise = res.GetRecord() != nil
	if !data.IsEnterprise {
		return
	}
	// Resolve the directory user, then each of the user's groups.
	data.DirectoryUser, _ = databroker.GetViaJSON[directory.User](ctx, client, directory.UserRecordType, data.Session.GetUserId())
	if data.DirectoryUser != nil {
		for _, groupID := range data.DirectoryUser.GroupIDs {
			directoryGroup, _ := databroker.GetViaJSON[directory.Group](ctx, client, directory.GroupRecordType, groupID)
			if directoryGroup != nil {
				data.DirectoryGroups = append(data.DirectoryGroups, directoryGroup)
			}
		}
	}
}
// getWebauthnState builds the webauthn handler state for the incoming
// request: it resolves the caller's session and the configured
// authenticate URLs, then bundles them with shared proxy state.
func (p *Proxy) getWebauthnState(r *http.Request) (*webauthn.State, error) {
	opts := p.currentOptions.Load()
	st := p.state.Load()

	ss, err := p.getSessionState(r)
	if err != nil {
		return nil, err
	}
	s, _, err := p.getSession(r.Context(), ss.ID)
	if err != nil {
		return nil, err
	}

	authURL, err := opts.GetAuthenticateURL()
	if err != nil {
		return nil, err
	}
	internalAuthURL, err := opts.GetInternalAuthenticateURL()
	if err != nil {
		return nil, err
	}

	return &webauthn.State{
		AuthenticateURL:         authURL,
		InternalAuthenticateURL: internalAuthURL,
		SharedKey:               st.sharedKey,
		Client:                  st.dataBrokerClient,
		Session:                 s,
		SessionState:            &ss,
		SessionStore:            st.sessionStore,
		RelyingParty:            webauthnutil.GetRelyingParty(r, st.dataBrokerClient),
		BrandingOptions:         opts.BrandingOptions,
	}, nil
}
|
package utils
import (
"BcRPCCode/entity"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
/**
准备json——rpc通信的数据格式
*/
func RpcRequest(method string, params ...interface{}) []byte {
rpcRequest := entity.RPCRequest{
Id: time.Now().Unix(),
Method: method,
Jsonrpc: "2.0",
}
if params != nil {
rpcRequest.Params = params
}
reqBytes, err := json.Marshal(&rpcRequest) //序列化
if err != nil {
fmt.Println(err.Error())
return nil
}
//2.发送post请求
client := &http.Client{}
request, err := http.NewRequest("POST", RPCURL, bytes.NewBuffer(reqBytes))
if err != nil {
fmt.Println(err.Error())
return nil
}
//请求头设置
request.Header.Add("Encoding", "UTF-8")
request.Header.Add("Content-Type", "application/json")
request.Header.Add("Authorization", "Basic "+Base64())
//java:HttpResponse reponse = client.exwcute(post);
//java返回响应类:HttpResonse
response, err := client.Do(request)
if err != nil {
fmt.Println(err.Error())
return nil
}
resByte, _ := ioutil.ReadAll(response.Body)
code := response.StatusCode
if code == 200 {
fmt.Println("请求成功!")
} else {
fmt.Println("请求失败", code)
}
return resByte
} |
package entity
// SignTransactionEntity models the JSON payload of a sign-transaction
// request: block reference, scopes, the messages to sign and any existing
// signatures.
type SignTransactionEntity struct {
	RefBlockNum    string        `json:"ref_block_num"`
	RefBlockPrefix string        `json:"ref_block_prefix"`
	Expiration     string        `json:"expiration"`
	Scope          [2]string     `json:"scope"`
	ReadScope      []interface{} `json:"read_scope"`
	Messages       [1]Message    `json:"messages"`
	Signatures     []string      `json:"signatures"`
}
// Message is a single action inside a transaction: the contract code and
// action type, who authorizes it, and the hex-encoded action data.
type Message struct {
	Code string `json:"code"`
	Type string `json:"type"`
	// Authorization lists the actor/permission pair that signs the action.
	Authorization [1]struct {
		Actor      string `json:"actor"`
		Permission string `json:"permission"`
	} `json:"authorization"`
	Data string `json:"data"`
}
// SignResult is the JSON response of a sign request: the full signed
// transaction including expiration, block reference, actions and the
// produced signatures.
type SignResult struct {
	Expiration            string        `json:"expiration"`
	RefBlockNum           int           `json:"ref_block_num"`
	RefBlockPrefix        int64         `json:"ref_block_prefix"`
	MaxNetUsageWords      int           `json:"max_net_usage_words"`
	MaxCPUUsageMs         int           `json:"max_cpu_usage_ms"`
	DelaySec              int           `json:"delay_sec"`
	ContextFreeActions    []interface{} `json:"context_free_actions"`
	Actions               []interface{} `json:"actions"`
	TransactionExtensions []interface{} `json:"transaction_extensions"`
	Signatures            []string      `json:"signatures"`
	ContextFreeData       []interface{} `json:"context_free_data"`
}
package service
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"towelong/mogu/model"
"towelong/mogu/utils"
)
const (
	// url is the base endpoint of the moguding API.
	url = "https://api.moguding.net:9000"
)
// MoGuService defines the moguding API operations: login, plan lookup,
// daily sign-in and weekly-diary submission.
type MoGuService interface {
	// MoGuLogin authenticates with account/password and returns a token
	// ("" on failure).
	MoGuLogin(account, password string) string
	// GetPlanID returns the current internship plan ID ("" on failure).
	GetPlanID(token string) string
	// SignIn performs a clock-in/out; returns success and the clock type.
	SignIn(token, planID string) (bool, string)
	// WeeklyDiary submits the weekly report; returns success and a status.
	WeeklyDiary(token, planID string) (bool, string)
}
// moGuService is a stateless (empty) struct that implements MoGuService,
// giving the package an object-oriented entry point.
type moGuService struct {
}
// NewMoGuService returns a ready-to-use MoGuService implementation.
func NewMoGuService() MoGuService {
	return &moGuService{}
}
// MoGuLogin logs in to moguding with the given account (usually a phone
// number) and password and returns the session token, or "" on any failure.
//
// Rewritten with early returns; the original silently ignored unmarshal
// errors inside deeply nested conditionals.
func (m moGuService) MoGuLogin(account, password string) string {
	body := map[string]string{
		"phone":     account,
		"password":  password,
		"loginType": "android",
		"uuid":      "",
	}
	form, _ := json.Marshal(body) // map[string]string cannot fail to marshal

	request, err := http.NewRequest(
		"POST",
		url+"/session/user/v1/login",
		bytes.NewReader(form),
	)
	if err != nil {
		return ""
	}
	request.Header.Add("accept-language", "zh-CN,zh;q=0.8")
	request.Header.Add("user-agent", "Mozilla/5.0 (Linux; U; Android 9; zh-cn; ONEPLUS A6010 Build/PKQ1.180716.001) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1")
	request.Header.Add("content-type", "application/json; charset=UTF-8")
	request.Header.Add("cache-control", "no-cache")

	client := &http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		return ""
	}
	defer resp.Body.Close()

	result, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return ""
	}
	var data model.DataModel
	if err := json.Unmarshal(result, &data); err != nil {
		return ""
	}
	if data.Code != 200 {
		return ""
	}
	return data.Data.Token
}
// GetPlanID fetches the student's internship plan list and returns the
// first plan's ID, or "" on failure or when the list is empty.
//
// Fixes an index-out-of-range panic: the original accessed data.Data[0]
// without checking that any plan was returned.
func (m moGuService) GetPlanID(token string) string {
	body := map[string]string{
		"paramsType": "student",
	}
	form, _ := json.Marshal(body) // map[string]string cannot fail to marshal

	request, err := http.NewRequest(
		"POST",
		url+"/practice/plan/v1/getPlanByStu",
		bytes.NewReader(form),
	)
	if err != nil {
		return ""
	}
	request.Header.Add("user-agent", "Mozilla/5.0 (Linux; U; Android 9; zh-cn; ONEPLUS A6010 Build/PKQ1.180716.001) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1")
	request.Header.Add("content-type", "application/json; charset=UTF-8")
	request.Header.Add("Authorization", token)
	request.Header.Add("roleKey", "student")

	client := &http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		return ""
	}
	defer resp.Body.Close()

	result, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return ""
	}
	var data model.PlanModel
	if err := json.Unmarshal(result, &data); err != nil {
		return ""
	}
	// Guard against an empty plan list (previously panicked on Data[0]).
	if data.Code != 200 || len(data.Data) == 0 {
		return ""
	}
	return data.Data[0].PlanID
}
// SignIn performs the daily clock-in/out for planID. The sign-in location
// is read from the ADDRESS/CITY/PROVINCE/LONGITUDE/LATITUDE environment
// variables. It returns (true, clock type) on success and
// (false, utils.ERROR) otherwise.
//
// Fixes a panic: the original asserted data["code"].(float64) without the
// ok form, which panics when the response has no numeric "code" field.
func (m moGuService) SignIn(token, planID string) (bool, string) {
	address := os.Getenv("ADDRESS")
	city := os.Getenv("CITY")
	province := os.Getenv("PROVINCE")
	longitude := os.Getenv("LONGITUDE")
	latitude := os.Getenv("LATITUDE")
	if address == "" && longitude == "" && city == "" {
		log.Fatal("failed to Load secret ")
	}
	// Morning means clock-in, afternoon means clock-out; TimePicker
	// selects the type from the current time.
	types := utils.TimePicker()
	body := &model.SignInModel{
		Device:         "Android",
		PlanID:         planID,
		Country:        "中国",
		Type:           types,
		AttendanceType: "",
		State:          "NORMAL",
		Address:        address,
		Longitude:      longitude,
		Latitude:       latitude,
		City:           city,
		Province:       province,
	}
	form, _ := json.Marshal(body)

	request, err := http.NewRequest(
		"POST",
		url+"/attendence/clock/v1/save",
		bytes.NewReader(form),
	)
	if err != nil {
		return false, utils.ERROR
	}
	request.Header.Add("user-agent", "Mozilla/5.0 (Linux; U; Android 9; zh-cn; ONEPLUS A6010 Build/PKQ1.180716.001) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1")
	request.Header.Add("content-type", "application/json; charset=UTF-8")
	request.Header.Add("Authorization", token)

	client := &http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		return false, utils.ERROR
	}
	defer resp.Body.Close()

	result, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false, utils.ERROR
	}
	var data map[string]interface{}
	if err := json.Unmarshal(result, &data); err != nil {
		return false, utils.ERROR
	}
	// Missing or non-numeric "code" is treated as failure, not a panic.
	if code, ok := data["code"].(float64); ok && code == 200 {
		return true, types
	}
	fmt.Println(data["msg"])
	return false, utils.ERROR
}
// WeeklyDiary automatically submits the weekly report. It only runs on
// Saturday (UTC) during the END time slot; otherwise it returns
// (false, utils.NOWEEK). On success it returns (true, utils.WEEK).
//
// Fixes: the unchecked data["code"].(float64) assertion could panic on a
// malformed response, and the random sentence was printed before its
// error was checked.
func (m moGuService) WeeklyDiary(token, planID string) (bool, string) {
	if !(time.Now().UTC().Weekday() == time.Saturday && utils.TimePicker() == utils.END) {
		return false, utils.NOWEEK
	}
	sentence, randomErr := utils.RandomSentence()
	if randomErr != nil {
		log.Fatal(randomErr)
	}
	fmt.Println(sentence)

	currentWeek, startTime, endTime := utils.WeeklyPicker(time.Now())
	body := &model.WeekWriterModel{
		AttachmentList: []string{},
		Attachments:    "",
		PlanID:         planID,
		ReportType:     "week",
		Title:          fmt.Sprintf("第%v周周报", currentWeek),
		Content:        sentence,
		Weeks:          fmt.Sprintf("第%v周", currentWeek),
		StartTime:      startTime,
		EndTime:        endTime,
	}
	form, _ := json.Marshal(body)

	request, err := http.NewRequest(
		"POST",
		url+"/practice/paper/v1/save",
		bytes.NewReader(form),
	)
	if err != nil {
		return false, utils.ERROR
	}
	request.Header.Add("user-agent", "Mozilla/5.0 (Linux; U; Android 9; zh-cn; ONEPLUS A6010 Build/PKQ1.180716.001) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1")
	request.Header.Add("content-type", "application/json; charset=UTF-8")
	request.Header.Add("Authorization", token)

	client := &http.Client{}
	resp, err := client.Do(request)
	if err != nil {
		return false, utils.ERROR
	}
	defer resp.Body.Close()

	result, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false, utils.ERROR
	}
	var data map[string]interface{}
	if err := json.Unmarshal(result, &data); err != nil {
		return false, utils.ERROR
	}
	code, ok := data["code"].(float64)
	switch {
	case ok && code == 200:
		return true, utils.WEEK
	case ok && code == 500:
		fmt.Println(data["msg"])
		return false, utils.NOWEEK
	}
	return false, utils.ERROR
}
|
package slacktest
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/slack-go/slack"
)
// TestRTMInfo verifies that the RTM connected event carries the test
// server's bot identity. (Removed the redundant `break` statements — in Go
// a select case ends without falling through.)
func TestRTMInfo(t *testing.T) {
	maxWait := 10 * time.Millisecond
	s := NewTestServer()
	go s.Start()

	api := slack.New("ABCDEFG", slack.OptionAPIURL(s.GetAPIURL()))
	rtm := api.NewRTM()
	go rtm.ManageConnection()

	messageChan := make(chan *slack.ConnectedEvent, 1)
	go func() {
		for msg := range rtm.IncomingEvents {
			if ev, ok := msg.Data.(*slack.ConnectedEvent); ok {
				messageChan <- ev
			}
		}
	}()

	select {
	case m := <-messageChan:
		assert.Equal(t, s.BotID, m.Info.User.ID, "bot id did not match")
		assert.Equal(t, s.BotName, m.Info.User.Name, "bot name did not match")
	case <-time.After(maxWait):
		assert.FailNow(t, "did not get connected event in time")
	}
}
// TestRTMPing waits (up to 45s) for a latency report produced by the RTM
// ping/pong cycle; skipped under -short. Also fixes the assertion message
// typo ("should value a value") and drops the redundant `break`s.
func TestRTMPing(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping timered test")
	}
	maxWait := 45 * time.Second
	s := NewTestServer()
	go s.Start()

	api := slack.New("ABCDEFG", slack.OptionAPIURL(s.GetAPIURL()))
	rtm := api.NewRTM()
	go rtm.ManageConnection()

	messageChan := make(chan *slack.LatencyReport, 1)
	go func() {
		for msg := range rtm.IncomingEvents {
			if ev, ok := msg.Data.(*slack.LatencyReport); ok {
				messageChan <- ev
			}
		}
	}()

	select {
	case m := <-messageChan:
		assert.NotEmpty(t, m.Value, "latency report should have a value")
		assert.True(t, m.Value > 0, "latency report should be greater than 0")
	case <-time.After(maxWait):
		assert.FailNow(t, "did not get latency report in time")
	}
}
// TestRTMDirectMessage checks that a direct message injected by the test
// server arrives over the RTM event stream with the expected user, channel
// and text. (Removed redundant `break`s inside the select.)
func TestRTMDirectMessage(t *testing.T) {
	maxWait := 5 * time.Second
	s := NewTestServer()
	go s.Start()

	api := slack.New("ABCDEFG", slack.OptionAPIURL(s.GetAPIURL()))
	rtm := api.NewRTM()
	go rtm.ManageConnection()

	messageChan := make(chan *slack.MessageEvent, 1)
	go func() {
		for msg := range rtm.IncomingEvents {
			if ev, ok := msg.Data.(*slack.MessageEvent); ok {
				messageChan <- ev
			}
		}
	}()

	s.SendDirectMessageToBot("some text")

	select {
	case m := <-messageChan:
		assert.Equal(t, defaultNonBotUserID, m.User)
		assert.Equal(t, "D024BE91L", m.Channel)
		assert.Equal(t, "some text", m.Text)
	case <-time.After(maxWait):
		assert.FailNow(t, "did not get direct message in time")
	}
}
// TestRTMChannelMessage checks that a channel message injected by the test
// server arrives over the RTM event stream with the expected channel and
// text. (Removed redundant `break`s inside the select.)
func TestRTMChannelMessage(t *testing.T) {
	maxWait := 5 * time.Second
	s := NewTestServer()
	go s.Start()

	api := slack.New("ABCDEFG", slack.OptionAPIURL(s.GetAPIURL()))
	rtm := api.NewRTM()
	go rtm.ManageConnection()

	messageChan := make(chan *slack.MessageEvent, 1)
	go func() {
		for msg := range rtm.IncomingEvents {
			if ev, ok := msg.Data.(*slack.MessageEvent); ok {
				messageChan <- ev
			}
		}
	}()

	s.SendMessageToChannel("#foochan", "some text")

	select {
	case m := <-messageChan:
		assert.Equal(t, "#foochan", m.Channel)
		assert.Equal(t, "some text", m.Text)
	case <-time.After(maxWait):
		assert.FailNow(t, "did not get channel message in time")
	}
}
|
package user
import (
"ego/src/commons"
"fmt"
)
// SelByUnPwdDao looks up a user whose username or email equals un and whose
// password equals pwd. It returns nil when no row matches or on any
// database error.
//
// Fixes: the *Rows result was never closed when no row matched (leaking
// the result set), and the Scan error was silently ignored.
func SelByUnPwdDao(un, pwd string) *TbUser {
	sql := "select * from tb_user where username =? and password=? or email =? and password=?"
	rows, err := commons.Dql(sql, un, pwd, un, pwd)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	// Release the result set on every path.
	defer rows.Close()
	if rows.Next() {
		user := new(TbUser)
		if err := rows.Scan(&user.Id, &user.Username, &user.Password, &user.Phone, &user.Email, &user.Created, &user.Updated); err != nil {
			fmt.Println(err)
			commons.CloseConn()
			return nil
		}
		commons.CloseConn()
		return user
	}
	// NOTE(review): the original only calls commons.CloseConn() on the
	// found path; confirm whether the connection should be closed here too.
	return nil
}
|
/*
Design a random number generator where the i th number has i% chance of occurring for all 0 < i < 14. 0 should have exactly 9% chance of occurring.
The seed for the generator should be the system time. You cannot use a pre-defined function for random number generation.
Basically 1 has 1% chance of occurring, 2 has 2% chance and so on up to 13 having 13% chance of occurring. This is code-golf, so the shortest code wins.
*/
package main
import (
"fmt"
"math/rand"
"time"
)
// main empirically estimates the frequency of each generator value and
// prints it as a percentage, one line per value.
func main() {
	rand.Seed(time.Now().UnixNano())
	// Per the spec above, the generator produces values 0..13 only, so
	// stop before 14 (the original `i <= 14` probed an out-of-spec value).
	for i := 0; i < 14; i++ {
		test(i)
	}
}
// test prints the percentage of trials (out of ten million) in which
// gen(n) returns n.
func test(n int) {
	const trials = 10000000
	hits := 0
	for i := 0; i < trials; i++ {
		if gen(n) == n {
			hits++
		}
	}
	fmt.Printf("%f\n", float64(hits)/float64(trials)*100)
}
func gen(n int) int {
p := 0.09
if n != 0 {
p = float64(n) / 100
}
if rand.Float64() <= p {
return n
}
return rand.Intn(n+1) + n + 1
}
|
package powervs
// Platform stores all the global configuration that all machinesets
// use.
type Platform struct {
	// ServiceInstanceID is the ID of the Power IAAS instance created from the IBM Cloud Catalog
	ServiceInstanceID string `json:"serviceInstanceID"`

	// PowerVSResourceGroup is the resource group in which Power VS resources will be created.
	PowerVSResourceGroup string `json:"powervsResourceGroup"`

	// Region specifies the IBM Cloud colo region where the cluster will be created.
	Region string `json:"region,omitempty"`

	// Zone specifies the IBM Cloud colo zone where the cluster will be created.
	// At this time, only single-zone clusters are supported.
	Zone string `json:"zone"`

	// VPCRegion specifies the IBM Cloud region in which to create VPC resources.
	// Leave unset to allow installer to select the closest VPC region.
	//
	// +optional
	VPCRegion string `json:"vpcRegion,omitempty"`

	// UserID is the login for the user's IBM Cloud account.
	UserID string `json:"userID"`

	// VPCName is the name of a pre-created VPC inside IBM Cloud.
	//
	// +optional
	VPCName string `json:"vpcName,omitempty"`

	// VPCSubnets specifies existing subnets (by ID) where cluster
	// resources will be created. Leave unset to have the installer
	// create subnets in a new VPC on your behalf.
	//
	// +optional
	VPCSubnets []string `json:"vpcSubnets,omitempty"`

	// PVSNetworkName specifies an existing network within the Power VS Service Instance.
	//
	// +optional
	PVSNetworkName string `json:"pvsNetworkName,omitempty"`

	// ClusterOSImage is a pre-created Power VS boot image that overrides the
	// default image for cluster nodes.
	//
	// +optional
	ClusterOSImage string `json:"clusterOSImage,omitempty"`

	// DefaultMachinePlatform is the default configuration used when
	// installing on Power VS for machine pools which do not define their own
	// platform configuration.
	// +optional
	DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"`

	// CloudConnectionName is the name of an existing Power VS Cloud connection.
	// If empty, one is created by the installer.
	// +optional
	CloudConnectionName string `json:"cloudConnectionName,omitempty"`
}
|
package wsStorage
import "errors"
var (
ErrIsExisted = errors.New("ws connection is existed")
ErrConnectionNotFound = errors.New("ws connection is not found")
ErrInvalidDuration = errors.New("invalid duration value")
)
|
package ircserver
import (
"testing"
"time"
"github.com/robustirc/robustirc/internal/config"
"github.com/robustirc/robustirc/internal/robust"
"gopkg.in/sorcix/irc.v2"
)
// stdIRCServerWithServices returns the standard test server extended with
// an authenticated services server link; the returned map gains a
// "services" session id.
func stdIRCServerWithServices() (*IRCServer, map[string]robust.Id) {
	srv, ids := stdIRCServer()
	srv.Config.IRC.Services = append(srv.Config.IRC.Services, config.Service{
		Password: "mypass",
	})

	sessionID := robust.Id{Id: 0x13c6cdee3e749faf}
	ids["services"] = sessionID
	srv.CreateSession(sessionID, "auth-server", time.Unix(0, int64(sessionID.Id)))
	srv.ProcessMessage(&robust.Message{Session: sessionID}, irc.ParseMessage("PASS :services=mypass"))
	srv.ProcessMessage(&robust.Message{Session: sessionID}, irc.ParseMessage("SERVER services.robustirc.net 1 :Services for IRC Networks"))
	return srv, ids
}
// TestServerHandshake verifies the services server-link handshake: a wrong
// PASS causes the following SERVER command to be rejected, while the
// correct PASS makes SERVER succeed and triggers a burst of the current
// network state (server line, nicks, channel memberships).
func TestServerHandshake(t *testing.T) {
	i, ids := stdIRCServer()
	i.Config.IRC.Services = append(i.Config.IRC.Services, config.Service{
		Password: "mypass",
	})
	// Seed some state (an operator and a joined channel) that must show
	// up in the burst sent to the services link.
	i.ProcessMessage(&robust.Message{Session: ids["secure"]}, irc.ParseMessage("OPER mero foo"))
	i.ProcessMessage(&robust.Message{Session: ids["mero"]}, irc.ParseMessage("JOIN #test"))
	ids["services"] = robust.Id{Id: 0x13c6cdee3e749faf}
	i.CreateSession(ids["services"], "auth-server", time.Unix(0, int64(ids["services"].Id)))
	// PASS itself produces no output even with a wrong password…
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage("PASS :services=wrong")),
		[]*irc.Message{})
	// …but the subsequent SERVER is rejected.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage("SERVER services.robustirc.net 1 :Services for IRC Networks")),
		"ERROR :Invalid password")
	// With the correct password, SERVER succeeds and bursts the state.
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage("PASS :services=mypass")),
		[]*irc.Message{})
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage("SERVER services.robustirc.net 1 :Services for IRC Networks")),
		[]*irc.Message{
			irc.ParseMessage("SERVER robustirc.net 1 23"),
			irc.ParseMessage("NICK mero 1 1 foo robust/0x13b5aa0a2bcfb8ae robustirc.net 0 + :Axel Wagner"),
			irc.ParseMessage(":robustirc.net SJOIN 1 #test :@mero"),
			irc.ParseMessage("NICK sECuRE 1 1 blah robust/0x13b5aa0a2bcfb8ad robustirc.net 0 +o :Michael Stapelberg"),
			irc.ParseMessage("NICK xeen 1 1 baz robust/0x13b5aa0a2bcfb8af robustirc.net 0 + :Iks Enn"),
		})
}
// TestServerSjoin verifies that SJOIN is rejected as an unknown command
// when issued by a regular client session.
func TestServerSjoin(t *testing.T) {
	srv, ids := stdIRCServerWithServices()
	got := srv.ProcessMessage(&robust.Message{Session: ids["secure"]}, irc.ParseMessage("SJOIN 1 #test :ChanServ"))
	mustMatchMsg(t, got, ":robustirc.net 421 sECuRE SJOIN :Unknown command")
}
// TestServerKickKill exercises services-issued KICK and KILL: successful
// kicks, error replies for absent users/channels, parameter validation for
// KILL, and kills issued both by a services client and by the services
// server itself.
func TestServerKickKill(t *testing.T) {
	i, ids := stdIRCServerWithServices()
	i.ProcessMessage(&robust.Message{Session: ids["secure"]}, irc.ParseMessage("JOIN #test"))
	i.ProcessMessage(&robust.Message{Session: ids["mero"]}, irc.ParseMessage("JOIN #test"))
	// Register the pseudo-clients the services link acts through.
	i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage("NICK ChanServ 1 1422134861 services robustirc.net services.robustirc.net 0 :ChanServ"))
	i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage("NICK NickServ 1 1422134861 services robustirc.net services.robustirc.net 0 :NickServ"))
	// A valid KICK is relayed with the ChanServ prefix.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ KICK #test sECuRE :bye")),
		":ChanServ!services@services KICK #test sECuRE :bye")
	// Kicking the same user again: they are no longer on the channel.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ KICK #test sECuRE :bye")),
		":robustirc.net 441 ChanServ sECuRE #test :They aren't on that channel")
	// Kicking from a non-existent channel.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ KICK #toast sECuRE :bye")),
		":robustirc.net 403 ChanServ #toast :No such nick/channel")
	// Channel name matching is case-insensitive for JOIN.
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["xeen"]}, irc.ParseMessage("JOIN #TEST")),
		[]*irc.Message{
			irc.ParseMessage(":xeen!baz@robust/0x13b5aa0a2bcfb8af JOIN :#TEST"),
			irc.ParseMessage(":robustirc.net SJOIN 1 #TEST :xeen"),
			irc.ParseMessage(":robustirc.net 324 xeen #TEST +nt"),
			irc.ParseMessage(":robustirc.net 331 xeen #TEST :No topic is set"),
			irc.ParseMessage(":robustirc.net 353 xeen = #TEST :mero xeen"),
			irc.ParseMessage(":robustirc.net 366 xeen #TEST :End of /NAMES list."),
		})
	// KILL without a reason parameter is rejected.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":NickServ KILL mero")),
		":robustirc.net 461 * KILL :Not enough parameters")
	// KILL on an unknown nick.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":NickServ KILL you :nope")),
		":robustirc.net 401 * you :No such nick/channel")
	// KILL issued by a services pseudo-client.
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":NickServ KILL mero :Too many wrong passwords")),
		[]*irc.Message{
			irc.ParseMessage(":NickServ!services@robust/0x13c6cdee3e749faf KILL mero :ircd!robust/0x13c6cdee3e749faf!NickServ (Too many wrong passwords)"),
			irc.ParseMessage(":mero!foo@robust/0x13b5aa0a2bcfb8ae QUIT :Killed: Too many wrong passwords"),
		})
	// KILL issued directly by the services server (nick matching is
	// case-insensitive: "secure" resolves to sECuRE).
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":services.robustirc.net KILL secure :Too many wrong passwords")),
		[]*irc.Message{
			irc.ParseMessage(":services.robustirc.net KILL sECuRE :ircd!services.robustirc.net (Too many wrong passwords)"),
			irc.ParseMessage(":sECuRE!blah@robust/0x13b5aa0a2bcfb8ad QUIT :Killed: Too many wrong passwords"),
		})
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ KICK #test xeen :bye")),
		":ChanServ!services@services KICK #test xeen :bye")
}
|
package socks
// Channel wraps an embedded Session; it currently adds no fields or
// behavior of its own, serving as a distinct type for channel handling.
type Channel struct {
	Session
}
|
package main
import (
"encoding/csv"
"fmt"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat/distmv"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/plotutil"
"gonum.org/v1/plot/vg"
"log"
"math"
"os"
"strconv"
"time"
)
// randSource seeds the random source used by the distmv distributions
// below from the wall clock, so each run produces different noise.
var randSource = rand.NewSource(uint64(time.Now().UnixNano()))
// main runs a 2-D constant-velocity Kalman filter demo: it simulates a
// true trajectory with process noise, derives noisy position
// measurements, runs the predict/update recursion, then plots the result
// and exports it to CSV. The state vector is [x, vx, y, vy]^T.
func main() {
	// parameters
	modelStdDev := 0.1       // process-noise standard deviation
	measurementStdDev := 0.3 // measurement-noise standard deviation
	const numDataPoints = 10
	T := 1.0 // sampling interval
	// define A (state transition), Q (process noise), H (measurement),
	// and R (measurement noise)
	A := mat.NewDense(4, 4, []float64{1, T, 0, 0, 0, 1, 0, 0, 0, 0, 1, T, 0, 0, 0, 1})
	// Q starts as the T-dependent block structure and is scaled by the
	// model variance just below.
	Q := mat.NewSymDense(4, []float64{math.Pow(T, 3) / 3.0, math.Pow(T, 2) / 2.0, 0, 0,
		math.Pow(T, 2) / 2.0, T, 0, 0,
		0, 0, math.Pow(T, 3) / 3.0, math.Pow(T, 2) / 2.0,
		0, 0, math.Pow(T, 2) / 2.0, T})
	Q.ScaleSym(math.Pow(modelStdDev, 2), Q)
	H := mat.NewDense(2, 4, []float64{1, 0, 0, 0, 0, 0, 1, 0})
	R := scaledId(2, math.Pow(measurementStdDev, 2))
	// generate and store all random data in matrix
	//constData := 100.0
	//dataDist := Gaussian{mean: constData, stdDev: 5}
	data := make([]mat.Matrix, numDataPoints)      // true states
	noisyData := make([]mat.Matrix, numDataPoints) // noisy measurements
	x0 := mat.NewDense(4, 1, []float64{0, 0.3, 0, .3}) // initial state
	data[0] = x0
	noisyData[0] = x0
	for i := 1; i < numDataPoints; i++ {
		data[i] = getNewState(data[i-1], A, Q) // todo: see if i broke this (always reassigning)
		// Only the positions (rows 0 and 2) are observed.
		measuredData := mat.NewDense(2, 1, []float64{data[i].At(0, 0), data[i].At(2, 0)})
		// Identity "transition" so getNewState just adds measurement noise.
		noisyData[i] = getNewState(measuredData, scaledId(2, 1), R) // todo: fix workaround
	}
	// initialize slices to store states and covariances & set initial values
	states := make([]mat.Matrix, numDataPoints)
	states[0] = x0
	// initialize variables for the innovation (v), predicted measurement
	// covariance (S), and Kalman gain (K)
	v := mat.NewDense(2, 1, nil)
	S := mat.NewDense(2, 2, nil)
	S_inv := mat.NewDense(2, 2, nil)
	K := mat.NewDense(4, 2, nil)
	modelCovars := make([]mat.Matrix, numDataPoints)
	initalModelCovar := 0.0
	modelCovars[0] = mat.NewDense(4, 4, getConstList(16, initalModelCovar))
	for i := 1; i < numDataPoints; i++ {
		// predict step: x_pred = A*x, P_pred = A*P*A^T + Q
		x_pred := mat.NewDense(4, 1, nil)
		x_pred.Mul(A, states[i-1])
		P_pred := mat.NewDense(4, 4, nil)
		P_pred.Product(A, modelCovars[i-1], A.T())
		P_pred.Add(P_pred, Q)
		// update step
		// innovation: v = z - H*x_pred
		v.Mul(H, x_pred)
		v.Sub(noisyData[i], v)
		// innovation covariance: S = H*P_pred*H^T + R
		S.Product(H, P_pred, H.T())
		S.Add(S, R)
		S_inv.Inverse(S)
		// Kalman gain: K = P_pred*H^T*S^-1
		K.Product(P_pred, H.T(), S_inv)
		// state update: x = x_pred + K*v
		x_curr := mat.NewDense(4, 1, nil)
		x_curr.Mul(K, v)
		x_curr.Add(x_pred, x_curr)
		states[i] = x_curr
		// covariance update: P = P_pred - K*S*K^T
		P_curr := mat.NewDense(4, 4, nil)
		P_curr.Product(K, S, K.T())
		P_curr.Sub(P_pred, P_curr)
		modelCovars[i] = P_curr
	}
	plotKalman(noisyData, data, states)
	exportToCSV(noisyData, data, states)
}
// getNewState draws x' = stateChange*x + w where w ~ N(0, covariances).
// It is used both to propagate the true state (data generation) and to
// add noise to a measurement (with an identity stateChange).
//
// Fixes the ignored ok result of distmv.NewNormal: a non-positive-definite
// covariance previously led to a nil-pointer panic below.
func getNewState(x, stateChange mat.Matrix, covariances mat.Symmetric) mat.Matrix {
	normal, ok := distmv.NewNormal(getConstList(numRows(covariances), 0), covariances, randSource)
	if !ok {
		log.Fatalln("getNewState: covariance matrix is not positive definite")
	}
	noise := mat.NewDense(numRows(covariances), numCols(x), normal.Rand(nil))
	next := mat.NewDense(numRows(x), numCols(x), nil)
	next.Mul(stateChange, x)
	next.Add(next, noise)
	return next
}
// plotKalman renders measurements, ground truth and filter estimates to
// points.png. Errors from plot creation, series addition and saving are
// now reported instead of silently ignored.
func plotKalman(noisyData, data, states []mat.Matrix) {
	p, err := plot.New()
	if err != nil {
		log.Fatalln("error creating plot:", err)
	}
	p.Title.Text = "Kalman Filter Example"
	p.X.Label.Text = "X"
	p.Y.Label.Text = "Y"
	if err := plotutil.AddLinePoints(p,
		"Actual", getPoints(data),
		"Predictions", getPoints(states),
		"Measurements", getPoints(noisyData)); err != nil {
		log.Fatalln("error adding plot series:", err)
	}
	// Save the plot to a PNG file.
	if err := p.Save(4*vg.Inch, 4*vg.Inch, "points.png"); err != nil {
		log.Fatalln("error saving plot:", err)
	}
}
// exportToCSV writes ground truth, measurements and filter predictions to
// result.csv, one row per time step plus a header row.
func exportToCSV(noisyData, data, states []mat.Matrix) {
	dataX, dataY := getXYLists(data)
	noisyDataX, noisyDataY := getXYLists(noisyData)
	statesX, statesY := getXYLists(states)
	csvData := make([][]string, len(data)+1)
	csvData[0] = []string{"dataX", "dataY", "measurementX", "measurementY", "predictionX", "predictionY"}
	for i := range data {
		csvData[i+1] = []string{dataX[i], dataY[i], noisyDataX[i], noisyDataY[i], statesX[i], statesY[i]}
	}
	// The original ignored the error from os.Create, silently producing
	// no output when the file could not be created.
	file, err := os.Create("result.csv")
	if err != nil {
		log.Fatalln("error creating csv:", err)
	}
	defer file.Close()
	w := csv.NewWriter(file)
	w.WriteAll(csvData) // calls Flush internally
	if err := w.Error(); err != nil {
		log.Fatalln("error writing csv:", err)
	}
}
// getXYLists converts each state vector into string-formatted (x, y)
// coordinates for CSV output: row 0 is x, the middle row is y.
func getXYLists(matList []mat.Matrix) ([]string, []string) {
	xs := make([]string, len(matList))
	ys := make([]string, len(matList))
	for i, m := range matList {
		xs[i] = strconv.FormatFloat(m.At(0, 0), 'f', -1, 64)
		ys[i] = strconv.FormatFloat(m.At(numRows(m)/2, 0), 'f', -1, 64)
	}
	return xs, ys
}
// printMat pretty-prints a matrix without eliding any rows or columns.
func printMat(m mat.Matrix) {
	formatted := mat.Formatted(m, mat.Prefix(""), mat.Excerpt(0))
	fmt.Printf("%v\n", formatted)
}
// numRows reports the number of rows of m.
func numRows(m mat.Matrix) int {
	r, _ := m.Dims()
	return r
}
// numCols reports the number of columns of m.
func numCols(m mat.Matrix) int {
	_, c := m.Dims()
	return c
}
// scaledId returns scalar times the n-by-n identity matrix as a symmetric
// matrix. Setting the diagonal to scalar directly is equivalent to the
// set-ones-then-scale approach, since all off-diagonal entries are zero.
func scaledId(n int, scalar float64) mat.Symmetric {
	id := mat.NewSymDense(n, nil)
	for i := 0; i < n; i++ {
		id.SetSym(i, i, scalar)
	}
	return id
}
// getConstList returns a slice of n copies of value.
func getConstList(n int, value float64) []float64 {
	out := make([]float64, n)
	for i := range out {
		out[i] = value
	}
	return out
}
func getPoints (list []mat.Matrix) plotter.XYs {
pts := make(plotter.XYs, len(list))
for i := range pts {
pts[i].X = list[i].At(0,0)
pts[i].Y = list[i].At(numRows(list[i])/2,0)
}
return pts
} |
/*
Package rivescript implements the RiveScript chatbot scripting language.
About RiveScript
RiveScript is a scripting language for authoring chatbots. It has a very
simple syntax and is designed to be easy to read and fast to write.
A simple example of what RiveScript looks like:
+ hello bot
- Hello human.
This matches a user's message of "hello bot" and would reply "Hello human."
Or for a slightly more complicated example:
+ my name is *
* <formal> == <bot name> => <set name=<formal>>Wow, we have the same name!
* <get name> != undefined => <set name=<formal>>Did you change your name?
- <set name=<formal>>Nice to meet you, <get name>!
The official website for RiveScript is https://www.rivescript.com/
To test drive RiveScript in your web browser, try the
[RiveScript Playground](https://play.rivescript.com/).
Object Macros
A common feature in many RiveScript implementations is the object macro, which
enables you to write dynamic program code (in your favorite programming
language) to add extra capabilities to your bot. For example, your bot could
answer a question of `what is the weather like in _____` by running some
code to look up their answer via a web API.
The Go version of RiveScript has support for object macros written in Go
(at compile time of your application). It also has optional support for
JavaScript object macros using the Otto library.
UTF-8 Support
UTF-8 support in RiveScript is considered an experimental feature. It is
disabled by default. Enable it by setting `RiveScript.SetUTF8(true)`.
By default (without UTF-8 mode on), triggers may only contain basic ASCII
characters (no foreign characters), and the user's message is stripped of all
characters except letters, numbers and spaces. This means that, for example,
you can't capture a user's e-mail address in a RiveScript reply, because of
the @ and . characters.
When UTF-8 mode is enabled, these restrictions are lifted. Triggers are only
limited to not contain certain metacharacters like the backslash, and the
user's message is only stripped of backslashes and HTML angled brackets
(to protect from obvious XSS if you use RiveScript in a web application).
Additionally, common punctuation characters are stripped out, with the default
set being `/[.,!?;:]/g`. This can be overridden by providing a new regexp
string literal to the `RiveScript.SetUnicodePunctuation` function. Example:
// Make a new bot with UTF-8 mode enabled.
bot := rivescript.New(config.UTF8())
// Override the punctuation characters that get stripped from the
// user's message.
bot.SetUnicodePunctuation(`[.,!?;:]`);
The `<star>` tags in RiveScript will capture the user's "raw" input, so you can
write replies to get the user's e-mail address or store foreign characters in
their name.
See Also
The official homepage of RiveScript, https://www.rivescript.com/
*/
package rivescript
|
package slices
import (
"testing"
)
func TestOccurences(t *testing.T) {
a := []int{0, 1, 2, 3, 4}
b := []int{0, 0, 0, 1}
var actual int
actual = Occurences(a, 0)
if actual != 1 {
t.Errorf("Error: expected [ %d ] got [ %d ]", 1, actual)
}
actual = Occurences(b, 0)
if actual != 3 {
t.Errorf("Error: expected [ %d ] got [ %d ]", 3, actual)
}
actual = Occurences(b, 5)
if actual != 0 {
t.Errorf("Error: expected [ %d ] got [ %d ]", 0, actual)
}
} |
package main
import (
"flag"
"fmt"
"github.com/tengla/fibro/block"
)
// difficulty is the proof-of-work mining difficulty, settable via the
// -difficulty flag (default 2).
var difficulty = flag.Int("difficulty", 2, "The mining difficulty")
// main builds a chain of six blocks at the configured difficulty and
// prints each block as JSON.
func main() {
	flag.Parse()
	chain := block.CreateChain(*difficulty)

	payloads := []map[string]string{
		{"Name": "I am the first"},
		{"Name": "I am the second"},
		{"Name": "I am the third", "Note": "Funny"},
		{"Name": "I am the fourth"},
		{"Name": "I am the fifth"},
		{"Name": "I am the sixth"},
	}
	for _, payload := range payloads {
		chain.AddBlock(block.NewBlock(payload))
	}

	chain.EveryBlock(func(b *block.Block) {
		fmt.Printf("%s\n", b.ToJSON())
	})
}
|
package _297_Serialize_and_Deserialize_Binary_Tree
import (
"fmt"
"strconv"
"strings"
)
// TreeNode is a binary tree node holding an int value.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
type Codec struct {
}
func Constructor() Codec {
return Codec{}
}
// serialize encodes root into a single string using the level-order (BFS)
// encoding; pre-order and post-order encoders are implemented below as
// alternatives.
func (c *Codec) serialize(root *TreeNode) string {
	return BFSSerialize(root)
}
// deserialize decodes a string produced by serialize back into a tree.
// It must use the decoder matching the encoding chosen in serialize
// (currently level-order).
func (c *Codec) deserialize(data string) *TreeNode {
	return BFSDeserialize(data)
}
// postOrderSerialize encodes the tree post-order: left subtree, right
// subtree, then ",<val>". A nil node contributes a bare ",".
func postOrderSerialize(root *TreeNode) string {
	if root == nil {
		return ","
	}
	return postOrderSerialize(root.Left) +
		postOrderSerialize(root.Right) +
		fmt.Sprintf(",%d", root.Val)
}
// postOrderDeserialize rebuilds a tree from a post-order encoding by
// scanning the string backwards from index *cur. Each value token is
// ",<val>" and a lone "," marks a nil node; since post-order emits
// left, right, value, reading backwards reconstructs the right subtree
// first. On return *cur points just before the consumed token.
func postOrderDeserialize(data string, cur *int) *TreeNode {
	if string(data[*cur]) == "," {
		// Bare comma: nil node marker.
		*cur -= 1
		return nil
	}
	// Walk left to the comma that introduces this number.
	begin := *cur
	for begin >= 0 && string(data[begin]) != "," {
		begin--
	}
	// Parse error cannot occur for strings produced by postOrderSerialize;
	// deliberately ignored.
	num, _ := strconv.ParseInt(data[begin+1:*cur+1], 10, 64)
	node := &TreeNode{Val: int(num)}
	*cur = begin - 1
	node.Right = postOrderDeserialize(data, cur)
	node.Left = postOrderDeserialize(data, cur)
	return node
}
// preOrderSerialize encodes the tree pre-order: "<val>," followed by the
// left and right subtrees. A nil node contributes a bare ",".
func preOrderSerialize(root *TreeNode) string {
	if root == nil {
		return ","
	}
	return fmt.Sprintf("%d,", root.Val) +
		preOrderSerialize(root.Left) +
		preOrderSerialize(root.Right)
}
// preOrderDeserialize rebuilds a tree from a pre-order encoding by scanning
// forward from index *cur. Each node is encoded as "<val>," with a bare ","
// marking nil, so the value is read first, then the left and right subtrees.
// On return *cur points just past the consumed token.
func preOrderDeserialize(data string, cur *int) *TreeNode {
	if string(data[*cur]) == "," { // bare comma: nil node marker
		*cur += 1
		return nil
	}
	// Advance to the comma terminating this number.
	tail := *cur
	for tail < len(data) && string(data[tail]) != "," {
		tail++
	}
	// Parse error cannot occur for strings produced by preOrderSerialize;
	// deliberately ignored.
	num, _ := strconv.ParseInt(data[*cur:tail], 10, 64)
	node := &TreeNode{Val: int(num)}
	*cur = tail + 1
	node.Left = preOrderDeserialize(data, cur)
	node.Right = preOrderDeserialize(data, cur)
	return node
}
// BFSSerialize encodes the tree in level order as comma-terminated values,
// with an empty field for each nil child ("," alone encodes the empty tree).
// Uses a strings.Builder instead of repeated string concatenation, which was
// O(n²) in the original; output is byte-identical.
func BFSSerialize(root *TreeNode) string {
	var sb strings.Builder
	q := []*TreeNode{root}
	for len(q) != 0 {
		node := q[0]
		q = q[1:]
		if node != nil {
			sb.WriteString(strconv.Itoa(node.Val))
			sb.WriteByte(',')
			// Enqueue both children (possibly nil) so their slots are encoded.
			q = append(q, node.Left, node.Right)
		} else {
			sb.WriteByte(',')
		}
	}
	return sb.String()
}
// BFSDeserialize rebuilds a tree from the level-order encoding produced by
// BFSSerialize: comma-separated values with empty fields for nil children;
// "," alone encodes the empty tree. Children are consumed in pairs,
// mirroring how BFSSerialize enqueues Left then Right for every non-nil node.
func BFSDeserialize(data string) *TreeNode {
	if data == "," {
		return nil
	}
	eles := strings.Split(data, ",")
	// First field is the root value; parse errors cannot occur for strings
	// produced by BFSSerialize and are deliberately ignored throughout.
	num, _ := strconv.ParseInt(eles[0], 10, 64)
	eles = eles[1:]
	root := &TreeNode{Val: int(num)}
	q := []*TreeNode{root}
	for len(q) > 0 && len(eles) >= 2 {
		preNode := q[0]
		q = q[1:]
		// Each dequeued node owns the next two fields: left child, right child.
		lv, rv := eles[0], eles[1]
		eles = eles[2:]
		if lv != "" {
			num, _ := strconv.ParseInt(lv, 10, 64)
			preNode.Left = &TreeNode{Val: int(num)}
			q = append(q, preNode.Left)
		}
		if rv != "" {
			num, _ := strconv.ParseInt(rv, 10, 64)
			preNode.Right = &TreeNode{Val: int(num)}
			q = append(q, preNode.Right)
		}
	}
	return root
}
/**
* Your Codec object will be instantiated and called as such:
* ser := Constructor();
* deser := Constructor();
* data := ser.serialize(root);
* ans := deser.deserialize(data);
*/
|
// address bus implementation with multiple components (RAM,ROM,PIA) attached
package addressbus
// MultiBus dispatches reads and writes to the component (RAM, ROM, PIA, ...)
// registered for the address's block.
type MultiBus struct {
	addressMap map[uint16]BusAddressingInternal // block index -> owning component
	blockSize int // block granularity of the address map, in addresses
	components []BusAddressingInternal // all registered components, in registration order
}
// InitBus resets the bus to an empty state with the given address-map block
// size; it must be called before registering components.
func (b *MultiBus) InitBus(addressMapBlockSize int) {
	b.blockSize = addressMapBlockSize
	b.addressMap = map[uint16]BusAddressingInternal{}
	b.components = []BusAddressingInternal{}
}
// RegisterComponent attaches component to the bus and maps every block
// covered by [addressFrom, addressTo] to it. Overlapping registrations
// silently overwrite earlier ones, block by block.
func (b *MultiBus) RegisterComponent(addressFrom int, addressTo int, component BusAddressingInternal) {
	b.components = append(b.components, component)
	first := uint16(addressFrom / b.blockSize)
	last := uint16(addressTo / b.blockSize)
	for blk := first; blk <= last; blk++ {
		b.addressMap[blk] = component
	}
}
// Read returns the byte at addr from the component mapped to the address's
// block, or an addressingError if no component is registered there.
func (b *MultiBus) Read(addr uint16) (byte, error) {
	component, ok := b.addressMap[addr/uint16(b.blockSize)]
	if !ok {
		return 0, &addressingError{Op: "Read", Address: addr}
	}
	return component.Read(addr), nil
}
// Write stores data at addr via the component mapped to the address's block,
// or returns an addressingError if no component is registered there.
func (b *MultiBus) Write(addr uint16, data byte) error {
	component, ok := b.addressMap[addr/uint16(b.blockSize)]
	if !ok {
		return &addressingError{Op: "Write", Address: addr}
	}
	component.Write(addr, data)
	return nil
}
|
package transform
import (
"github.com/jwowillo/viztransform/geometry"
)
// Apply runs p through every line-reflection making up the Transformation,
// in order, and returns the resulting geometry.Point.
func Apply(t Transformation, p geometry.Point) geometry.Point {
	out := p
	for _, line := range t {
		out = apply(line, out)
	}
	return out
}
// apply reflects p across line l (see TypeLineReflection): find the foot of
// the perpendicular from p to l, then move p twice the distance to that foot.
func apply(l geometry.Line, p geometry.Point) geometry.Point {
	foot := geometry.MustPoint(geometry.Intersection(
		l,
		geometry.PerpendicularThroughPoint(l, p),
	))
	dx := foot.X - p.X
	dy := foot.Y - p.Y
	return geometry.Point{X: p.X + 2*dx, Y: p.Y + 2*dy}
}
// Compose concatenates the line-reflections of every given Transformation,
// in order, into a single Transformation.
func Compose(ts ...Transformation) Transformation {
	var all Transformation
	for i := range ts {
		all = append(all, ts[i]...)
	}
	return all
}
// NoTransformation is a Transformation-constructor that creates a
// Transformation with TypeNoTransformation that does nothing to
// geometry.Points.
//
// The result is an empty (but non-nil) list of line-reflections.
func NoTransformation() Transformation {
	return Transformation{}
}
// LineReflection is a Transformation-constructor that creates a Transformation
// with TypeLineReflection that reflects geometry.Points about geometry.Line l
// as described by TypeLineReflection.
//
// The Transformation consists of the single line l.
func LineReflection(l geometry.Line) Transformation {
	return Transformation{l}
}
// Translation is a Transformation-constructor that creates a Transformation
// with TypeTranslation that translates geometry.Points by the geometry.Vector v
// as described by TypeTranslation.
//
// Returns NoTransformation() if v is length 0.
//
// Implementation: a translation by v equals two reflections across parallel
// lines perpendicular to v that are half of v apart.
func Translation(v geometry.Vector) Transformation {
	length := geometry.Length(v)
	if geometry.IsZero(length) {
		return NoTransformation()
	}
	// NOTE(review): this assumes geometry.Scale scales v *to* the given
	// length (here half v's original length), not *by* a factor — confirm
	// against the geometry package's Scale contract.
	v = geometry.MustVector(geometry.Scale(v, length/2))
	a, b := geometry.Point{X: 0, Y: 0}, geometry.Point{X: v.I, Y: v.J}
	l := geometry.MustLine(geometry.NewLineFromPoints(a, b))
	// Reflect across the perpendiculars at the tail and at the half-vector head.
	return Transformation{
		geometry.PerpendicularThroughPoint(l, a),
		geometry.PerpendicularThroughPoint(l, b),
	}
}
// Rotation is a Transformation-constructor that creates a Transformation with
// TypeRotation that rotates geometry.Points by geometry.Angle rads
// counter-clockwise around geometry.Point p.
//
// Returns NoTransformation() if rads is 0.
//
// A rotation by rads about p equals two reflections across lines through p
// separated by an angle of rads/2.
func Rotation(p geometry.Point, rads geometry.Angle) Transformation {
	if geometry.IsZero(geometry.Number(rads)) {
		return NoTransformation()
	}
	horizontal := geometry.MustLine(geometry.NewLineFromPoints(
		p,
		geometry.Point{X: p.X + 1, Y: p.Y},
	))
	rotated := geometry.Rotate(horizontal, p, rads/2)
	return Transformation{horizontal, rotated}
}
// GlideReflection is a Transformation-constructor that creates a Transformation
// with TypeGlideReflection: a line-reflection about ref composed with a
// translation by the projection of geometry.Vector v onto ref.
//
// Returns LineReflection(ref) if v is length 0, since a Transformation can
// still be determined in that case.
func GlideReflection(ref geometry.Line, v geometry.Vector) Transformation {
	origin := geometry.Point{X: 0, Y: 0}
	head := geometry.Point{X: v.I, Y: v.J}
	// The shortest vector between these two perpendiculars is v projected
	// onto ref.
	from := geometry.PerpendicularThroughPoint(ref, origin)
	to := geometry.PerpendicularThroughPoint(ref, head)
	return Compose(
		LineReflection(ref),
		Translation(geometry.ShortestVector(from, to)),
	)
}
|
package cmdutils
import (
"github.com/evleria/quiz-cli/pkg/config"
"github.com/evleria/quiz-cli/pkg/iostreams"
)
// Factory bundles the shared dependencies handed to each CLI command.
type Factory struct {
	IOStreams iostreams.IOStreams // terminal input/output streams
	ConfigFunc func() config.Config // lazily resolves the CLI configuration
}
|
package typepublickey
import (
vocab "github.com/go-fed/activity/streams/vocab"
)
// A public key represents a public cryptographical key for a user
type ActivityStreamsPublicKey struct {
	ActivityStreamsId vocab.ActivityStreamsIdProperty // "id" property
	ActivityStreamsOwner vocab.ActivityStreamsOwnerProperty // "owner" property
	ActivityStreamsPublicKeyPem vocab.ActivityStreamsPublicKeyPemProperty // "publicKeyPem" property
	ActivityStreamsType vocab.ActivityStreamsTypeProperty // "type" property
	alias string // JSON-LD alias for the ActivityStreams namespace
	unknown map[string]interface{} // properties not recognized by the code generator, preserved verbatim
}
// ActivityStreamsPublicKeyExtends returns true if the PublicKey type extends
// from the other type. PublicKey extends no other type, so this is always
// false.
func ActivityStreamsPublicKeyExtends(other vocab.Type) bool {
	// Shortcut implementation: this does not extend anything.
	return false
}
// DeserializePublicKey creates a PublicKey from a map representation that has
// been unmarshalled from a text or binary format.
//
// aliasMap maps JSON-LD context URIs to their aliases; only the
// ActivityStreams namespace alias is consulted. Any property the generator
// does not know (other than id/owner/publicKeyPem/type) is preserved
// verbatim in the unknown map.
func DeserializePublicKey(m map[string]interface{}, aliasMap map[string]string) (*ActivityStreamsPublicKey, error) {
	alias := ""
	if a, ok := aliasMap["https://www.w3.org/ns/activitystreams"]; ok {
		alias = a
	}
	this := &ActivityStreamsPublicKey{
		alias:   alias,
		unknown: make(map[string]interface{}),
	}
	// HACK: IGNORE TYPE
	// TODO: ENCODE THIS HACK IN THE CODE GENERATION
	// Begin: Known property deserialization
	// mgr is a package-level property manager — presumably installed by the
	// generated package setup; confirm in the package's sibling files.
	if p, err := mgr.DeserializeIdPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsId = p
	}
	if p, err := mgr.DeserializeOwnerPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsOwner = p
	}
	if p, err := mgr.DeserializePublicKeyPemPropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsPublicKeyPem = p
	}
	if p, err := mgr.DeserializeTypePropertyActivityStreams()(m, aliasMap); err != nil {
		return nil, err
	} else if p != nil {
		this.ActivityStreamsType = p
	}
	// End: Known property deserialization
	// Begin: Unknown deserialization
	for k, v := range m {
		// Begin: Code that ensures a property name is unknown
		if k == "id" {
			continue
		} else if k == "owner" {
			continue
		} else if k == "publicKeyPem" {
			continue
		} else if k == "type" {
			continue
		} // End: Code that ensures a property name is unknown
		this.unknown[k] = v
	}
	// End: Unknown deserialization
	return this, nil
}
// IsOrExtendsPublicKey returns true if the other provided type is the PublicKey
// type or extends from the PublicKey type.
func IsOrExtendsPublicKey(other vocab.Type) bool {
	if other.GetTypeName() == "PublicKey" {
		return true
	}
	return PublicKeyIsExtendedBy(other)
}

// NewActivityStreamsPublicKey creates a new PublicKey type with its "type"
// property pre-populated with the "PublicKey" XML schema string.
func NewActivityStreamsPublicKey() *ActivityStreamsPublicKey {
	typeProp := typePropertyConstructor()
	typeProp.AppendXMLSchemaString("PublicKey")
	return &ActivityStreamsPublicKey{
		ActivityStreamsType: typeProp,
		alias:               "",
		unknown:             make(map[string]interface{}, 0),
	}
}

// PublicKeyIsDisjointWith returns true if the other provided type is disjoint
// with the PublicKey type. PublicKey is not disjoint with anything, so this
// is always false.
func PublicKeyIsDisjointWith(other vocab.Type) bool {
	// Shortcut implementation: is not disjoint with anything.
	return false
}

// PublicKeyIsExtendedBy returns true if the other provided type extends from the
// PublicKey type. Note that it returns false if the types are the same; see
// the "IsOrExtendsPublicKey" variant instead.
func PublicKeyIsExtendedBy(other vocab.Type) bool {
	// Shortcut implementation: is not extended by anything.
	return false
}
// GetActivityStreamsId returns the "id" property if it exists, and nil otherwise.
func (this ActivityStreamsPublicKey) GetActivityStreamsId() vocab.ActivityStreamsIdProperty {
	return this.ActivityStreamsId
}

// GetActivityStreamsOwner returns the "owner" property if it exists, and nil
// otherwise.
func (this ActivityStreamsPublicKey) GetActivityStreamsOwner() vocab.ActivityStreamsOwnerProperty {
	return this.ActivityStreamsOwner
}

// GetActivityStreamsPublicKeyPem returns the "publicKeyPem" property if it
// exists, and nil otherwise.
func (this ActivityStreamsPublicKey) GetActivityStreamsPublicKeyPem() vocab.ActivityStreamsPublicKeyPemProperty {
	return this.ActivityStreamsPublicKeyPem
}

// GetActivityStreamsType returns the "type" property if it exists, and nil
// otherwise.
func (this ActivityStreamsPublicKey) GetActivityStreamsType() vocab.ActivityStreamsTypeProperty {
	return this.ActivityStreamsType
}

// GetTypeName returns the name of this type: "PublicKey".
func (this ActivityStreamsPublicKey) GetTypeName() string {
	return "PublicKey"
}

// GetUnknownProperties returns the unknown properties for the PublicKey type.
// Note that this should not be used by app developers. It is only used to
// help determine which implementation is LessThan the other. Developers who
// are creating a different implementation of this type's interface can use
// this method in their LessThan implementation, but routine ActivityPub
// applications should not use this to bypass the code generation tool.
func (this ActivityStreamsPublicKey) GetUnknownProperties() map[string]interface{} {
	return this.unknown
}
// IsExtending returns true if the PublicKey type extends from the other type.
// PublicKey extends nothing, so this is always false.
func (this ActivityStreamsPublicKey) IsExtending(other vocab.Type) bool {
	return ActivityStreamsPublicKeyExtends(other)
}

// JSONLDContext returns the JSONLD URIs required in the context string for this
// type and the specific properties that are set. The value in the map is the
// alias used to import the type and its properties.
func (this ActivityStreamsPublicKey) JSONLDContext() map[string]string {
	// Seed with the ActivityStreams namespace, then merge each set property's
	// own context requirements.
	m := map[string]string{"https://www.w3.org/ns/activitystreams": this.alias}
	m = this.helperJSONLDContext(this.ActivityStreamsId, m)
	m = this.helperJSONLDContext(this.ActivityStreamsOwner, m)
	m = this.helperJSONLDContext(this.ActivityStreamsPublicKeyPem, m)
	m = this.helperJSONLDContext(this.ActivityStreamsType, m)
	return m
}
// LessThan computes if this PublicKey is lesser, with an arbitrary but stable
// determination. Properties are compared in a fixed order (id, owner,
// publicKeyPem, type); a nil property sorts before a non-nil one, and ties
// fall through to the next property.
//
// Fix: the "lhs non-nil vs rhs nil" branches originally tested
// `rhs != nil && rhs == nil`, which is always false, so that case
// incorrectly fell through as if both sides were nil.
func (this ActivityStreamsPublicKey) LessThan(o vocab.ActivityStreamsPublicKey) bool {
	// Begin: Compare known properties
	// Compare property "id"
	if lhs, rhs := this.ActivityStreamsId, o.GetActivityStreamsId(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if lhs != nil && rhs == nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "owner"
	if lhs, rhs := this.ActivityStreamsOwner, o.GetActivityStreamsOwner(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if lhs != nil && rhs == nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "publicKeyPem"
	if lhs, rhs := this.ActivityStreamsPublicKeyPem, o.GetActivityStreamsPublicKeyPem(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if lhs != nil && rhs == nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// Compare property "type"
	if lhs, rhs := this.ActivityStreamsType, o.GetActivityStreamsType(); lhs != nil && rhs != nil {
		if lhs.LessThan(rhs) {
			return true
		} else if rhs.LessThan(lhs) {
			return false
		}
	} else if lhs == nil && rhs != nil {
		// Nil is less than anything else
		return true
	} else if lhs != nil && rhs == nil {
		// Anything else is greater than nil
		return false
	} // Else: Both are nil
	// End: Compare known properties
	// Begin: Compare unknown properties (only by number of them)
	if len(this.unknown) < len(o.GetUnknownProperties()) {
		return true
	} else if len(o.GetUnknownProperties()) < len(this.unknown) {
		return false
	} // End: Compare unknown properties (only by number of them)
	// All properties are the same.
	return false
}
// Serialize converts this into an interface representation suitable for
// marshalling into a text or binary format. The "type" entry is always set
// (alias-prefixed when an alias is configured); every other known property is
// serialized only when present, and unknown properties are copied through
// without overwriting known keys.
func (this ActivityStreamsPublicKey) Serialize() (map[string]interface{}, error) {
	m := make(map[string]interface{})
	typeName := "PublicKey"
	if len(this.alias) > 0 {
		typeName = this.alias + ":" + "PublicKey"
	}
	m["type"] = typeName
	// Begin: Serialize known properties
	// Maybe serialize property "id"
	if this.ActivityStreamsId != nil {
		if i, err := this.ActivityStreamsId.Serialize(); err != nil {
			return nil, err
		} else if i != nil {
			m[this.ActivityStreamsId.Name()] = i
		}
	}
	// Maybe serialize property "owner"
	if this.ActivityStreamsOwner != nil {
		if i, err := this.ActivityStreamsOwner.Serialize(); err != nil {
			return nil, err
		} else if i != nil {
			m[this.ActivityStreamsOwner.Name()] = i
		}
	}
	// Maybe serialize property "publicKeyPem"
	if this.ActivityStreamsPublicKeyPem != nil {
		if i, err := this.ActivityStreamsPublicKeyPem.Serialize(); err != nil {
			return nil, err
		} else if i != nil {
			m[this.ActivityStreamsPublicKeyPem.Name()] = i
		}
	}
	// Maybe serialize property "type"
	if this.ActivityStreamsType != nil {
		if i, err := this.ActivityStreamsType.Serialize(); err != nil {
			return nil, err
		} else if i != nil {
			m[this.ActivityStreamsType.Name()] = i
		}
	}
	// End: Serialize known properties
	// Begin: Serialize unknown properties
	for k, v := range this.unknown {
		// To be safe, ensure we aren't overwriting a known property
		if _, has := m[k]; !has {
			m[k] = v
		}
	}
	// End: Serialize unknown properties
	return m, nil
}
// SetActivityStreamsId sets the "id" property.
func (this *ActivityStreamsPublicKey) SetActivityStreamsId(i vocab.ActivityStreamsIdProperty) {
	this.ActivityStreamsId = i
}

// SetActivityStreamsOwner sets the "owner" property.
func (this *ActivityStreamsPublicKey) SetActivityStreamsOwner(i vocab.ActivityStreamsOwnerProperty) {
	this.ActivityStreamsOwner = i
}

// SetActivityStreamsPublicKeyPem sets the "publicKeyPem" property.
func (this *ActivityStreamsPublicKey) SetActivityStreamsPublicKeyPem(i vocab.ActivityStreamsPublicKeyPemProperty) {
	this.ActivityStreamsPublicKeyPem = i
}

// SetActivityStreamsType sets the "type" property.
func (this *ActivityStreamsPublicKey) SetActivityStreamsType(i vocab.ActivityStreamsTypeProperty) {
	this.ActivityStreamsType = i
}

// VocabularyURI returns the vocabulary's URI as a string: the ActivityStreams
// namespace.
func (this ActivityStreamsPublicKey) VocabularyURI() string {
	return "https://www.w3.org/ns/activitystreams"
}
// helperJSONLDContext obtains the context uris and their aliases from a
// property, merging them into toMerge. A nil property contributes nothing.
func (this ActivityStreamsPublicKey) helperJSONLDContext(i jsonldContexter, toMerge map[string]string) map[string]string {
	if i == nil {
		return toMerge
	}
	for k, v := range i.JSONLDContext() {
		/*
		   Since the literal maps in this function are determined at
		   code-generation time, this loop should not overwrite an existing key with a
		   new value.
		*/
		toMerge[k] = v
	}
	return toMerge
}
|
package history
import (
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3"
)
// torrents_table is the SQLite table used to record seen torrent URLs.
const torrents_table = "Torrents"

// torrent_buffer is not referenced anywhere in this file — presumably a
// queue/buffer size used elsewhere in the package; confirm before removing.
const torrent_buffer = 15

// History records torrent URLs in SQLite so duplicates can be detected.
type History struct {
	db *sql.DB // backing SQLite handle
	q chan string // queue of URLs awaiting insertion by the adder goroutine
	ch chan<- string // caller-supplied channel for reporting insert errors
}
// New opens (or creates) the SQLite database at path n, ensures the torrents
// table exists, and starts the background inserter goroutine. Insert errors
// are reported as formatted strings on ch. The returned History must be
// Close()d to stop the goroutine and release the database.
func New(n string, ch chan<- string) (*History, error) {
	db, err := sql.Open("sqlite3", n)
	if err != nil {
		return nil, err
	}
	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS `" + torrents_table + "` (url string not null primary key)"); err != nil {
		return nil, err
	}
	h := &History{db, make(chan string), ch}
	// adder runs until Close() closes h.q.
	go h.adder()
	return h, nil
}
// Exists reports whether url has already been recorded.
// Uses a bound parameter instead of string concatenation: the original
// interpolated url directly into the SQL, which is an injection/quoting bug
// for URLs containing double quotes.
func (h *History) Exists(url string) (bool, error) {
	var count int
	err := h.db.QueryRow("SELECT COUNT(*) FROM `"+torrents_table+"` WHERE url = ?", url).Scan(&count)
	return count > 0, err
}
// adder drains h.q, inserting each URL into the torrents table, until the
// queue channel is closed by Close(). Failures are reported on h.ch.
// Fixes: the INSERT now uses a bound parameter (the original concatenated
// the URL into the SQL string), and the error report includes the offending
// url (the original mistakenly printed the channel h.q).
func (h *History) adder() {
	for url := range h.q {
		_, err := h.db.Exec("INSERT INTO `"+torrents_table+"` VALUES (?)", url)
		if err != nil {
			h.ch <- fmt.Sprint("While inserting ", url, ": ", err.Error())
		}
	}
}
// Add queues url for asynchronous insertion by the adder goroutine.
// NOTE(review): sending on h.q panics if Add is called after Close().
func (h *History) Add(url string) {
	h.q <- url
}
// Close stops the inserter queue and closes the database.
// NOTE(review): adder may still be draining queued URLs when db.Close runs,
// so a late insert can hit a closed database — consider waiting for adder to
// finish before closing; confirm the intended shutdown behavior.
func (h *History) Close() {
	close(h.q)
	h.db.Close()
}
|
/*
Copyright paskal.maksim@gmail.com
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logrushookopentracing
import (
"github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
)
// Hook is a logrus hook that mirrors matching log entries onto an
// opentracing span attached to the entry's fields.
type Hook struct {
	logLevels []log.Level // levels this hook fires for
}

// Options configures NewHook.
type Options struct {
	LogLevels []log.Level // levels to fire for; nil selects the defaults
}

// SpanKey is the logrus field key under which callers attach an
// opentracing.Span to a log entry.
const SpanKey = "span"
// NewHook creates a Hook firing for options.LogLevels, defaulting to
// Error/Fatal/Warn/Panic when none are given. The error return is always nil
// and exists for interface stability.
func NewHook(options Options) (*Hook, error) {
	levels := options.LogLevels
	if levels == nil {
		levels = []log.Level{
			log.ErrorLevel,
			log.FatalLevel,
			log.WarnLevel,
			log.PanicLevel,
		}
	}
	return &Hook{logLevels: levels}, nil
}
// Levels implements log.Hook, reporting which levels this hook fires for.
func (hook *Hook) Levels() []log.Level {
	return hook.logLevels
}
// Fire implements log.Hook. When the entry carries an opentracing.Span under
// SpanKey and the entry's level satisfies the original threshold, the span is
// tagged error=true and the entry's error (or message) is logged onto it.
// NOTE(review): `entry.Level >= log.ErrorLevel` matches Error and *less
// severe* levels under logrus's ordering (Panic=0 .. Debug=5) — preserved
// as-is; confirm this is the intended threshold.
func (hook *Hook) Fire(entry *log.Entry) error {
	span, ok := entry.Data[SpanKey].(opentracing.Span)
	if !ok || span == nil || entry.Level < log.ErrorLevel {
		return nil
	}
	span.SetTag("error", true)
	if dataErr, ok := entry.Data[log.ErrorKey].(error); ok && dataErr != nil {
		span.LogKV("error", dataErr)
	} else {
		span.LogKV("error", entry.Message)
	}
	return nil
}
|
package user
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"time"
"google.golang.org/grpc/codes"
jwt "github.com/dgrijalva/jwt-go"
"github.com/go-chi/chi"
"github.com/go-chi/jwtauth"
"github.com/go-chi/render"
"github.com/ubclaunchpad/pinpoint/gateway/api/ctxutil"
"github.com/ubclaunchpad/pinpoint/gateway/auth"
"github.com/ubclaunchpad/pinpoint/gateway/res"
pinpoint "github.com/ubclaunchpad/pinpoint/protobuf"
"github.com/ubclaunchpad/pinpoint/protobuf/request"
"go.uber.org/zap"
"google.golang.org/grpc/status"
)
// tokenAuth is never assigned or read in this file — possibly dead, or used
// elsewhere in the package; confirm before removing.
var tokenAuth *jwtauth.JWTAuth
// Router routes to all user endpoints
type Router struct {
	l *zap.SugaredLogger // structured logger, named "users"
	c pinpoint.CoreClient // gRPC client to the core service
	mux *chi.Mux // internal mux holding the user routes
}
// NewUserRouter instantiates a new router for handling user functionality.
// /create and /login are public; /verify requires a valid HS256 JWT.
// NOTE(review): a failure to load the API private key calls log.Fatal and
// terminates the process from inside this constructor.
func NewUserRouter(l *zap.SugaredLogger, core pinpoint.CoreClient) *Router {
	u := &Router{l.Named("users"), core, chi.NewRouter()}
	// these should all be public
	u.mux.Post("/create", u.createUser)
	u.mux.Post("/login", u.login)
	// Authenticated endpoints
	u.mux.Group(func(r chi.Router) {
		// JWT Initialization
		key, err := auth.GetAPIPrivateKey()
		if err != nil {
			log.Fatal(err.Error())
		}
		// Seek, verify and validate JWT tokens
		r.Use(jwtauth.Verifier(jwtauth.New("HS256", key, nil)))
		// Handle valid/invalid tokens
		r.Use(jwtauth.Authenticator)
		r.Post("/verify", u.verify)
	})
	return u
}
// ServeHTTP implements http.Handler by delegating to the internal chi mux.
func (u *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	u.mux.ServeHTTP(w, r)
}
// createUser handles POST /create: decodes a request.CreateAccount body,
// forwards it to the core service, and maps gRPC status codes onto HTTP
// responses (InvalidArgument -> 400, anything else -> 500).
func (u *Router) createUser(w http.ResponseWriter, r *http.Request) {
	var l = u.l.With("request-id", ctxutil.GetRequestID(r))
	// parse request data
	decoder := json.NewDecoder(r.Body)
	var user request.CreateAccount
	if err := decoder.Decode(&user); err != nil {
		l.Debugw("error occured reading request", "error", err)
		render.Render(w, r, res.ErrBadRequest("invalid request"))
		return
	}
	// create account in core
	resp, err := u.c.CreateAccount(r.Context(), &user)
	if err != nil {
		l.Debugw("error occured creating user account", "error", err)
		// Non-status errors get a generic 500; status errors are mapped below.
		st, ok := status.FromError(err)
		if !ok {
			render.Render(w, r, res.ErrInternalServer("failed to create user account", err))
			return
		}
		switch st.Code() {
		case codes.InvalidArgument:
			render.Render(w, r, res.ErrBadRequest(st.Message()))
		default:
			render.Render(w, r, res.ErrInternalServer(st.Message(), err))
		}
		return
	}
	// success!
	render.Render(w, r, res.Msg(resp.GetMessage(), http.StatusCreated,
		"email", user.GetEmail()))
}
// login handles POST /login: validates the email/password payload, checks
// credentials against the core service, and on success issues an HS256 JWT
// valid for 30 minutes.
func (u *Router) login(w http.ResponseWriter, r *http.Request) {
	var l = u.l.With("request-id", ctxutil.GetRequestID(r))
	if r.Body == nil {
		render.Render(w, r, res.ErrBadRequest("missing request body"))
		return
	}
	var decoder = json.NewDecoder(r.Body)
	var info struct {
		Email    string `json:"email"`
		Password string `json:"password"`
	}
	if err := decoder.Decode(&info); err != nil {
		render.Render(w, r, res.ErrBadRequest("error occurred parsing user login form entry",
			"error", err))
		return
	}
	if info.Email == "" || info.Password == "" {
		render.Render(w, r, res.ErrBadRequest("missing fields - both email and password is required"))
		return
	}
	// Any core-side failure is reported as 401; credential and transport
	// errors are not distinguished here.
	if _, err := u.c.Login(r.Context(), &request.Login{
		Email: info.Email, Password: info.Password,
	}); err != nil {
		render.Render(w, r, res.ErrUnauthorized(err.Error()))
		return
	}
	// No error means authenticated, proceed to generate token
	expirationTime := time.Now().Add(30 * time.Minute)
	claims := &auth.Claims{
		Email: info.Email,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: expirationTime.Unix(),
		},
	}
	tokenStr, err := claims.GenerateToken()
	if err != nil {
		render.Render(w, r, res.ErrInternalServer("failed to generate token", err))
		return
	}
	l.Infow("logged in", "user", info.Email)
	render.Render(w, r, res.MsgOK("user logged in",
		"token", tokenStr))
}
// verify handles POST /verify (JWT-protected): reads the email from the
// token claims and a {"hash": ...} body, then asks the core service to
// verify the pair. Fixes: the errors from ioutil.ReadAll and json.Unmarshal
// were previously ignored, so a malformed or unreadable body was reported as
// the misleading "hash is required".
func (u *Router) verify(w http.ResponseWriter, r *http.Request) {
	_, claims, _ := jwtauth.FromContext(r.Context())
	email := fmt.Sprintf("%v", claims["email"])
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		render.Render(w, r, res.ErrBadRequest("unable to read request body"))
		return
	}
	var body = &struct {
		Hash string `json:"hash"`
	}{}
	if err := json.Unmarshal(b, body); err != nil {
		render.Render(w, r, res.ErrBadRequest("invalid request body"))
		return
	}
	if body.Hash == "" {
		render.Render(w, r, res.ErrBadRequest("hash is required"))
		return
	}
	resp, err := u.c.Verify(r.Context(), &request.Verify{Email: email, Hash: body.Hash})
	if err != nil {
		render.Render(w, r, res.ErrNotFound(err.Error()))
		return
	}
	render.Render(w, r, res.Msg(resp.GetMessage(), http.StatusAccepted))
}
|
package main
import (
"fmt"
"time"
)
// elapsed starts a timer and returns a function that, when invoked, prints
// how long it has been since elapsed itself was called, labelled with what.
// Typical use: defer elapsed("work")() — the outer call starts the clock,
// the deferred inner call reports.
func elapsed(what string) func() {
	startedAt := time.Now()
	return func() {
		fmt.Printf("%s took %v\n", what, time.Since(startedAt))
	}
}
// main demonstrates an unbuffered channel: a sender goroutine pushes
// messages while main sleeps, then main drains the channel with range.
// Fix: the original closed the channel inside the send loop when i == 4 and
// then sent on it, which always panics with "send on closed channel"; the
// close now happens after every send has completed.
func main() {
	messages := make(chan string)
	go func() {
		defer elapsed("send")()
		fmt.Println("thread: about to ping")
		messages <- "thread: ping"
		for i := 0; i < 5; i++ {
			messages <- fmt.Sprintf("ping %d", i)
		}
		fmt.Println("thread: ping done, closing channel")
		// Only the sender closes, and only after its last send.
		close(messages)
	}()
	{
		// NOTE: this defer runs at function return, not at the end of this
		// bare block, so "receive" includes the final sleep below.
		defer elapsed("receive")()
		time.Sleep(2 * time.Second)
		fmt.Println("main: about to receive")
		for msg := range messages {
			fmt.Println("got: " + msg)
		}
		fmt.Println("main: receive done")
	}
	time.Sleep(10 * time.Millisecond)
}
|
//
// Copyright (c) 2016-2022 Snowplow Analytics Ltd. All rights reserved.
//
// This program is licensed to you under the Apache License Version 2.0,
// and you may not use this file except in compliance with the Apache License Version 2.0.
// You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the Apache License Version 2.0 is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
//
package main
import (
"encoding/json"
"errors"
"net/http"
"os"
"strings"
"time"
storagememory "github.com/snowplow/snowplow-golang-tracker/v3/pkg/storage/memory"
gt "github.com/snowplow/snowplow-golang-tracker/v3/tracker"
"github.com/urfave/cli"
)
// Application metadata surfaced in the CLI's help and version output.
const (
	appVersion   = "0.7.0"
	appName      = "snowplowtrk"
	appUsage     = "Snowplow Analytics Tracking CLI"
	appCopyright = "(c) 2016-2022 Snowplow Analytics, LTD"
)

// selfDescJSON mirrors the wire form of a self-describing JSON:
// { "schema": "iglu:...", "data": { ... } }.
type selfDescJSON struct {
	Schema string                 `json:"schema"`
	Data   map[string]interface{} `json:"data"`
}
// main wires up the urfave/cli application — metadata, flags, and the single
// action that builds one Snowplow self-describing event, sends it to the
// configured collector, and exits with a code derived from the HTTP status.
func main() {
	app := cli.NewApp()
	app.Name = appName
	app.Usage = appUsage
	app.Version = appVersion
	app.Copyright = appCopyright
	app.Compiled = time.Now()
	app.Authors = []cli.Author{
		{
			Name:  "Joshua Beemster",
			Email: "support@snowplowanalytics.com",
		},
		{
			Name: "Ronny Yabar",
		},
	}
	// Set CLI Flags
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "collector, c",
			Usage: "Collector Domain (Required)",
		},
		cli.StringFlag{
			Name:  "appid, id",
			Usage: "Application Id (Optional)",
			Value: appName,
		},
		cli.StringFlag{
			Name:  "method, m",
			Usage: "Method[POST|GET] (Optional)",
			Value: "GET",
		},
		cli.StringFlag{
			Name:  "protocol, p",
			Usage: "Protocol[http|https] (Optional)",
			Value: "https",
		},
		cli.StringFlag{
			Name:  "sdjson, sdj",
			Usage: "SelfDescribing JSON of the standard form { 'schema': 'iglu:xxx', 'data': { ... } }",
		},
		cli.StringFlag{
			Name:  "schema, s",
			Usage: "Schema URI, of the form iglu:xxx",
		},
		cli.StringFlag{
			Name:  "json, j",
			Usage: "Non-SelfDescribing JSON, of the form { ... }",
		},
		cli.StringFlag{
			Name:  "ipaddress, ip",
			Usage: "Track a custom IP Address (Optional)",
			Value: "",
		},
		cli.StringFlag{
			Name:  "contexts, ctx",
			Usage: "Array of SelfDescribing JSON to add as context to the outbound event",
			Value: "[]",
		},
	}
	// Set CLI Action
	app.Action = func(c *cli.Context) error {
		collector := c.String("collector")
		appid := c.String("appid")
		method := c.String("method")
		protocol := c.String("protocol")
		sdjson := c.String("sdjson")
		schema := c.String("schema")
		jsonData := c.String("json")
		ipAddress := c.String("ipaddress")
		contexts := c.String("contexts")
		// Check that collector domain exists
		if collector == "" {
			return cli.NewExitError("fatal: --collector needs to be specified", 1)
		}
		// Fetch the SelfDescribing JSON
		sdj, err := getSdJSON(sdjson, schema, jsonData)
		if err != nil {
			return cli.NewExitError(err.Error(), 1)
		}
		// Process the contexts array
		contextArr, err := getContexts(contexts)
		if err != nil {
			return cli.NewExitError(err.Error(), 1)
		}
		// Create channel to block for events
		trackerChan := make(chan int, 1)
		// Send the event
		tracker := initTracker(collector, appid, method, protocol, ipAddress, trackerChan, nil)
		statusCode := trackSelfDescribingEvent(tracker, trackerChan, sdj, contextArr)
		// Parse return code
		returnCode := parseStatusCode(statusCode)
		if returnCode != 0 {
			return cli.NewExitError("error: event failed to send, check your collector endpoint and try again", returnCode)
		}
		return nil
	}
	// NOTE(review): app.Run's error return is ignored — presumably relying
	// on urfave/cli's ExitError handling to print and set the exit code;
	// confirm behavior for non-ExitError failures.
	app.Run(os.Args)
}
// --- CLI
// getSdJSON resolves the three mutually-related CLI arguments into a
// SelfDescribingJson: either --sdjson supplies the whole document, or
// --schema and --json together supply its two halves. Missing combinations
// yield descriptive errors.
func getSdJSON(sdjson string, schema string, jsonData string) (*gt.SelfDescribingJson, error) {
	switch {
	case sdjson == "" && schema == "" && jsonData == "":
		return nil, errors.New("fatal: --sdjson or --schema URI plus a --json needs to be specified")
	case sdjson != "":
		// A full self-describing JSON was supplied; decode it directly.
		// UseNumber keeps numeric values as json.Number rather than float64.
		res := selfDescJSON{}
		d := json.NewDecoder(strings.NewReader(sdjson))
		d.UseNumber()
		if err := d.Decode(&res); err != nil {
			return nil, err
		}
		return gt.InitSelfDescribingJson(res.Schema, res.Data), nil
	case schema != "" && jsonData == "":
		return nil, errors.New("fatal: --json needs to be specified")
	case schema == "" && jsonData != "":
		return nil, errors.New("fatal: --schema URI needs to be specified")
	default:
		// Both --schema and --json present: combine them.
		jsonDataMap, err := stringToMap(jsonData)
		if err != nil {
			return nil, err
		}
		return gt.InitSelfDescribingJson(schema, jsonDataMap), nil
	}
}
// getContexts parses a JSON array string into the SelfDescribingJson
// contexts to attach to the outbound event. UseNumber keeps numeric values
// as json.Number rather than float64.
func getContexts(contexts string) ([]gt.SelfDescribingJson, error) {
	var parsed []selfDescJSON
	dec := json.NewDecoder(strings.NewReader(contexts))
	dec.UseNumber()
	if err := dec.Decode(&parsed); err != nil {
		return nil, err
	}
	sdjArr := make([]gt.SelfDescribingJson, 0, len(parsed))
	for _, context := range parsed {
		sdjArr = append(sdjArr, *gt.InitSelfDescribingJson(context.Schema, context.Data))
	}
	return sdjArr, nil
}
// --- Tracker
// initTracker creates a new Tracker ready for use by the application.
// The callback pushes the HTTP status of the (single) success or failure
// result onto trackerChan, which the caller blocks on; httpClient may be nil
// to use the emitter's default.
func initTracker(collector string, appid string, method string, protocol string, ipAddress string, trackerChan chan int, httpClient *http.Client) *gt.Tracker {
	// Create callback function
	callback := func(s []gt.CallbackResult, f []gt.CallbackResult) {
		// Exactly one event is in flight, so at most one of s/f has an entry;
		// status stays 0 if neither does.
		status := 0
		if len(s) == 1 {
			status = s[0].Status
		} else if len(f) == 1 {
			status = f[0].Status
		}
		trackerChan <- status
	}
	// Create Tracker
	emitter := gt.InitEmitter(gt.RequireCollectorUri(collector),
		gt.RequireStorage(storagememory.Init()),
		gt.OptionCallback(callback),
		gt.OptionRequestType(method),
		gt.OptionProtocol(protocol),
		gt.OptionHttpClient(httpClient),
	)
	subject := gt.InitSubject()
	if ipAddress != "" {
		subject.SetIpAddress(ipAddress)
	}
	tracker := gt.InitTracker(
		gt.RequireEmitter(emitter),
		gt.OptionSubject(subject),
		gt.OptionAppId(appid),
	)
	return tracker
}
// trackSelfDescribingEvent will pass an event to
// the tracker for sending.
//
// It blocks until the emitter callback delivers the HTTP status on
// trackerChan, clears the in-memory event store so the event cannot be
// re-sent, and returns that status.
func trackSelfDescribingEvent(tracker *gt.Tracker, trackerChan chan int, sdj *gt.SelfDescribingJson, contexts []gt.SelfDescribingJson) int {
	tracker.TrackSelfDescribingEvent(gt.SelfDescribingEvent{
		Event: sdj,
		Contexts: contexts,
	})
	returnCode := <-trackerChan
	// Ensure that the event is removed
	tracker.Emitter.Storage.DeleteAllEventRows()
	return returnCode
}
// --- Utilities
// parseStatusCode maps the HTTP response status of an event send to the
// process exit code: 2xx/3xx succeed (0), 4xx maps to 4, 5xx to 5, and
// anything else (including sub-200 and out-of-range codes) to 1.
func parseStatusCode(statusCode int) int {
	if statusCode >= 200 && statusCode < 400 {
		return 0
	}
	if statusCode >= 400 && statusCode < 500 {
		return 4
	}
	if statusCode >= 500 && statusCode < 600 {
		return 5
	}
	return 1
}
// stringToMap decodes str (assumed to be a JSON object) into a generic
// map, keeping numbers as json.Number to avoid float64 precision loss.
func stringToMap(str string) (map[string]interface{}, error) {
	var result map[string]interface{}
	dec := json.NewDecoder(strings.NewReader(str))
	dec.UseNumber()
	if err := dec.Decode(&result); err != nil {
		return nil, err
	}
	return result, nil
}
|
package format
import (
"github.com/plandem/xlsx/internal/ml"
"github.com/plandem/xlsx/internal/ml/primitives"
"github.com/stretchr/testify/require"
"testing"
)
// TestConditionalFormat_Set verifies that the Conditions/Condition option
// builders populate the underlying ml.ConditionalFormatting info and the
// per-rule ml.ConditionalRule/StyleFormat structures exactly as expected.
func TestConditionalFormat_Set(t *testing.T) {
	conditions := NewConditions(
		Conditions.Pivot,
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Priority(10),
			Condition.Type(ConditionTypeCellIs),
		),
		Conditions.Rule(
			Condition.Priority(90),
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Style(NewStyles(
				Font.Bold,
				Font.Color("#112233"),
			)),
		),
	)
	// The second rule carries a style: note the RGB color is normalized
	// to an ARGB string and all nested style sections are allocated.
	require.Equal(t, &ConditionalFormat{
		info: &ml.ConditionalFormatting{
			Pivot: true,
			Bounds: primitives.BoundsListFromRefs("A10:B20"),
		},
		rules: []*conditionalRule{
			{
				rule: &ml.ConditionalRule{
					Type: ConditionTypeCellIs,
					Priority: 10,
				},
			},
			{
				rule: &ml.ConditionalRule{
					Type: ConditionTypeAboveAverage,
					Priority: 90,
				},
				style: &StyleFormat{
					&ml.DiffStyle{
						Font: &ml.Font{
							Bold: true,
							Color: &ml.Color{RGB: "FF112233"},
						},
						NumberFormat: &ml.NumberFormat{},
						Fill: &ml.Fill{
							Pattern: &ml.PatternFill{},
							Gradient: &ml.GradientFill{},
						},
						Border: &ml.Border{
							Left: &ml.BorderSegment{},
							Right: &ml.BorderSegment{},
							Top: &ml.BorderSegment{},
							Bottom: &ml.BorderSegment{},
							Diagonal: &ml.BorderSegment{},
							Vertical: &ml.BorderSegment{},
							Horizontal: &ml.BorderSegment{},
						},
						Alignment: &ml.CellAlignment{},
						Protection: &ml.CellProtection{},
					},
					&ml.NamedStyleInfo{},
				},
			},
		},
	}, conditions)
}
// TestConditionalFormat_Validate exercises Validate across rule types:
// each condition type has required attributes (operator for cellIs, rank
// for top10, text for containsText, timePeriod for timePeriod, complete
// value/color lists for colorScale and iconSet). NotNil = invalid,
// Nil = valid.
func TestConditionalFormat_Validate(t *testing.T) {
	// No refs and no rules at all is invalid.
	require.NotNil(t, NewConditions().Validate())
	require.NotNil(t, NewConditions(Conditions.Refs("A10:B20")).Validate())
	// A negative priority is rejected.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Priority(-1),
		),
	).Validate())
	// cellIs requires an operator.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeCellIs),
		),
	).Validate())
	require.Nil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeCellIs),
			Condition.Priority(1),
			Condition.Operator(ConditionOperatorLessThanOrEqual),
		),
	).Validate())
	// top10 requires a rank.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeTop10),
			Condition.Priority(1),
			//Condition.Rank(),
		),
	).Validate())
	require.Nil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeTop10),
			Condition.Priority(1),
			Condition.Rank(1),
		),
	).Validate())
	// containsText requires the text to search for.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeContainsText),
			Condition.Priority(1),
		),
	).Validate())
	require.Nil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeContainsText),
			Condition.Priority(1),
			Condition.Text("abc"),
		),
	).Validate())
	// timePeriod requires a period value.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeTimePeriod),
			Condition.Priority(1),
		),
	).Validate())
	require.Nil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeTimePeriod),
			Condition.Priority(1),
			Condition.TimePeriod(TimePeriodLastMonth),
		),
	).Validate())
	// colorScale requires matching, complete value and color lists:
	// empty, one value + one color, two values + one color, and one
	// value + two colors are all rejected; two of each is accepted.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.ColorScale(),
		),
	).Validate())
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.ColorScale(
				ConditionValue(ConditionValueTypePercent, "10", false),
				"#112233",
			),
		),
	).Validate())
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.ColorScale(
				ConditionValue(ConditionValueTypePercent, "10", false),
				ConditionValue(ConditionValueTypePercent, "50", false),
				"#112233",
			),
		),
	).Validate())
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.ColorScale(
				ConditionValue(ConditionValueTypePercent, "50", false),
				"#112233",
				"#334455",
			),
		),
	).Validate())
	require.Nil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.ColorScale(
				ConditionValue(ConditionValueTypePercent, "10", false),
				ConditionValue(ConditionValueTypePercent, "50", false),
				"#112233",
				"#334455",
			),
		),
	).Validate())
	// iconSet: zero values or a single value is rejected; two values
	// for a 3-arrows set is accepted.
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.IconSet(IconSetType3Arrows, true, true, true),
		),
	).Validate())
	require.NotNil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.IconSet(IconSetType3Arrows, true, true, true,
				ConditionValue(ConditionValueTypePercent, "10", false),
			),
		),
	).Validate())
	require.Nil(t, NewConditions(
		Conditions.Refs("A10:B20"),
		Conditions.Rule(
			Condition.Type(ConditionTypeAboveAverage),
			Condition.Priority(1),
			Condition.IconSet(IconSetType3Arrows, true, true, true,
				ConditionValue(ConditionValueTypePercent, "10", false),
				ConditionValue(ConditionValueTypePercent, "50", false),
			),
		),
	).Validate())
}
|
package module
import (
"buddin.us/eolian/dsp"
)
// init registers the Dynamics module with the patcher registry.
func init() {
	Register("Dynamics", func(Config) (Patcher, error) { return newDynamics() })
}

var (
	// slopeFactor spreads a gain change linearly across one frame.
	slopeFactor = 1 / dsp.Float64(dsp.FrameSize)
	// log1 is ln(0.1); calcCoefs divides it by a time constant to derive
	// an exponential smoothing coefficient.
	log1 = dsp.Log(0.1)
)

// dynamics is a compressor/expander-style gain processor: an envelope
// follower tracks the control input and the signal is scaled according
// to where that envelope sits relative to the threshold.
type dynamics struct {
	IO
	// Patch inputs: signal, control/sidechain, threshold, clamp (attack)
	// and relax (release) times, and the slopes above/below threshold.
	in, control, threshold, clamp, relax, above, below *In
	// Smoothing coefficients cached by calcCoefs.
	clampCoef, relaxCoef dsp.Float64
	// Time constants the coefficients were derived from; -1 marks
	// "not computed yet" (set in newDynamics).
	lastClamp, lastRelax dsp.Float64
	// lastGain is the gain applied at the end of the previous frame;
	// lastMax is the current envelope-follower value.
	lastGain, lastMax dsp.Float64
	dcBlock *dsp.DCBlock
}
// newDynamics builds a Dynamics module with its default input values and
// exposes its ports; lastClamp/lastRelax start at -1 so the first call
// to calcCoefs always computes the coefficients.
func newDynamics() (*dynamics, error) {
	m := &dynamics{
		in: NewIn("input", dsp.Float64(0)),
		control: NewInBuffer("control", dsp.Float64(0)),
		threshold: NewInBuffer("threshold", dsp.Float64(0.5)),
		above: NewInBuffer("slopeAbove", dsp.Float64(0.3)),
		below: NewInBuffer("slopeBelow", dsp.Float64(1)),
		clamp: NewInBuffer("clamp", dsp.Duration(10)),
		relax: NewInBuffer("relax", dsp.Duration(10)),
		dcBlock: &dsp.DCBlock{},
		lastClamp: -1,
		lastRelax: -1,
	}
	err := m.Expose(
		"Dynamics",
		[]*In{m.in, m.clamp, m.relax, m.control, m.threshold, m.above, m.below},
		[]*Out{{Name: "output", Provider: dsp.Provide(m)}},
	)
	return m, err
}
// calcCoefs refreshes the exponential smoothing coefficients whenever
// either time constant changed (or on the very first call, when the
// cached values are still -1). A zero time constant yields a zero
// coefficient, i.e. no smoothing.
func (d *dynamics) calcCoefs(clamp, relax dsp.Float64) {
	if d.lastClamp == -1 || clamp != d.lastClamp {
		d.clampCoef = 0
		if clamp != 0 {
			d.clampCoef = dsp.Exp(log1 / clamp)
		}
		d.lastClamp = clamp
	}
	if d.lastRelax == -1 || relax != d.lastRelax {
		d.relaxCoef = 0
		if relax != 0 {
			d.relaxCoef = dsp.Exp(log1 / relax)
		}
		d.lastRelax = relax
	}
}
// Process produces one frame: the input is scaled by a gain derived from
// an envelope follower over |control| compared against the threshold,
// with separate slopes above and below it, and the result is DC-blocked.
func (d *dynamics) Process(out dsp.Frame) {
	d.in.Process(out)
	var (
		control = d.control.ProcessFrame()
		threshold = d.threshold.ProcessFrame()[0]
		above = d.above.ProcessFrame()[0]
		below = d.below.ProcessFrame()[0]
		clamp = d.clamp.ProcessFrame()[0]
		relax = d.relax.ProcessFrame()[0]
	)
	d.calcCoefs(clamp, relax)
	// Envelope follower: each sample of |control| is pulled toward the
	// previous envelope value, using the relax coefficient when falling
	// and the clamp coefficient when rising.
	for i := range out {
		v := dsp.Abs(control[i])
		if v < d.lastMax {
			v = v + (d.lastMax-v)*d.relaxCoef
		} else {
			v = v + (d.lastMax-v)*d.clampCoef
		}
		d.lastMax = v
	}
	// Target gain from the envelope's position relative to the
	// threshold; a slope of 1 means unity gain on that side.
	var nextGain dsp.Float64
	if d.lastMax < threshold {
		if below == 1 {
			nextGain = 1
		} else {
			nextGain = dsp.Pow(d.lastMax/threshold, below-1)
			// Guard against denormal or runaway pow() results.
			absGain := dsp.Abs(nextGain)
			if absGain < 1.0e-15 {
				nextGain = 0
			} else if absGain > 1.0e15 {
				nextGain = 1
			}
		}
	} else {
		if above == 1 {
			nextGain = 1
		} else {
			nextGain = dsp.Pow(d.lastMax/threshold, above-1)
		}
	}
	// Ramp from the previous gain to the new one across the frame to
	// avoid zipper noise, DC-blocking the scaled output.
	slope := (nextGain - d.lastGain) * slopeFactor
	for i := range out {
		out[i] = d.dcBlock.Tick(out[i] * d.lastGain)
		d.lastGain += slope
	}
}
// LuaState exposes the module's current envelope-follower value under
// the "rms" key for Lua scripting.
func (d *dynamics) LuaState() map[string]interface{} {
	state := make(map[string]interface{}, 1)
	state["rms"] = d.lastMax
	return state
}
|
// Copyright 2013 Webconnex, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package auth
import (
"bytes"
"encoding/base64"
"errors"
"strconv"
)
// Auth is a parsed authorization header value: the scheme token
// (e.g. "Basic", "Bearer") and the raw remainder after it.
type Auth struct {
	Scheme string
	RawValue string
}
// Parse splits an authorization header value of the form
// "<scheme> <raw-value>" into its scheme token and the remainder after
// the whitespace following the scheme.
func Parse(s string) (*Auth, error) {
	offset, err := nextToken(s, 0, false)
	if err != nil {
		return nil, err
	}
	scheme, err := readToken(s, offset)
	if err != nil {
		return nil, err
	}
	offset, err = nextToken(s, offset+len(scheme), false)
	if err != nil {
		return nil, err
	}
	// scheme and s[offset:] are already strings; the previous
	// string(...) conversions were redundant. Named fields make the
	// literal self-describing.
	return &Auth{Scheme: scheme, RawValue: s[offset:]}, nil
}
// Values parses the raw value as comma-separated key="value" pairs;
// any parse error is discarded and whatever was parsed is returned.
func (a *Auth) Values() Values {
	v, _ := ParseValues(a.RawValue)
	return v
}

// Basic decodes the raw value as Basic credentials; on failure both
// strings are empty.
func (a *Auth) Basic() (username, password string) {
	username, password, _ = ParseBasic(a.RawValue)
	return
}
// ParseBasic decodes a Basic credential string (base64-encoded
// "username:password") and returns the two parts. It returns an error
// when the base64 payload is malformed or contains no ':' separator.
func ParseBasic(s string) (string, string, error) {
	b, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		// Propagate the decode error; the previous code returned nil
		// here, silently yielding empty credentials on bad input.
		return "", "", err
	}
	p := bytes.SplitN(b, []byte{':'}, 2)
	if len(p) < 2 {
		return "", "", errors.New("unknown credential format")
	}
	return string(p[0]), string(p[1]), nil
}
// Values holds auth-param key/value pairs from a credentials string.
type Values map[string]string

// ParseValues parses comma-separated key="value" (or key=token) pairs.
// On error, the pairs parsed before the failure are still returned in m
// alongside the error.
func ParseValues(s string) (m Values, err error) {
	m = make(Values)
	err = parseValues(m, s, 0)
	return
}
// Encode serializes the values as a comma-separated list of key="value"
// pairs. A nil map encodes to the empty string. Map iteration order is
// not deterministic, so pair order may vary between calls.
func (v Values) Encode() string {
	if v == nil {
		return ""
	}
	var b []byte
	for key, value := range v {
		if len(b) > 0 {
			b = append(b, ", "...)
		}
		b = append(b, key...)
		b = append(b, '=', '"')
		b = append(b, value...)
		b = append(b, '"')
	}
	return string(b)
}
// nextToken skips whitespace (and, when comma is true, at most one ','
// list separator) starting at offset, and returns the index of the first
// character of the next token. The returned index may equal len(s) when
// only whitespace remained. The character found must be a legal token
// character: separators, control characters and non-ASCII bytes yield
// an error together with their position.
func nextToken(s string, offset int, comma bool) (int, error) {
	var i = offset
	var err error
	var commas = 0
Loop:
	for length := len(s); i < length; i++ {
		switch s[i] {
		case ' ', '\t', '\r', '\n':
			// Whitespace: keep scanning.
		case ',':
			if !comma || commas > 0 {
				err = errors.New("unexpected ','" +
					" at position " + strconv.Itoa(i))
				break Loop
			}
			commas++
		case '(', ')', '<', '>', '@',
			';', ':', '\\', '"', '/',
			'[', ']', '?', '=', '{', '}':
			// A separator cannot start a token.
			err = errors.New("unexpected '" + string(s[i]) +
				"' at position " + strconv.Itoa(i))
			break Loop
		default:
			// First candidate token character: validate it and stop
			// here; i is the token start on success.
			if s[i] < ' ' || s[i] >= 127 {
				err = errors.New("invalid char at position " + strconv.Itoa(i))
			}
			break Loop
		}
	}
	return i, err
}
// readToken scans s from offset and returns the token ending at the
// first separator or whitespace character (exclusive), or at the end of
// the string. Control characters and bytes >= 127 inside the token are
// rejected with an error naming their position.
func readToken(s string, offset int) (string, error) {
	for i := offset; i < len(s); i++ {
		switch c := s[i]; c {
		case '(', ')', '<', '>', '@',
			',', ';', ':', '\\', '"',
			'/', '[', ']', '?', '=',
			'{', '}', ' ', '\t',
			'\r', '\n':
			// Separator or whitespace terminates the token.
			return s[offset:i], nil
		default:
			if c < ' ' || c >= 127 {
				return "", errors.New("invalid char at position " + strconv.Itoa(i))
			}
		}
	}
	// Token runs to the end of the string.
	return s[offset:], nil
}
// readQuoted reads a quoted-string beginning at offset (which must point
// at the opening '"') and returns its contents without the surrounding
// quotes; backslash escape sequences are kept verbatim, not unescaped.
// It rejects illegal control characters and unterminated strings.
func readQuoted(s string, offset int) (string, error) {
	if s[offset] != '"' {
		return "", errors.New("unexpected '" +
			string(s[offset]) + "' at position " +
			strconv.Itoa(offset) + " expecting '\"'")
	}
	escape := false
	for i := offset + 1; i < len(s); i++ {
		c := s[i]
		if escape {
			// Consume exactly one escaped character. The previous code
			// set escape back to true here, so everything after the
			// first backslash was skipped, the closing quote was never
			// seen, and the function then indexed past the end of s.
			escape = false
			continue
		}
		switch {
		case c == 127 || (c < ' ' && c != '\t' && c != '\r' && c != '\n'):
			return "", errors.New("invalid char at position " + strconv.Itoa(i))
		case c == '"':
			// Closing quote: return the raw contents between quotes.
			return s[offset+1 : i], nil
		case c == '\\':
			escape = true
		}
	}
	// No closing quote before end of input; the previous code panicked
	// here by indexing s[len(s)].
	return "", errors.New("expecting '\"' but reached end")
}
// parseValues scans s from offset as a comma-separated list of
// name=value pairs, where value is either a token or a quoted-string,
// and stores each pair in m. It stops at end of input or on the first
// syntax error; pairs parsed before an error remain in m.
func parseValues(m Values, s string, offset int) error {
	var i int = offset
	var err error
	for length := len(s); ; {
		// Skip empty space and eat comma
		// (a comma is only legal between pairs, hence i != 0).
		i, err = nextToken(s, i, i != 0)
		if err != nil {
			break
		}
		if i == length {
			break
		}
		// Read name token
		var name string
		name, err = readToken(s, i)
		if err != nil {
			break
		}
		i += len(name)
		// Eat expected '='
		// NOTE(review): if the input ends immediately after the name,
		// s[i] here indexes past the end and panics instead of
		// returning an error — confirm callers only feed well-formed
		// header values.
		if s[i] != '=' {
			err = errors.New("unexpected '" + string(s[i]) +
				"' expecting '=' at position " + strconv.Itoa(i))
			break
		}
		i += 1
		// Read value token or quoted string
		var value string
		if s[i] == '"' {
			value, err = readQuoted(s, i)
			// Skip the two quote characters surrounding the value.
			i += 2
		} else {
			value, err = readToken(s, i)
		}
		if err != nil {
			break
		}
		m[name] = value
		i += len(value)
	}
	return err
}
|
package jarviscore
import (
"context"
"time"
)
// FuncOnTimer - on timer
// - If it returns false, the timer will end
type FuncOnTimer func(ctx context.Context, timer *Timer) bool

// Timer - timer
// timer holds the interval between callback invocations, in seconds.
type Timer struct {
	timer int
	ontimer FuncOnTimer
}

// NewTimer - new timer
// It only constructs the Timer; call Start (or use StartTimer) to run
// the loop.
func NewTimer(timer int, ontimer FuncOnTimer) *Timer {
	return &Timer{
		timer: timer,
		ontimer: ontimer,
	}
}
// StartTimer - start timer
//
// It validates the interval (seconds, must be positive) and the
// callback, then runs the timer loop in its own goroutine and returns
// the Timer handle immediately.
func StartTimer(ctx context.Context, timer int, ontimer FuncOnTimer) (*Timer, error) {
	if timer <= 0 {
		return nil, ErrInvalidTimer
	}
	if ontimer == nil {
		return nil, ErrInvalidTimerFunc
	}
	t := NewTimer(timer, ontimer)
	go t.Start(ctx)
	return t, nil
}
// Start - start
//
// Start runs the timer loop: it waits t.timer seconds, invokes the
// callback, and repeats until the callback returns false or ctx is
// cancelled. It is normally run in its own goroutine (see StartTimer).
func (t *Timer) Start(ctx context.Context) {
	interval := time.Second * time.Duration(t.timer)
	timer := time.NewTimer(interval)
	// Release the timer's resources when the loop exits.
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			// A bare `break` here would only exit the select, not the
			// for loop (the old code therefore never terminated and
			// busy-spun after ctx cancellation); return explicitly.
			if !t.ontimer(ctx, t) {
				return
			}
			timer.Reset(interval)
		case <-ctx.Done():
			return
		}
	}
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !datarace
package avfs_test
import (
"reflect"
"strconv"
"testing"
"github.com/avfs/avfs"
)
// TestAvfsErrors checks the Error() string of every avfs error type
// against its expected hand-assembled message.
func TestAvfsErrors(t *testing.T) {
	groupName := "groupName"
	aegErr := avfs.AlreadyExistsGroupError(groupName)
	wantErrStr := "group: group " + groupName + " already exists"
	if aegErr.Error() != wantErrStr {
		t.Errorf("AlreadyExistsGroupError : want error to be %s, got %s", wantErrStr, aegErr.Error())
	}
	userName := "userName"
	wantErrStr = "user: user " + userName + " already exists"
	aeuErr := avfs.AlreadyExistsUserError(userName)
	if aeuErr.Error() != wantErrStr {
		t.Errorf("AlreadyExistsUserError : want error to be %s, got %s", wantErrStr, aeuErr.Error())
	}
	// UnknownError embeds the dynamic type name of the error value.
	errStr := "whatever error"
	uErr := avfs.UnknownError(errStr)
	wantErrStr = "unknown error " + reflect.TypeOf(uErr).String() + " : '" + errStr + "'"
	if uErr.Error() != wantErrStr {
		t.Errorf("UnknownError : want error to be %s, got %s", wantErrStr, uErr.Error())
	}
	wantErrStr = "group: unknown group " + groupName
	ugErr := avfs.UnknownGroupError(groupName)
	if ugErr.Error() != wantErrStr {
		t.Errorf("UnknownGroupError : want error to be %s, got %s", wantErrStr, ugErr.Error())
	}
	gid := -1
	wantErrStr = "group: unknown groupid " + strconv.Itoa(gid)
	ugiErr := avfs.UnknownGroupIdError(gid)
	if ugiErr.Error() != wantErrStr {
		t.Errorf("UnknownGroupIdError : want error to be %s, got %s", wantErrStr, ugiErr.Error())
	}
	wantErrStr = "user: unknown user " + userName
	uuErr := avfs.UnknownUserError(userName)
	if uuErr.Error() != wantErrStr {
		t.Errorf("UnknownUserError : want error to be %s, got %s", wantErrStr, uuErr.Error())
	}
	uid := -1
	wantErrStr = "user: unknown userid " + strconv.Itoa(uid)
	uuiErr := avfs.UnknownUserIdError(uid)
	if uuiErr.Error() != wantErrStr {
		t.Errorf("UnknownUserIdError : want error to be %s, got %s", wantErrStr, uuiErr.Error())
	}
}
|
package utils
// Account status values recognized by the application. Note that the
// email-not-confirmed state is stored as the string "pending".
const (
	StatusActive = "active"
	StatusSuspended = "suspended"
	StatusEmailNotConfirmed = "pending"
)

// IsNotValidStatus reports whether status is not one of the known
// account status values.
func IsNotValidStatus(status string) bool {
	switch status {
	case StatusActive, StatusSuspended, StatusEmailNotConfirmed:
		return false
	default:
		return true
	}
}
|
/**
Exercise 2 ::
1. Use var to DECLARE three variables. The variables should have package level scope.
Do not assign VALUES to the variables. Use the following IDENTIFIERS for the
variables and make sure the variables are of the following TYPE(meaning they can
store VALUES of that TYPE)
a. identifier "x" type int
b. identifier "y" type string
c. identifier "z" type bool
2. in func main
a. print out the values for each variable
b. The compiler assigned values to these variables. What are these values called?
Exercise 3 ::
Using the code from the previous exercise,
1. At the package level scope, assign the following values to the three variables
a. for x assign 42
b. for y assign "James Bond"
c. for z assign true
2. in func main()
a. use fmt.Sprintf to print all of the VALUES to one single string. ASSIGN the
returned value of TYPE string using the short declaration operator to a
VARIABLE with the IDENTIFIER "s"
b. print out the value stored by variable "s"
*/
package main
import "fmt"
//2-1...3-1...
// Package-level variables, declared with explicit types (exercise 2
// step 1) and assigned values (exercise 3 step 1).
var x int = 42
var y string = "James Bond"
var z bool = true

func main() {
	//2-2(a)...
	// Print each package-level variable on its own line.
	fmt.Println(x)
	fmt.Println(y)
	fmt.Println(z)
	//2-2(b)...ZERO(or default) VALUES
	//3-2(a)...
	// Format all three values into a single tab-separated string.
	s := fmt.Sprintf("%d\t%s\t%t", x, y, z)
	//3-2(b)...
	fmt.Println(s)
}
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apigee
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/apigee/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)
// Organization adapts the DCL apigee/beta Organization resource to the
// generic unstructured.Resource interface (registered in init).
type Organization struct{}

// OrganizationToUnstructured converts a typed apigee/beta Organization
// into its unstructured representation, copying only fields that are
// set (non-nil and not the package's "empty" sentinel values).
func OrganizationToUnstructured(r *dclService.Organization) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "apigee",
			Version: "beta",
			Type: "Organization",
		},
		Object: make(map[string]interface{}),
	}
	if r.AddonsConfig != nil && r.AddonsConfig != dclService.EmptyOrganizationAddonsConfig {
		rAddonsConfig := make(map[string]interface{})
		if r.AddonsConfig.AdvancedApiOpsConfig != nil && r.AddonsConfig.AdvancedApiOpsConfig != dclService.EmptyOrganizationAddonsConfigAdvancedApiOpsConfig {
			rAddonsConfigAdvancedApiOpsConfig := make(map[string]interface{})
			if r.AddonsConfig.AdvancedApiOpsConfig.Enabled != nil {
				rAddonsConfigAdvancedApiOpsConfig["enabled"] = *r.AddonsConfig.AdvancedApiOpsConfig.Enabled
			}
			rAddonsConfig["advancedApiOpsConfig"] = rAddonsConfigAdvancedApiOpsConfig
		}
		if r.AddonsConfig.MonetizationConfig != nil && r.AddonsConfig.MonetizationConfig != dclService.EmptyOrganizationAddonsConfigMonetizationConfig {
			rAddonsConfigMonetizationConfig := make(map[string]interface{})
			if r.AddonsConfig.MonetizationConfig.Enabled != nil {
				rAddonsConfigMonetizationConfig["enabled"] = *r.AddonsConfig.MonetizationConfig.Enabled
			}
			rAddonsConfig["monetizationConfig"] = rAddonsConfigMonetizationConfig
		}
		u.Object["addonsConfig"] = rAddonsConfig
	}
	if r.AnalyticsRegion != nil {
		u.Object["analyticsRegion"] = *r.AnalyticsRegion
	}
	if r.AuthorizedNetwork != nil {
		u.Object["authorizedNetwork"] = *r.AuthorizedNetwork
	}
	if r.BillingType != nil {
		u.Object["billingType"] = string(*r.BillingType)
	}
	if r.CaCertificate != nil {
		u.Object["caCertificate"] = *r.CaCertificate
	}
	if r.CreatedAt != nil {
		u.Object["createdAt"] = *r.CreatedAt
	}
	if r.Description != nil {
		u.Object["description"] = *r.Description
	}
	if r.DisplayName != nil {
		u.Object["displayName"] = *r.DisplayName
	}
	// environments is copied unconditionally (nil slice becomes nil).
	var rEnvironments []interface{}
	for _, rEnvironmentsVal := range r.Environments {
		rEnvironments = append(rEnvironments, rEnvironmentsVal)
	}
	u.Object["environments"] = rEnvironments
	if r.ExpiresAt != nil {
		u.Object["expiresAt"] = *r.ExpiresAt
	}
	if r.LastModifiedAt != nil {
		u.Object["lastModifiedAt"] = *r.LastModifiedAt
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Project != nil {
		u.Object["project"] = *r.Project
	}
	if r.ProjectId != nil {
		u.Object["projectId"] = *r.ProjectId
	}
	if r.Properties != nil {
		rProperties := make(map[string]interface{})
		for k, v := range r.Properties {
			rProperties[k] = v
		}
		u.Object["properties"] = rProperties
	}
	if r.RuntimeDatabaseEncryptionKeyName != nil {
		u.Object["runtimeDatabaseEncryptionKeyName"] = *r.RuntimeDatabaseEncryptionKeyName
	}
	if r.RuntimeType != nil {
		u.Object["runtimeType"] = string(*r.RuntimeType)
	}
	if r.State != nil {
		u.Object["state"] = string(*r.State)
	}
	if r.SubscriptionType != nil {
		u.Object["subscriptionType"] = string(*r.SubscriptionType)
	}
	return u
}
// UnstructuredToOrganization converts an unstructured resource back into
// a typed Organization, type-asserting every present field and returning
// an error naming the first field whose dynamic type does not match.
func UnstructuredToOrganization(u *unstructured.Resource) (*dclService.Organization, error) {
	r := &dclService.Organization{}
	if _, ok := u.Object["addonsConfig"]; ok {
		if rAddonsConfig, ok := u.Object["addonsConfig"].(map[string]interface{}); ok {
			r.AddonsConfig = &dclService.OrganizationAddonsConfig{}
			if _, ok := rAddonsConfig["advancedApiOpsConfig"]; ok {
				if rAddonsConfigAdvancedApiOpsConfig, ok := rAddonsConfig["advancedApiOpsConfig"].(map[string]interface{}); ok {
					r.AddonsConfig.AdvancedApiOpsConfig = &dclService.OrganizationAddonsConfigAdvancedApiOpsConfig{}
					if _, ok := rAddonsConfigAdvancedApiOpsConfig["enabled"]; ok {
						if b, ok := rAddonsConfigAdvancedApiOpsConfig["enabled"].(bool); ok {
							r.AddonsConfig.AdvancedApiOpsConfig.Enabled = dcl.Bool(b)
						} else {
							return nil, fmt.Errorf("r.AddonsConfig.AdvancedApiOpsConfig.Enabled: expected bool")
						}
					}
				} else {
					return nil, fmt.Errorf("r.AddonsConfig.AdvancedApiOpsConfig: expected map[string]interface{}")
				}
			}
			if _, ok := rAddonsConfig["monetizationConfig"]; ok {
				if rAddonsConfigMonetizationConfig, ok := rAddonsConfig["monetizationConfig"].(map[string]interface{}); ok {
					r.AddonsConfig.MonetizationConfig = &dclService.OrganizationAddonsConfigMonetizationConfig{}
					if _, ok := rAddonsConfigMonetizationConfig["enabled"]; ok {
						if b, ok := rAddonsConfigMonetizationConfig["enabled"].(bool); ok {
							r.AddonsConfig.MonetizationConfig.Enabled = dcl.Bool(b)
						} else {
							return nil, fmt.Errorf("r.AddonsConfig.MonetizationConfig.Enabled: expected bool")
						}
					}
				} else {
					return nil, fmt.Errorf("r.AddonsConfig.MonetizationConfig: expected map[string]interface{}")
				}
			}
		} else {
			return nil, fmt.Errorf("r.AddonsConfig: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["analyticsRegion"]; ok {
		if s, ok := u.Object["analyticsRegion"].(string); ok {
			r.AnalyticsRegion = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.AnalyticsRegion: expected string")
		}
	}
	if _, ok := u.Object["authorizedNetwork"]; ok {
		if s, ok := u.Object["authorizedNetwork"].(string); ok {
			r.AuthorizedNetwork = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.AuthorizedNetwork: expected string")
		}
	}
	if _, ok := u.Object["billingType"]; ok {
		if s, ok := u.Object["billingType"].(string); ok {
			r.BillingType = dclService.OrganizationBillingTypeEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.BillingType: expected string")
		}
	}
	if _, ok := u.Object["caCertificate"]; ok {
		if s, ok := u.Object["caCertificate"].(string); ok {
			r.CaCertificate = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CaCertificate: expected string")
		}
	}
	if _, ok := u.Object["createdAt"]; ok {
		if i, ok := u.Object["createdAt"].(int64); ok {
			r.CreatedAt = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.CreatedAt: expected int64")
		}
	}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	if _, ok := u.Object["displayName"]; ok {
		if s, ok := u.Object["displayName"].(string); ok {
			r.DisplayName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.DisplayName: expected string")
		}
	}
	if _, ok := u.Object["environments"]; ok {
		if s, ok := u.Object["environments"].([]interface{}); ok {
			// Non-string elements are silently skipped.
			for _, ss := range s {
				if strval, ok := ss.(string); ok {
					r.Environments = append(r.Environments, strval)
				}
			}
		} else {
			return nil, fmt.Errorf("r.Environments: expected []interface{}")
		}
	}
	if _, ok := u.Object["expiresAt"]; ok {
		if i, ok := u.Object["expiresAt"].(int64); ok {
			r.ExpiresAt = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.ExpiresAt: expected int64")
		}
	}
	if _, ok := u.Object["lastModifiedAt"]; ok {
		if i, ok := u.Object["lastModifiedAt"].(int64); ok {
			r.LastModifiedAt = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.LastModifiedAt: expected int64")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["projectId"]; ok {
		if s, ok := u.Object["projectId"].(string); ok {
			r.ProjectId = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ProjectId: expected string")
		}
	}
	if _, ok := u.Object["properties"]; ok {
		if rProperties, ok := u.Object["properties"].(map[string]interface{}); ok {
			// Non-string values are silently skipped.
			m := make(map[string]string)
			for k, v := range rProperties {
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.Properties = m
		} else {
			return nil, fmt.Errorf("r.Properties: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["runtimeDatabaseEncryptionKeyName"]; ok {
		if s, ok := u.Object["runtimeDatabaseEncryptionKeyName"].(string); ok {
			r.RuntimeDatabaseEncryptionKeyName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.RuntimeDatabaseEncryptionKeyName: expected string")
		}
	}
	if _, ok := u.Object["runtimeType"]; ok {
		if s, ok := u.Object["runtimeType"].(string); ok {
			r.RuntimeType = dclService.OrganizationRuntimeTypeEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.RuntimeType: expected string")
		}
	}
	if _, ok := u.Object["state"]; ok {
		if s, ok := u.Object["state"].(string); ok {
			r.State = dclService.OrganizationStateEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.State: expected string")
		}
	}
	if _, ok := u.Object["subscriptionType"]; ok {
		if s, ok := u.Object["subscriptionType"].(string); ok {
			r.SubscriptionType = dclService.OrganizationSubscriptionTypeEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.SubscriptionType: expected string")
		}
	}
	return r, nil
}
// GetOrganization fetches the Organization described by u from the API
// and returns its current state in unstructured form.
func GetOrganization(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToOrganization(u)
	if err != nil {
		return nil, err
	}
	r, err = c.GetOrganization(ctx, r)
	if err != nil {
		return nil, err
	}
	return OrganizationToUnstructured(r), nil
}
// ListOrganization lists all Organizations visible to config, walking
// every page of results and converting each item to unstructured form.
func ListOrganization(ctx context.Context, config *dcl.Config) ([]*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	l, err := c.ListOrganization(ctx)
	if err != nil {
		return nil, err
	}
	var resources []*unstructured.Resource
	for {
		for _, r := range l.Items {
			resources = append(resources, OrganizationToUnstructured(r))
		}
		if !l.HasNext() {
			break
		}
		if err := l.Next(ctx, c); err != nil {
			return nil, err
		}
	}
	return resources, nil
}
// ApplyOrganization creates or updates the Organization described by u,
// honoring an optional state hint carried in opts, and returns the
// applied state in unstructured form.
func ApplyOrganization(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToOrganization(u)
	if err != nil {
		return nil, err
	}
	// Convert an unstructured state hint, if provided, to its typed form.
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToOrganization(ush)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	r, err = c.ApplyOrganization(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return OrganizationToUnstructured(r), nil
}
// OrganizationHasDiff reports whether applying u would change anything:
// it runs a dry-run Apply with all lifecycle changes blocked and treats
// an ApplyInfeasibleError as "a diff exists".
func OrganizationHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	c := dclService.NewClient(config)
	r, err := UnstructuredToOrganization(u)
	if err != nil {
		return false, err
	}
	if ush := unstructured.FetchStateHint(opts); ush != nil {
		sh, err := UnstructuredToOrganization(ush)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(sh))
	}
	// Block every mutation so Apply can only succeed when no change is
	// needed; an infeasible apply therefore means a diff exists.
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	_, err = c.ApplyOrganization(ctx, r, opts...)
	if err != nil {
		if _, ok := err.(dcl.ApplyInfeasibleError); ok {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteOrganization deletes the Organization described by u.
func DeleteOrganization(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	c := dclService.NewClient(config)
	r, err := UnstructuredToOrganization(u)
	if err != nil {
		return err
	}
	return c.DeleteOrganization(ctx, r)
}

// OrganizationID returns the canonical identifier of the Organization
// described by u.
func OrganizationID(u *unstructured.Resource) (string, error) {
	r, err := UnstructuredToOrganization(u)
	if err != nil {
		return "", err
	}
	return r.ID()
}
// STV returns the service/type/version triple identifying this adapter.
func (r *Organization) STV() unstructured.ServiceTypeVersion {
	// Use named fields: the previous positional literal
	// {"apigee", "Organization", "beta"} assigned "Organization" to
	// Version and "beta" to Type, disagreeing with
	// OrganizationToUnstructured, which emits Service "apigee",
	// Version "beta", Type "Organization".
	return unstructured.ServiceTypeVersion{
		Service: "apigee",
		Version: "beta",
		Type:    "Organization",
	}
}
// IAM policy operations are not supported for apigee Organizations;
// every policy method reports unstructured.ErrNoSuchMethod.

func (r *Organization) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Organization) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Organization) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}

func (r *Organization) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Organization) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Organization) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// The following methods implement the unstructured resource interface
// by delegating to the package-level Organization functions.

func (r *Organization) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetOrganization(ctx, config, resource)
}

func (r *Organization) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyOrganization(ctx, config, resource, opts...)
}

func (r *Organization) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return OrganizationHasDiff(ctx, config, resource, opts...)
}

func (r *Organization) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteOrganization(ctx, config, resource)
}

func (r *Organization) ID(resource *unstructured.Resource) (string, error) {
	return OrganizationID(resource)
}
// init registers the Organization handler with the unstructured
// resource registry so it can be resolved by service/type/version.
func init() {
	unstructured.Register(&Organization{})
}
|
// Copyright 2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !privileged_tests
package bpf
import (
. "gopkg.in/check.v1"
)
// TestExtractCommonName verifies that map names found in bpffs are
// reduced to their expected common names.
func (s *BPFTestSuite) TestExtractCommonName(c *C) {
	tests := []struct {
		mapName string
		common  string
	}{
		{"cilium_calls_1157", "calls"},
		{"cilium_calls_netdev_ns_1", "calls"},
		{"cilium_calls_overlay_2", "calls"},
		{"cilium_ct4_global", "ct4_global"},
		{"cilium_ct_any4_global", "ct_any4_global"},
		{"cilium_ep_config_1157", "ep_config"},
		{"cilium_events", "events"},
		{"cilium_ipcache", "ipcache"},
		{"cilium_lb4_reverse_nat", "lb4_reverse_nat"},
		{"cilium_lb4_rr_seq", "lb4_rr_seq"},
		{"cilium_lb4_services", "lb4_services"},
		{"cilium_lxc", "lxc"},
		{"cilium_metrics", "metrics"},
		{"cilium_policy", "policy"},
		{"cilium_policy_1157", "policy"},
		{"cilium_policy_reserved_1", "policy"},
		{"cilium_proxy4", "proxy4"},
		{"cilium_tunnel_map", "tunnel_map"},
	}
	for _, tt := range tests {
		c.Assert(extractCommonName(tt.mapName), Equals, tt.common)
	}
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-03 17:02
* Description:
*****************************************************************/
package netstream
import (
"github.com/go-xe2/x/core/logger"
"sync/atomic"
"time"
)
// 服务端心跳处理
// processHeartbeat is the server-side heartbeat loop. Every heartbeat
// interval it scans all connected clients, closes clients that have
// missed more than the allowed number of heartbeats, and triggers a new
// heartbeat for clients whose last beat is older than the interval.
// The loop exits when the server is closed or when no clients remain
// (heartbeatProcessLoop restarts it as needed).
func (p *TStreamServer) processHeartbeat() {
	atomic.StoreInt32(&p.heartbeatRun, 1)
	defer atomic.StoreInt32(&p.heartbeatRun, 0)
	maxLoss := p.options.GetAllowMaxLoss()
	speed := p.options.GetHeartbeatSpeed()
	for {
		select {
		case <-p.closed:
			return
		default:
		}
		keys := p.clients.Keys()
		if len(keys) == 0 {
			// No connected clients: stop the loop rather than spin.
			return
		}
		for _, key := range keys {
			select {
			case <-p.closed:
				return
			default:
			}
			cli := p.getClient(key)
			if cli == nil {
				continue
			}
			if cli.HeartbeatLossCount() > maxLoss {
				// Too many missed heartbeats: treat the client as
				// disconnected and close its connection.
				if e := cli.Close(); e != nil {
					p.Log(logger.LEVEL_WARN, "关闭客户端出错:", e)
				}
				continue
			}
			// time.Since is the idiomatic form of time.Now().Sub(t).
			if time.Since(cli.Heartbeat()) > speed {
				cli.UpdateHeartbeat(true)
			}
		}
		time.Sleep(speed)
	}
}
// heartbeatProcessLoop starts the heartbeat goroutine if heartbeat
// processing is configured and not already running.
func (p *TStreamServer) heartbeatProcessLoop() {
	if p.options.GetHeartbeatSpeed() == 0 || p.options.GetAllowMaxLoss() == 0 {
		// Heartbeat disabled by configuration.
		return
	}
	// Do not start a new loop on a closed server.
	select {
	case <-p.closed:
		return
	default:
	}
	// NOTE(review): load-then-go is not atomic — two concurrent callers
	// could both read 0 and start two loops; confirm callers serialize
	// this (or switch to atomic.CompareAndSwapInt32).
	if isRun := atomic.LoadInt32(&p.heartbeatRun); isRun != 0 {
		return
	}
	go p.processHeartbeat()
}
|
package platform
import (
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/go-pg/pg"
"io/ioutil"
"time"
_ "github.com/lib/pq"
)
// DBConfig is the required properties to use the database.
// DBConfig is the required properties to use the database.
type DBConfig struct {
	Host          string // database host; port 5432 is assumed by pgCfg
	ServerName    string // TLS server name to verify the server against
	User          string // database user
	Password      string // database password
	DisableTLS    bool   // when true, connect without TLS
	ServerCA      string // path to the server CA certificate (PEM)
	ClientCert    string // path to the client certificate (PEM)
	ClientKeyCert string // path to the client private key (PEM)
}
// OpenDB knows how to open a database connection based on the configuration.
func OpenDB(cfg DBConfig) (*pg.DB, error) {
if cfg.DisableTLS {
return pg.Connect(pgCfg(cfg, nil)), nil
} else {
// TLS enabled, load keys and configure TLS
var serverCA []byte
var err error
if serverCA, err = ioutil.ReadFile(cfg.ServerCA); err != nil {
return nil, err
}
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(serverCA); !ok {
return nil, fmt.Errorf("unable to add cert for serverCA to cert pool. Config: %v", cfg)
}
clientCert := make([]tls.Certificate, 0, 1)
certs, err := tls.LoadX509KeyPair(cfg.ClientCert, cfg.ClientKeyCert)
if err != nil {
return nil, fmt.Errorf("loadX509KeyPair error: %v", err)
}
clientCert = append(clientCert, certs)
opt := pgCfg(cfg, &tls.Config{
ServerName: cfg.ServerName,
Certificates: clientCert,
RootCAs: cp,
InsecureSkipVerify: false,
})
return pg.Connect(opt), nil
}
}
// pgCfg builds the go-pg connection options for the given database
// configuration. c may be nil to connect without TLS.
func pgCfg(cfg DBConfig, c *tls.Config) *pg.Options {
	return &pg.Options{
		Addr:               fmt.Sprintf("%s:5432", cfg.Host),
		Database:           "postgres",
		User:               cfg.User,
		Password:           cfg.Password,
		TLSConfig:          c,
		MaxRetries:         1,
		MinRetryBackoff:    -1,
		DialTimeout:        30 * time.Second,
		ReadTimeout:        10 * time.Second,
		WriteTimeout:       10 * time.Second,
		PoolSize:           10,
		MaxConnAge:         10 * time.Second,
		PoolTimeout:        30 * time.Second,
		IdleTimeout:        10 * time.Second,
		IdleCheckFrequency: 100 * time.Millisecond,
	}
}
|
package log
import (
"fmt"
)
var _ CloseHandler = (*multiHandler)(nil)
type multiHandler []Handler
// MultiHandler return a multi handler.
// MultiHandler return a multi handler. Nested multi handlers are
// flattened so the result is a single flat list of leaf handlers.
func MultiHandler(handlers ...Handler) CloseHandler {
	h := multiHandler(handlers)
	h.expand()
	return &h
}
// expand recursively flattens any nested *multiHandler elements so the
// receiver becomes a flat list of leaf handlers.
func (h *multiHandler) expand() {
	flat := multiHandler{}
	for _, handler := range *h {
		nested, ok := handler.(*multiHandler)
		if !ok {
			flat = append(flat, handler)
			continue
		}
		nested.expand()
		flat = append(flat, *nested...)
	}
	*h = flat
}
// Handle forwards the entry to every registered sub-handler.
func (h *multiHandler) Handle(entry *Entry) {
	handlers := *h
	switch len(handlers) {
	case 0:
		return
	case 1:
		// Fast path for the single-handler case.
		handlers[0].Handle(entry)
		return
	}
	for _, handler := range handlers {
		handler.Handle(entry)
	}
}
// Close closes every sub-handler that implements CloseHandler. All
// handlers are attempted even if some fail; the collected failures,
// if any, are reported as one error.
func (h *multiHandler) Close() error {
	if len(*h) == 0 {
		return nil
	}
	// best effort to close every handler.
	var failures []error
	for _, handler := range *h {
		closer, ok := handler.(CloseHandler)
		if !ok {
			continue
		}
		if err := closer.Close(); err != nil {
			failures = append(failures, err)
		}
	}
	if len(failures) == 0 {
		return nil
	}
	return fmt.Errorf("%v", failures)
}
|
package mocking
import (
"errors"
"testing"
)
// TestThrowError verifies that ThrowError returns an error.
func TestThrowError(t *testing.T) {
	tests := []struct {
		name    string
		wantErr bool
	}{
		{"base-case", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fixed copy-paste bug: the failure message previously
			// referred to DoSomeStuff() although ThrowError is tested.
			if err := ThrowError(); (err != nil) != tt.wantErr {
				t.Errorf("ThrowError() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestDoSomeStuff exercises DoSomeStuff with a mocked interface and a
// patched package-level function variable, covering success and both
// failure paths.
func TestDoSomeStuff(t *testing.T) {
	tests := []struct {
		name       string
		DoStuff    error
		ThrowError error
		wantErr    bool
	}{
		{"base-case", nil, nil, false},
		{"DoStuff error", errors.New("failed"), nil, true},
		{"ThrowError error", nil, errors.New("failed"), true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// An example of mocking an interface
			// with our mock struct
			d := MockDoStuffer{}
			d.MockDoStuff = func(string) error { return tt.DoStuff }
			// mocking a function that is declared as a variable
			// will not work for func A(), must be var A = func()
			defer Patch(&ThrowError, func() error { return tt.ThrowError }).Restore()
			if err := DoSomeStuff(&d); (err != nil) != tt.wantErr {
				t.Errorf("DoSomeStuff() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
package auth
// Credentials carries authentication input: either a user/password
// pair or a bearer token. Pointer fields distinguish an absent value
// (nil) from a present-but-empty string.
type Credentials struct {
	User     *string `json:"user"`
	Password *string `json:"password"`
	Token    *string `json:"token"`
}

// Valid reports whether the credentials are usable: a token alone, or
// a complete user/password pair.
func (c *Credentials) Valid() bool {
	if c.Token != nil {
		return true
	}
	return c.User != nil && c.Password != nil
}
|
package main
import (
"encoding/json"
"github.com/fasthttp/router"
"github.com/valyala/fasthttp"
"net/http"
)
// apiNotFound renders a JSON "Not Found" error. Fixed: the handler
// previously returned status 400 while the body said "Not Found";
// it now returns 404 to match.
func apiNotFound(ctx *fasthttp.RequestCtx) {
	b, err := json.Marshal(map[string]interface{}{
		"error": http.StatusText(fasthttp.StatusNotFound),
	})
	if err != nil {
		panic(err)
	}
	ctx.SetStatusCode(fasthttp.StatusNotFound)
	ctx.SetBodyString(string(b))
}
// apiMethodNotAllowed renders a JSON "Method Not Allowed" error.
// Fixed: the handler previously returned status 400; it now returns
// 405 to match the body.
func apiMethodNotAllowed(ctx *fasthttp.RequestCtx) {
	b, err := json.Marshal(map[string]interface{}{
		"error": http.StatusText(fasthttp.StatusMethodNotAllowed),
	})
	if err != nil {
		panic(err)
	}
	ctx.SetStatusCode(fasthttp.StatusMethodNotAllowed)
	ctx.SetBodyString(string(b))
}
// apiPanicHandler converts a recovered panic value into a JSON 500
// response. It sets headers itself because panics bypass middleware.
func apiPanicHandler(ctx *fasthttp.RequestCtx, e interface{}) {
	b, err := json.Marshal(map[string]interface{}{
		"error":   http.StatusText(fasthttp.StatusInternalServerError),
		"message": e,
	})
	if err != nil {
		panic(err)
	}
	ctx.SetStatusCode(fasthttp.StatusInternalServerError)
	ctx.SetContentType("application/json")
	ctx.Response.Header.SetServer("webshot/v1")
	ctx.SetBodyString(string(b))
}
// apiRoot permanently redirects the root path to the /v1 help page.
func apiRoot(ctx *fasthttp.RequestCtx) {
	ctx.Redirect("/v1", fasthttp.StatusMovedPermanently)
}
// apiHelp lists the available API endpoints as indented JSON.
// Uses the fasthttp.StatusOK constant instead of a bare 200 for
// consistency with the other handlers.
func apiHelp(ctx *fasthttp.RequestCtx) {
	methods := map[string]interface{}{
		"/v1":       "Help",
		"/v1/add":   "Add a new URL to the queue to process the screenshot",
		"/v1/check": "Check if the URL being processed is complete",
		"/v1/info":  "Get screenshot URL to view / download image and other useful info",
	}
	b, err := json.MarshalIndent(methods, "", "  ")
	if err != nil {
		panic(err)
	}
	ctx.SetStatusCode(fasthttp.StatusOK)
	// NOTE(review): this deliberately overrides the middleware's
	// application/json content type although the body is JSON — confirm.
	ctx.Response.Header.SetContentType("text/plain")
	ctx.SetBodyString(string(b))
}
// apiAdd queues a URL for screenshotting. Not implemented yet.
func apiAdd(ctx *fasthttp.RequestCtx) {
	ctx.SetStatusCode(fasthttp.StatusNotImplemented)
	ctx.SetBodyString(http.StatusText(fasthttp.StatusNotImplemented))
}

// apiCheck reports processing status for a queued URL. Not implemented yet.
func apiCheck(ctx *fasthttp.RequestCtx) {
	ctx.SetStatusCode(fasthttp.StatusNotImplemented)
	ctx.SetBodyString(http.StatusText(fasthttp.StatusNotImplemented))
}

// apiInfo returns screenshot metadata for a processed URL. Not implemented yet.
func apiInfo(ctx *fasthttp.RequestCtx) {
	ctx.SetStatusCode(fasthttp.StatusNotImplemented)
	ctx.SetBodyString(http.StatusText(fasthttp.StatusNotImplemented))
}
// middleware wraps a handler with the defaults shared by all routes:
// status 200, JSON content type, and the Server header. Handlers may
// override any of these afterwards.
func middleware(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(ctx *fasthttp.RequestCtx) {
		ctx.SetStatusCode(fasthttp.StatusOK)
		ctx.SetContentType("application/json")
		ctx.Response.Header.SetServer("webshot/v1")
		h(ctx)
	}
}
// main wires the API routes and serves on :8080.
func main() {
	app := router.New()
	app.NotFound = middleware(apiNotFound)
	app.MethodNotAllowed = middleware(apiMethodNotAllowed)
	app.PanicHandler = apiPanicHandler
	app.Handle("GET", "/", middleware(apiRoot))
	app.Handle("GET", "/v1", middleware(apiHelp))
	app.Handle("GET", "/v1/add", middleware(apiAdd))
	app.Handle("GET", "/v1/check", middleware(apiCheck))
	app.Handle("GET", "/v1/info", middleware(apiInfo))
	if err := fasthttp.ListenAndServe(":8080", app.Handler); err != nil {
		panic(err)
	}
}
|
package graphics
// Texture declares all methods required to draw a texture.
type Texture interface {
	// Close releases the texture's resources.
	Close() error
	// Draw renders the texture at (x, y) with the given scale factors
	// and rotation.
	Draw(x, y int32, scaleX, scaleY float32, rotation float64) error
	// W returns the texture width in pixels.
	W() int32
	// H returns the texture height in pixels.
	H() int32
}
// TextureAtlas declares all methods required to draw a texture atlas.
type TextureAtlas interface {
	// Close releases the atlas's resources.
	Close() error
	// Draw renders the tile with the given index at (x, y) with the
	// given scale factors and rotation.
	Draw(tile int, x, y int32, scaleX, scaleY float32, rotation float64) error
	// Len returns the number of tiles in the atlas.
	Len() int
}
|
package adutils
import (
"io/ioutil"
"log"
"os/exec"
)
// SendMail sends a mail by invoking the local ./sm.sh helper script
// with the message content and the recipient address. Errors are
// logged, not returned.
//
// Fixed: the previous version called cmd.Output() and then
// cmd.Start()/cmd.Wait() on the same exec.Cmd; an exec.Cmd may only be
// started once, so the later calls always failed and logged spurious
// "already started" errors. The command is now run exactly once.
func SendMail(to, content string) {
	cmd := exec.Command("./sm.sh", content, to)
	if _, err := cmd.Output(); err != nil {
		log.Println(err.Error())
	}
}
// SendMail_stub is a stub that demonstrates running a command with
// captured stdout/stderr pipes (it runs "which ls" and logs the
// output). The to/subject/content parameters are currently unused.
//
// Fixed: removed the unreachable `return` that followed a panic call.
func SendMail_stub(to, subject, content string) {
	cmd := exec.Command("which", "ls")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err.Error())
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		panic(err.Error())
	}
	if err := cmd.Start(); err != nil {
		panic(err.Error())
	}
	// Pipes must be fully drained before Wait.
	bytesErr, err := ioutil.ReadAll(stderr)
	if err != nil {
		log.Println("ReadAll stderr: ", err.Error())
		return
	}
	if len(bytesErr) != 0 {
		log.Printf("stderr is not nil: %s", bytesErr)
		return
	}
	bytes, err := ioutil.ReadAll(stdout)
	if err != nil {
		log.Println("ReadAll stdout: ", err.Error())
		return
	}
	if err := cmd.Wait(); err != nil {
		panic(err.Error())
	}
	log.Println("stdout:", string(bytes))
}
|
package main
import (
"fmt"
)
// ANSI escape sequences used to colorize terminal output.
const (
	colorReset  = "\033[0m"
	colorRed    = "\033[31m"
	colorGreen  = "\033[32m"
	colorYellow = "\033[33m"
	colorBlue   = "\033[36m" // NOTE(review): \033[36m is cyan, not blue — confirm naming
)
// printMenue prints the welcome banner followed by the command help.
// The redundant string(...) conversions of the string constants were
// removed (no-op conversions flagged by linters).
func printMenue() {
	fmt.Println(colorBlue, `
	**************************************************
	******* Willkommen bei WER WIRD MILLIONÄR? *******
	******* Das Quiz besteht aus 10 Fragen ***********
	******* Pro Quiz können 2 Joker gewählt werden ***
	**************************************************`)
	printHelp()
}

// printHelp prints the overview of all interactive commands.
func printHelp() {
	fmt.Println(colorBlue, `
	********** Übersicht aller Commands **************
	s 		: Quiz starten
	a,b,c,d : Antwort auswählen
	5 		: 50/50 Joker auswählen (2 falsche Antworten werden ausgeblendet)
	r 		: Retry Joker auswählen (Auf Nummer sicher gehen, bei falscher Antwort bekommt man noch eine Chance)
	q 		: Quiz beenden
	h 		: Hilfe aufrufen (Übersicht aller Commands)`)
	fmt.Println(colorReset)
}
// printStartQuiz prompts the user to start the quiz with 's'.
// Removed the no-op string(...) conversions of string constants.
func printStartQuiz() {
	fmt.Println(colorYellow, "Starte Quiz mit 's'")
	fmt.Println(colorReset)
}

// printQuizStarted confirms that the quiz has started.
func printQuizStarted() {
	fmt.Println(colorBlue, "Quiz wurde gestartet, viel Glück!")
	fmt.Println(colorReset)
}

// printAskQuestionTitle prints the header for question n.
func printAskQuestionTitle(number int) {
	fmt.Println(colorBlue, "Frage Nr. ", number)
	fmt.Println(colorReset)
}
// printAskQuestion prints the question text and its answer options
// labeled A–D. Removed the no-op string(...) conversions of constants.
func printAskQuestion(question Question) {
	fmt.Println(colorYellow, question.Question)
	fmt.Println(colorBlue)
	for i := 0; i < len(question.Answers); i++ {
		answer := question.Answers[i]
		switch i {
		case 0:
			fmt.Println("A: ", answer.Text)
		case 1:
			fmt.Println("B: ", answer.Text)
		case 2:
			fmt.Println("C: ", answer.Text)
		case 3:
			fmt.Println("D: ", answer.Text)
		}
	}
	fmt.Println(colorReset)
}
// printCorrectAnswer congratulates the user; while there are questions
// left (currentLevel < 9) it also announces the next one.
// Removed the no-op string(...) conversions of string constants.
func printCorrectAnswer() {
	fmt.Println(colorGreen, "Die Anwort war richtig!")
	if currentLevel < 9 {
		fmt.Println(colorGreen, "Nächste Frage..")
	}
	fmt.Println(colorReset)
}

// printWrongAnswer informs the user the answer was wrong.
func printWrongAnswer() {
	fmt.Println(colorRed, "Die Anwort war leider falsch.")
	fmt.Println(colorReset)
}

// printGoodLuckNextTime prints the farewell after losing.
func printGoodLuckNextTime() {
	fmt.Println(colorYellow, "Viel Glück beim nächsten Mal!")
	fmt.Println(colorReset)
}

// printWhichCorrectAnswer reveals the correct answer.
func printWhichCorrectAnswer(answer Answer) {
	fmt.Println(colorYellow, "Korrekte Antworte wäre gewesen: ")
	fmt.Println(colorGreen, answer.Text)
	fmt.Println(colorReset)
}
// print50ChanceJokerUsed announces the 50/50 joker.
// Removed the no-op string(...) conversions of string constants.
func print50ChanceJokerUsed() {
	fmt.Println(colorGreen, "Der 50 / 50 Joker wurde eingesetzt!")
	fmt.Println(colorReset)
}

// printRetryJokerUsed announces the retry joker.
func printRetryJokerUsed() {
	fmt.Println(colorGreen, "Der Retry Joker wurde eingesetzt!")
	fmt.Println(colorReset)
}

// printNewChance announces the extra attempt granted by the retry joker.
func printNewChance() {
	fmt.Println(colorGreen, "Neuer Versuch..")
	fmt.Println(colorReset)
}

// printSuccess congratulates the user on answering all 10 questions.
func printSuccess() {
	fmt.Println(colorGreen, "Gratulation! Alle 10 Fragen wurden richtig beantwortet.")
	fmt.Println("	     Jetzt steht der Million nichts mehr im Wege!")
}

// printExitQuiz confirms that the quiz was quit.
func printExitQuiz() {
	fmt.Println(colorYellow, "Quiz wurde beendet!")
	fmt.Println(colorReset)
}
|
package main
import (
"io/ioutil"
"os"
"testing"
)
// TestSTR verifies str's conversion of assorted inputs to strings.
// Fixed: the failure message previously printed the input (test.in)
// where the expected value (test.out) belongs.
func TestSTR(t *testing.T) {
	pongoSetup()
	tests := []struct {
		in  interface{}
		out string
	}{
		{"", ""},
		{1, ""},
		{"Dude", "Dude"},
	}
	for _, test := range tests {
		actual := str(test.in)
		if actual != test.out {
			t.Errorf("expected %v actual %v", test.out, actual)
		}
	}
}
// TestCopyFile verifies that copyFile creates the destination file.
// Fixed: copyFile's error return was silently ignored; cleanup now
// uses defer so it runs on every exit path.
func TestCopyFile(t *testing.T) {
	inFile, err := ioutil.TempFile(os.TempDir(), "dude")
	if err != nil {
		panic(err)
	}
	defer os.Remove(inFile.Name())
	defer os.Remove("muhaha")
	if _, err := copyFile(inFile.Name(), "muhaha"); err != nil {
		t.Fatalf("copyFile returned error %v", err)
	}
	if _, err := os.Stat("muhaha"); os.IsNotExist(err) {
		t.Errorf("expected %q actual does not exist with error %v", inFile.Name(), err)
	}
}
// TestCopyFileErr verifies that copyFile fails for empty source and
// destination paths. Fixed: the failure messages claimed "expected
// copyfile to return nil" although the assertions expect an error.
func TestCopyFileErr(t *testing.T) {
	in := ""
	_, err := copyFile(in, "")
	if err == nil {
		t.Errorf("expected copyFile to return an error for empty source")
	}
	inFile, err := ioutil.TempFile(os.TempDir(), "dude")
	if err != nil {
		panic(err)
	}
	defer os.Remove(inFile.Name())
	_, err = copyFile(inFile.Name(), "")
	if err == nil {
		t.Errorf("expected copyFile to return an error for empty destination")
	}
}
|
/*
# 按行访问
## 思路
按照与逐行读取 Z 字形图案相同的顺序访问字符串。
## 算法
首先访问 行 0 中的所有字符,接着访问 行 1,然后 行 2,依此类推...
对于所有整数 k,
- 行 0 中的字符位于索引 k⋅(2⋅numRows−2) 处;
- 行 numRows−1 中的字符位于索引 k⋅(2⋅numRows−2)+numRows−1 处;
- 内部的行 i 中的字符位于索引 k⋅(2⋅numRows−2)+i 以及 (k+1)⋅(2⋅numRows−2)−i 处;
## 复杂度分析
时间复杂度:O(n),其中 n = len(s)。每个索引被访问一次。
空间复杂度:O(n)。对于 C++ 实现,如果返回字符串不被视为额外空间,则复杂度为 O(1)。
作者:LeetCode
链接:https://leetcode-cn.com/problems/zigzag-conversion/solution/z-zi-xing-bian-huan-by-leetcode/
*/
// convert performs the "ZigZag Conversion": it lays s out in a zigzag
// over numRows rows and reads it back row by row. Characters are
// visited directly by index: with cycleLen = 2*numRows-2, row 0 holds
// indices k*cycleLen, the last row holds k*cycleLen+numRows-1, and each
// inner row i additionally holds the diagonal index (k+1)*cycleLen-i.
//
// Improvement: appends into a preallocated byte buffer instead of
// string += in a loop, which was O(n²) due to repeated copying.
// NOTE: indexes bytes, so multi-byte UTF-8 input is not supported
// (same as the original).
func convert(s string, numRows int) string {
	if numRows <= 1 {
		return s
	}
	n := len(s)
	cycleLen := 2*numRows - 2
	out := make([]byte, 0, n)
	for i := 0; i < numRows; i++ { // row index
		for j := 0; j+i < n; j += cycleLen { // start of each zigzag cycle
			out = append(out, s[j+i])
			// Inner rows (not first, not last) also pick up the
			// diagonal character of the cycle, when in range.
			if i != 0 && i != numRows-1 && j+cycleLen-i < n {
				out = append(out, s[j+cycleLen-i])
			}
		}
	}
	return string(out)
}
package public
import (
"context"
"fmt"
"mime/multipart"
"path"
"time"
"tpay_backend/merchantapi/internal/common"
"tpay_backend/model"
"tpay_backend/pkg/cloudstorage"
"tpay_backend/merchantapi/internal/svc"
"tpay_backend/merchantapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// UploadFileLogic handles merchant file uploads to cloud storage.
type UploadFileLogic struct {
	logx.Logger
	ctx    context.Context     // request-scoped context
	svcCtx *svc.ServiceContext // service dependencies (storage, DB)
}
// NewUploadFileLogic builds an UploadFileLogic bound to the request
// context and the shared service context.
func NewUploadFileLogic(ctx context.Context, svcCtx *svc.ServiceContext) UploadFileLogic {
	return UploadFileLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// UploadFile stores an uploaded multipart file in cloud storage under a
// timestamp-based name and records the upload in the upload-file log.
// Only .xlsx files are accepted; other suffixes return
// NotSupportUploadFile.
func (l *UploadFileLogic) UploadFile(merchantId int64, fileHeader *multipart.FileHeader) (*types.UploadFileReply, error) {
	// NOTE(review): path.Ext returns only the extension (e.g. ".xlsx"),
	// despite the variable name suggesting a full filename.
	filenameWithSuffix := path.Ext(fileHeader.Filename)
	l.Infof("上传的文件名[%v], 后缀名[%v]", fileHeader.Filename, filenameWithSuffix)
	// Timestamp-based object name avoids collisions between uploads.
	baseName := fmt.Sprintf("%d%s", time.Now().Unix(), filenameWithSuffix)
	fileName := ""
	// Route the file to the storage directory for its type.
	switch filenameWithSuffix {
	case ".xlsx":
		fileName = path.Join(cloudstorage.Xlsx_Dir, baseName)
		//fileName = path.Join(cloudstorage.Misc_Dir, fileHeader.Filename)
	default:
		l.Errorf("文件后缀名[%v]错误", filenameWithSuffix)
		return nil, common.NewCodeError(common.NotSupportUploadFile)
	}
	// Upload the file to cloud storage.
	l.Infof("开始上传文件[%v]到云存储", fileName)
	if err := l.svcCtx.CloudStorage.UploadByMultipartFileHeader(fileName, fileHeader, true); err != nil {
		l.Errorf("上传到云存储失败,err:[%v]", err)
		return nil, common.NewCodeError(common.UploadFail)
	}
	l.Infof("上传文件[%v]到云存储结束", fileName)
	// Record the upload in the audit log table.
	if err := model.NewUploadFileLogModel(l.svcCtx.DbEngine).Insert(&model.UploadFileLog{
		FileName:    fileName,
		AccountId:   merchantId,
		AccountType: model.UploadFileLogAccountTypeMerchant,
	}); err != nil {
		l.Errorf("插入日志失败,err:[%v]", err)
		return nil, common.NewCodeError(common.UploadFail)
	}
	return &types.UploadFileReply{
		FileName: fileName,
	}, nil
}
|
package mysql
import _ "github.com/go-sql-driver/mysql" // Import the mysql driver.
|
package main
import (
"fmt"
"runtime"
"github.com/Dliv3/Venom/admin/cli"
"github.com/Dliv3/Venom/admin/dispather"
"github.com/Dliv3/Venom/netio"
"github.com/Dliv3/Venom/node"
)
// main starts the Venom admin node: it parses CLI arguments,
// initializes the current node's buffers and handlers, connects to or
// listens for a remote agent node, and drops into the interactive CLI.
func main() {
	// NOTE(review): GOMAXPROCS defaults to NumCPU since Go 1.5; this
	// call is likely redundant — confirm before removing.
	runtime.GOMAXPROCS(runtime.NumCPU())
	cli.ParseArgs()
	fmt.Println("Venom Admin Node Start...")
	cli.ShowBanner()
	// fmt.Println(node.CurrentNode.HashID)
	node.CurrentNode.IsAdmin = 1
	node.CurrentNode.InitCommandBuffer()
	node.CurrentNode.InitDataBuffer()
	dispather.InitAdminHandler()
	if cli.Args.Mode == cli.CONNECT_MODE {
		// Actively connect out to a remote agent.
		netio.InitNode(
			"connect",
			fmt.Sprintf("%s:%d", cli.Args.RemoteIP, uint16(cli.Args.RemotePort)),
			dispather.AdminClient, false, 0)
	} else {
		// Listen for an inbound agent connection.
		netio.InitNode(
			"listen",
			fmt.Sprintf("0.0.0.0:%d", uint16(cli.Args.LocalPort)),
			dispather.AdminServer, false, 0)
	}
	cli.Interactive()
}
|
package main
import (
"fmt"
"strings"
)
// main reads one whitespace-delimited token from standard input and
// prints how many of its characters are unchanged by strings.ToUpper
// (i.e. already upper case; digits and symbols also count, preserving
// the original behavior).
//
// Fixed: the loop variable was named `rune`, shadowing the builtin
// rune type; commented-out exploration code was removed.
func main() {
	var input string
	// Scanf needs the variable's address to store the scanned token.
	fmt.Scanf("%s\n", &input)
	answer := 0
	// Strings are read-only UTF-8 byte slices; range yields runes.
	for _, r := range input {
		ch := string(r)
		if strings.ToUpper(ch) == ch {
			answer++
		}
	}
	fmt.Println(answer)
}
|
// Demo of sorting on a slice
package main
import (
"fmt"
"sort"
)
// dump slice length, capacity, and contents
func dump(label string, slice []string) {
fmt.Printf("%v: length %v, capacity %v %v\n", label, len(slice),cap(slice), slice)
}
// main demonstrates appending to and sorting a slice of strings.
// Uses the idiomatic sort.Strings (equivalent to
// sort.StringSlice(...).Sort()) and gofmt-clean spacing.
func main() {
	// Declare a slice of the eight planets.
	planets := []string{
		"Mercury", "Venus", "Earth", "Mars",
		"Jupiter", "Saturn", "Uranus", "Neptune",
	}
	planets = append(planets, "Krypton")
	// Sort planets alphabetically.
	sort.Strings(planets)
	fmt.Println(planets)
	dump("Planets Slice", planets)
}
|
package omokServer
import (
"fmt"
"omokServer/protocol"
"scommon"
)
// packetProcess dispatches one received packet: it resolves the
// registered handler for the packet ID and invokes it with the
// session's user. Unknown packet IDs and unknown sessions are logged.
func (svr *Server) packetProcess(sessionIndex int, packetData []byte) {
	packetID := protocol.PeekPacketID(packetData)
	_, bodyData := protocol.PeekPacketBody(packetData)
	if pfunc := svr.getPacketFunc(packetID); pfunc != nil {
		if user, ok := svr._userMgr.GetUser(sessionIndex); ok {
			pfunc(user, bodyData)
		} else {
			scommon.LogError(fmt.Sprintf("[packetProcess] invalid User. _sessionIndex: %d", sessionIndex))
		}
	} else {
		scommon.LogError(fmt.Sprintf("[packetProcess] invalid packetID: %d", packetID))
	}
}
// getPacketFunc returns the handler registered for packetID, or nil
// when no handler has been registered for it.
func (svr *Server) getPacketFunc(packetID int16) func(*gameUser, []byte) int16 {
	for idx, registeredID := range svr._funcPackeIdlist {
		if registeredID == packetID {
			return svr._funclist[idx]
		}
	}
	return nil
}
// settingPacketFunction initializes the packet handler tables and
// registers the handlers for login and heartbeat packets.
func (svr *Server) settingPacketFunction() {
	maxFuncListCount := 16
	svr._funclist = make([]func(*gameUser, []byte) int16, 0, maxFuncListCount)
	svr._funcPackeIdlist = make([]int16, 0, maxFuncListCount)
	svr._addPacketFunction(protocol.PACKET_ID_LOGIN_REQ, svr.packetProcessLogin)
	svr._addPacketFunction(protocol.PACKET_ID_HEARTBEAT_RES, svr.packetProcessHeartBeat)
}
// _addPacketFunction registers packetFunc as the handler for packetID.
// The two parallel slices are kept in sync by appending to both.
func (svr *Server) _addPacketFunction(packetID int16,
	packetFunc func(*gameUser, []byte) int16) {
	svr._funclist = append(svr._funclist, packetFunc)
	svr._funcPackeIdlist = append(svr._funcPackeIdlist, packetID)
}
|
package downloader
import (
"sync"
"github.com/lf-edge/eve/pkg/pillar/pubsub"
"github.com/lf-edge/eve/pkg/pillar/types"
"github.com/lf-edge/eve/pkg/pillar/zedUpload"
log "github.com/sirupsen/logrus"
)
// downloaderContext holds the downloader agent's pubsub subscriptions,
// publications, and shared state.
type downloaderContext struct {
	dCtx                    *zedUpload.DronaCtx // upload/download transport context
	subDeviceNetworkStatus  pubsub.Subscription
	subAppImgConfig         pubsub.Subscription
	pubAppImgStatus         pubsub.Publication
	subBaseOsConfig         pubsub.Subscription
	pubBaseOsStatus         pubsub.Publication
	subCertObjConfig        pubsub.Subscription
	pubCertObjStatus        pubsub.Publication
	subGlobalDownloadConfig pubsub.Subscription
	pubGlobalDownloadStatus pubsub.Publication
	subDatastoreConfig      pubsub.Subscription
	deviceNetworkStatus     types.DeviceNetworkStatus
	globalConfig            types.GlobalDownloadConfig
	globalStatusLock        sync.Mutex // guards globalStatus
	globalStatus            types.GlobalDownloadStatus
	subGlobalConfig         pubsub.Subscription
	GCInitialized           bool // set once global config has been received
}
// registerHandlers creates and activates all pubsub subscriptions and
// publications the downloader needs. The setup order matters: global
// config, network status, download config, and datastore config come
// first, then the status publications, then the per-object download
// config subscriptions, and finally the restart signals.
func (ctx *downloaderContext) registerHandlers(ps *pubsub.PubSub) error {
	// Look for global config such as log levels
	subGlobalConfig, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleGlobalConfigModify,
		ModifyHandler: handleGlobalConfigModify,
		DeleteHandler: handleGlobalConfigDelete,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		TopicImpl:     types.GlobalConfig{},
		Ctx:           ctx,
	})
	if err != nil {
		return err
	}
	ctx.subGlobalConfig = subGlobalConfig
	subGlobalConfig.Activate()
	// Track device network status (from nim) for download connectivity.
	subDeviceNetworkStatus, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleDNSModify,
		ModifyHandler: handleDNSModify,
		DeleteHandler: handleDNSDelete,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		TopicImpl:     types.DeviceNetworkStatus{},
		Ctx:           ctx,
		AgentName:     "nim",
	})
	if err != nil {
		return err
	}
	ctx.subDeviceNetworkStatus = subDeviceNetworkStatus
	subDeviceNetworkStatus.Activate()
	subGlobalDownloadConfig, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleGlobalDownloadConfigModify,
		ModifyHandler: handleGlobalDownloadConfigModify,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		Ctx:           ctx,
		TopicImpl:     types.GlobalDownloadConfig{},
	})
	if err != nil {
		return err
	}
	ctx.subGlobalDownloadConfig = subGlobalDownloadConfig
	subGlobalDownloadConfig.Activate()
	// Look for DatastoreConfig. We should process this
	// before any download config ( App/baseos/cert). Without DataStore Config,
	// Image Downloads will run into errors.
	subDatastoreConfig, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleDatastoreConfigModify,
		ModifyHandler: handleDatastoreConfigModify,
		DeleteHandler: handleDatastoreConfigDelete,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		AgentName:     "zedagent",
		TopicImpl:     types.DatastoreConfig{},
		Ctx:           ctx,
	})
	if err != nil {
		return err
	}
	ctx.subDatastoreConfig = subDatastoreConfig
	subDatastoreConfig.Activate()
	pubGlobalDownloadStatus, err := ps.NewPublication(pubsub.PublicationOptions{
		AgentName: agentName,
		TopicType: types.GlobalDownloadStatus{},
	})
	if err != nil {
		return err
	}
	ctx.pubGlobalDownloadStatus = pubGlobalDownloadStatus
	// Set up our publications before the subscriptions so ctx is set
	// NOTE(review): several subscriptions are in fact created above,
	// before these publications — confirm whether the ordering comment
	// still holds or only applies to the per-object subscriptions below.
	pubAppImgStatus, err := ps.NewPublication(pubsub.PublicationOptions{
		AgentName:  agentName,
		AgentScope: types.AppImgObj,
		TopicType:  types.DownloaderStatus{},
	})
	if err != nil {
		return err
	}
	ctx.pubAppImgStatus = pubAppImgStatus
	pubAppImgStatus.ClearRestarted()
	pubBaseOsStatus, err := ps.NewPublication(pubsub.PublicationOptions{
		AgentName:  agentName,
		AgentScope: types.BaseOsObj,
		TopicType:  types.DownloaderStatus{},
	})
	if err != nil {
		return err
	}
	ctx.pubBaseOsStatus = pubBaseOsStatus
	pubBaseOsStatus.ClearRestarted()
	pubCertObjStatus, err := ps.NewPublication(pubsub.PublicationOptions{
		AgentName:  agentName,
		AgentScope: types.CertObj,
		TopicType:  types.DownloaderStatus{},
	})
	if err != nil {
		return err
	}
	ctx.pubCertObjStatus = pubCertObjStatus
	pubCertObjStatus.ClearRestarted()
	// Per-object download config subscriptions.
	subAppImgConfig, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleAppImgCreate,
		ModifyHandler: handleAppImgModify,
		DeleteHandler: handleAppImgDelete,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		AgentName:     "zedmanager",
		AgentScope:    types.AppImgObj,
		TopicImpl:     types.DownloaderConfig{},
		Ctx:           ctx,
	})
	if err != nil {
		return err
	}
	ctx.subAppImgConfig = subAppImgConfig
	subAppImgConfig.Activate()
	subBaseOsConfig, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleBaseOsCreate,
		ModifyHandler: handleBaseOsModify,
		DeleteHandler: handleBaseOsDelete,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		AgentName:     "baseosmgr",
		AgentScope:    types.BaseOsObj,
		TopicImpl:     types.DownloaderConfig{},
		Ctx:           ctx,
	})
	if err != nil {
		return err
	}
	ctx.subBaseOsConfig = subBaseOsConfig
	subBaseOsConfig.Activate()
	subCertObjConfig, err := ps.NewSubscription(pubsub.SubscriptionOptions{
		CreateHandler: handleCertObjCreate,
		ModifyHandler: handleCertObjModify,
		DeleteHandler: handleCertObjDelete,
		WarningTime:   warningTime,
		ErrorTime:     errorTime,
		AgentName:     "baseosmgr",
		AgentScope:    types.CertObj,
		TopicImpl:     types.DownloaderConfig{},
		Ctx:           ctx,
	})
	if err != nil {
		return err
	}
	ctx.subCertObjConfig = subCertObjConfig
	subCertObjConfig.Activate()
	// Signal that initial restart processing is complete.
	pubAppImgStatus.SignalRestarted()
	pubBaseOsStatus.SignalRestarted()
	pubCertObjStatus.SignalRestarted()
	return nil
}
// subscription returns the DownloaderConfig subscription for the given
// object type. Unknown object types are a fatal programming error.
func (ctx *downloaderContext) subscription(objType string) pubsub.Subscription {
	switch objType {
	case types.AppImgObj:
		return ctx.subAppImgConfig
	case types.BaseOsObj:
		return ctx.subBaseOsConfig
	case types.CertObj:
		return ctx.subCertObjConfig
	}
	log.Fatalf("downloaderSubscription: Unknown ObjType %s\n",
		objType)
	return nil
}
// publication returns the DownloaderStatus publication for the given
// object type. Unknown object types are a fatal programming error.
func (ctx *downloaderContext) publication(objType string) pubsub.Publication {
	switch objType {
	case types.AppImgObj:
		return ctx.pubAppImgStatus
	case types.BaseOsObj:
		return ctx.pubBaseOsStatus
	case types.CertObj:
		return ctx.pubCertObjStatus
	}
	log.Fatalf("downloaderPublication: Unknown ObjType %s\n",
		objType)
	return nil
}
|
package websocket
import (
"github.com/gorilla/websocket"
)
// IWebSocketService describes a service that manages websocket
// connections and can broadcast messages to connected clients.
// Reformatted `interface {}` to the gofmt-standard `interface{}`.
type IWebSocketService interface {
	// InitConnection registers a new websocket connection together
	// with caller-supplied context data.
	InitConnection(*websocket.Conn, interface{})
	// Broadcast sends the given info to connected clients.
	Broadcast(*BroadcastInfo)
}
|
// Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package main
import (
"flag"
"log"
"strings"
"github.com/golang/glog"
"github.com/google/mtail/mtail"
"github.com/prometheus/client_golang/prometheus"
"net/http"
_ "net/http/pprof"
)
// Command-line flags and the collector's self-description metric.
var (
	port  = flag.String("port", "3903", "HTTP port to listen on.")
	logs  = flag.String("logs", "", "List of files to monitor.")
	progs = flag.String("progs", "", "Directory containing programs")
	// only used by the mtail collector Describe()
	collected = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "caching_collected_metrics",
		Help: "total collected metrics",
	})
)
// main validates the -progs and -logs flags, starts an mtail instance
// tailing the given logs, registers it as a Prometheus collector, and
// serves /metrics (plus pprof) on the configured port.
func main() {
	flag.Parse()
	if *progs == "" {
		glog.Exitf("No mtail program directory specified; use -progs")
	}
	if *logs == "" {
		glog.Exitf("No logs specified to tail; use -logs")
	}
	// Split the comma-separated -logs value, dropping empty entries.
	var logPathnames []string
	for _, pathname := range strings.Split(*logs, ",") {
		if pathname != "" {
			logPathnames = append(logPathnames, pathname)
		}
	}
	if len(logPathnames) == 0 {
		glog.Exit("No logs to tail.")
	}
	o := mtail.Options{
		Progs:    *progs,
		LogPaths: logPathnames,
		Port:     *port,
	}
	m, err := mtail.New(o)
	if err != nil {
		glog.Fatalf("couldn't start: %s", err)
	}
	c := newMtailCollector(m)
	prometheus.MustRegister(c)
	go monitor()
	http.Handle("/metrics", prometheus.Handler())
	log.Fatal(http.ListenAndServe(":"+*port, nil))
}
|
package main
import (
"fmt"
"net/url"
)
// main demonstrates URL query encoding and decoding of a non-ASCII
// message with net/url.
func main() {
	// url encode
	params := url.Values{}
	params.Add("msg", "此订单不存在或已经提交")
	encoded := params.Encode()
	fmt.Println(params)
	fmt.Println(encoded)
	// url decode
	decoded, _ := url.ParseQuery(encoded)
	fmt.Println(decoded)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.