text stringlengths 11 4.05M |
|---|
package ansi
import (
"fmt"
"strconv"
)
const (
	// Number of argument bytes / integer arguments a Seq stores inline
	// before spilling into its heap-allocated overflow slices.
	numStaticBytes = 3
	numStaticInts = 3
)
// Seq represents an escape sequence, led either by ESC or CSI control
// sequence for writing to some output. May only be constructed by any of the
// Escape.With family of methods.
type Seq struct {
	id Escape // ESC/CSI identifier rune; zero means "empty sequence"
	numBytes int // total count of argument bytes
	numInts int // total count of integer arguments (CSI only)
	// The first numStaticBytes/numStaticInts arguments live inline in the
	// arrays below; any overflow spills into the extra slices.
	argBytes [numStaticBytes]byte
	argInts [numStaticInts]int
	argExtraBytes []byte
	argExtraInts []int
}
// seq constructs a bare Seq for this identifier after validating that it is
// either a C0 control rune (0x0001-0x001E) or one of the private-use ESC/CSI
// identifiers (0xEF01-0xEFFE).
// Panics if the identifier is a normal non-Escape rune.
func (id Escape) seq() Seq {
	switch {
	case 0x0000 < id && id < 0x001F,
		0xEF00 < id && id < 0xEFFF:
		return Seq{id: id}
	}
	// was "not an Control" — fixed grammar in the panic message
	panic(fmt.Sprintf("not a Control or Escape rune: %U", id))
}
// With constructs an escape sequence with this identifier and given argument
// byte(s).
// Panics if the escape id is a normal non-Escape rune.
// See Seq.With for details.
func (id Escape) With(arg ...byte) Seq { return id.seq().With(arg...) }
// WithInts constructs an escape sequence with this identifier and the given
// integer argument(s).
// Panics if the escape id is a normal non-Escape rune.
// See Seq.WithInts for details.
func (id Escape) WithInts(args ...int) Seq { return id.seq().WithInts(args...) }
// WithPoint constructs an escape sequence with a screen point's component
// values added as integer arguments in column,row (Y,X) order.
func (id Escape) WithPoint(p Point) Seq { return id.WithInts(p.Y, p.X) }
// ID returns the sequence's Escape identifier.
func (seq Seq) ID() Escape { return seq.id }
// With returns a copy of the sequence with the given argument bytes added.
// Argument bytes will be written immediately after the ESCape identifier
// itself.
func (seq Seq) With(arg ...byte) Seq {
	if len(arg) == 0 {
		return seq
	}
	n := seq.numBytes
	// When the inline array will overflow, build a fresh overflow slice so
	// the returned copy never shares a backing array with the receiver.
	if need := n + len(arg) - numStaticBytes; need > 0 {
		extra := make([]byte, 0, need)
		extra = append(extra, seq.argExtraBytes...)
		seq.argExtraBytes = extra
	}
	for _, b := range arg {
		if n < numStaticBytes {
			seq.argBytes[n] = b
		} else {
			seq.argExtraBytes = append(seq.argExtraBytes, b)
		}
		n++
	}
	seq.numBytes = n
	return seq
}
// WithInts returns a copy of the sequence with the given integer arguments
// added. These integer arguments will be written after any byte and string
// arguments in base-10 form, separated by a ';' byte.
// Panics if the sequence identifier is not a CSI function.
func (seq Seq) WithInts(args ...int) Seq {
	if len(args) == 0 {
		return seq
	}
	if !(0xEF80 < seq.id && seq.id < 0xEFFF) {
		panic("may only provide integer arguments to a CSI-sequence")
	}
	n := seq.numInts
	// When the inline array will overflow, build a fresh overflow slice so
	// the returned copy never shares a backing array with the receiver.
	if need := n + len(args) - numStaticInts; need > 0 {
		extra := make([]int, 0, need)
		extra = append(extra, seq.argExtraInts...)
		seq.argExtraInts = extra
	}
	for _, v := range args {
		if n < numStaticInts {
			seq.argInts[n] = v
		} else {
			seq.argExtraInts = append(seq.argExtraInts, v)
		}
		n++
	}
	seq.numInts = n
	return seq
}
// WithPoint returns a copy of the sequence with the given screen point's
// component values added as integer arguments in column,row (Y,X) order.
func (seq Seq) WithPoint(p Point) Seq { return seq.WithInts(p.Y, p.X) }
// AppendTo appends the escape code to the given byte slice.
// Unrecognized identifiers append nothing and return p unchanged.
func (id Escape) AppendTo(p []byte) []byte {
	// TODO stricter
	// Range bounds use <= for the C0/C1 upper limits to match AppendWith and
	// Size; the previous exclusive bound silently dropped 0x1F and 0x9F.
	switch {
	case 0x0000 < id && id <= 0x001F: // C0 controls
		return append(p, byte(id&0x1F))
	case 0x0080 < id && id <= 0x009F: // C1 controls
		return append(p, '\x1b', byte(0x40|id&0x1F))
	case 0xEF20 < id && id < 0xEF7E: // ESC + byte
		return append(p, '\x1b', byte(id&0x7F))
	case 0xEF80 < id && id < 0xEFFF: // CSI + arg (if any)
		return append(p, '\x1b', '[', byte(id&0x7F))
	}
	return p
}
// AppendWith appends the escape code and any given argument bytes to the given
// byte slice.
func (id Escape) AppendWith(p []byte, arg ...byte) []byte {
	// TODO stricter
	switch {
	case 0x0000 < id && id <= 0x001F: // C0 controls
		return append(p, byte(id&0x1F))
	case 0x0080 < id && id <= 0x009F: // C1 controls
		return append(p, '\x1b', byte(0x40|id&0x1F))
	case 0xEF20 < id && id < 0xEF7E: // ESC + byte
		// arguments go between the ESC introducer and the final byte
		p = append(p, '\x1b')
		p = append(p, arg...)
		return append(p, byte(id&0x7F))
	case 0xEF80 < id && id < 0xEFFF: // CSI + arg (if any)
		p = append(p, '\x1b', '[')
		p = append(p, arg...)
		return append(p, byte(id&0x7F))
	}
	return p
}
// AppendTo writes the control sequence into the given byte buffer.
// A zero-valued sequence appends nothing.
func (seq Seq) AppendTo(p []byte) []byte {
	switch id := seq.id; {
	case id == 0:
		// zero value: encode nothing
	case 0x0000 < id && id < 0x001F: // C0 controls
		p = append(p, byte(id))
		p = seq.appendArgBytes(p)
	case 0xEF80 < id && id < 0xEFFF: // CSI
		p = append(p, "\x1b["...)
		p = seq.appendArgBytes(p)
		p = seq.appendArgNums(p)
		p = append(p, byte(id&0x7F))
	case 0xEF20 < id && id < 0xEF2F: // ESC character set control
		// NOTE character set selection sequences are special, in that they're
		// always a 3 byte sequence, and identified by the first
		// (intermediate range) byte after the ESC.
		// This case MUST precede the general ESC case below, whose range
		// contains this sub-range; previously it came after and was
		// unreachable dead code.
		p = append(p, '\x1b', byte(id&0x7F), seq.argBytes[0])
	case 0xEF00 < id && id < 0xEF7F: // ESC
		p = append(p, '\x1b')
		p = seq.appendArgBytes(p)
		p = append(p, byte(id&0x7F))
	default:
		panic("inconceivable: should not be able to construct a Seq like that")
	}
	return p
}
// appendArgBytes appends the sequence's argument bytes to p: the inline
// bytes first, then any overflow.
// Generalized over numStaticBytes — the old hand-unrolled switch required a
// new case whenever the inline capacity changed.
func (seq Seq) appendArgBytes(p []byte) []byte {
	if n := seq.numBytes; n <= numStaticBytes {
		return append(p, seq.argBytes[:n]...)
	}
	p = append(p, seq.argBytes[:numStaticBytes]...)
	return append(p, seq.argExtraBytes...)
}
// appendArgNums appends the sequence's integer arguments to p in base-10
// form, separated by ';' bytes.
func (seq Seq) appendArgNums(p []byte) []byte {
	for i := 0; i < seq.numInts; i++ {
		if i > 0 {
			p = append(p, ';')
		}
		var v int
		if i < numStaticInts {
			v = seq.argInts[i]
		} else {
			v = seq.argExtraInts[i-numStaticInts]
		}
		p = strconv.AppendInt(p, int64(v), 10)
	}
	return p
}
// Size returns the number of bytes required to encode the escape.
// Returns 0 for identifiers outside every recognized range.
func (id Escape) Size() int {
	switch {
	case 0x0000 < id && id <= 0x001F: // C0 controls
		return 1
	case 0x0080 < id && id <= 0x009F: // C1 controls
		return 2
	case 0xEF20 < id && id < 0xEF7E: // ESC + byte
		return 2
	case 0xEF80 < id && id < 0xEFFF: // CSI + arg (if any)
		return 3
	}
	return 0
}
// Size returns the number of bytes required to encode the escape sequence.
// This is a budget estimate: 4 covers introducer and final bytes, and each
// integer argument is budgeted 10 bytes (digits plus ';' separator).
// NOTE(review): an argument wider than 9 digits (or negative) would exceed
// the 10-byte budget — confirm callers only pass small non-negative values.
func (seq Seq) Size() int {
	if seq.id == 0 {
		return 0
	}
	return 4 + seq.numBytes + 10*seq.numInts
}
// String renders the sequence for debugging: the identifier's own string
// form followed by its quoted encoded arguments.
func (seq Seq) String() string {
	if seq.id == 0 && seq.numBytes == 0 && seq.numInts == 0 {
		return ""
	}
	args := make([]byte, 0, seq.numBytes+10*seq.numInts)
	args = seq.appendArgBytes(args)
	args = seq.appendArgNums(args)
	return fmt.Sprintf("%v%q", seq.id, args)
}
|
package db
import "database/sql"
// QueryDb looks up a record by id.
// NOTE(review): stub implementation — always returns sql.ErrNoRows
// regardless of id.
func QueryDb(id int) (interface{}, error) {
	return nil, sql.ErrNoRows
}
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.4.1.dev1
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
package swagger
// GetFleetsFleetIdOk is the 200 OK response object for the
// GET /fleets/{fleet_id}/ endpoint (generated from the EVE Swagger spec).
type GetFleetsFleetIdOk struct {
	// Is free-move enabled
	IsFreeMove bool `json:"is_free_move,omitempty"`
	// Does the fleet have an active fleet advertisement
	IsRegistered bool `json:"is_registered,omitempty"`
	// Is EVE Voice enabled
	IsVoiceEnabled bool `json:"is_voice_enabled,omitempty"`
	// Fleet MOTD in CCP flavoured HTML
	Motd string `json:"motd,omitempty"`
}
|
package logger
import (
"nighthawk/rabbitmq"
"time"
"github.com/streadway/amqp"
)
// Logger is the JSON-serializable shape of a single log record.
type Logger struct {
	Timestamp time.Time `json:"timestamp"` // when the event occurred
	LogLevel string `json:"log_level"` // severity label
	Worker string `json:"worker"` // originating worker name
	Body interface{} `json:"body"` // arbitrary event payload
}
// LoggerFactory describes the MQ-logger lifecycle.
// NOTE(review): the method signatures do not match the package-level
// InitMQLogger/ConsumeMQLogger functions (no return values here) — confirm
// whether this interface is actually implemented anywhere.
type LoggerFactory interface {
	InitMQLogger()
	ConsumeMQLogger(ch *amqp.Channel, rconfig *rabbitmq.RabbitMQConfig)
}
// InitMQLogger loads the RabbitMQ configuration, connects to the configured
// server, and opens a channel on the connection.
// Panics if the channel cannot be opened.
// NOTE(review): the underlying connection is neither returned nor closed, so
// it cannot be shut down cleanly by the caller — confirm this is intended.
func InitMQLogger() (*amqp.Channel, rabbitmq.RabbitMQConfig) {
	rconfig := rabbitmq.LoadRabbitMQConfig(rabbitmq.RABBITMQ_CONFIG_FILE)
	conn := rabbitmq.Connect(rconfig.Server)
	ch, err := conn.Channel()
	if err != nil {
		panic(err.Error())
	}
	return ch, rconfig
}
// ConsumeMQLogger declares the logger queue on ch and returns a channel of
// deliveries consumed from it.
// NOTE(review): the result of the queue declaration is discarded — a failed
// declare will only surface later through the consumer; confirm acceptable.
func ConsumeMQLogger(ch *amqp.Channel, rconfig *rabbitmq.RabbitMQConfig) <-chan amqp.Delivery {
	_ = rabbitmq.RabbitQueueDeclare(ch, rconfig.Logger)
	messages := rabbitmq.RabbitQueueConsumer(ch, rconfig.Logger)
	return messages
}
|
package main
import (
"database/sql"
"encoding/json"
"fmt"
_ "github.com/lib/pq"
"io/ioutil"
"log"
"net/http"
"strconv"
)
// post mirrors one element of the jsonplaceholder /posts payload.
type post struct {
	UserId int `json:"user_id"` // owning user
	Id int `json:"id"` // post id, used to fetch its comments
	Title string `json:"title"`
	Body string `json:"body"`
}
// comment mirrors one element of the jsonplaceholder /comments payload.
type comment struct {
	Name string `json:"name"`
	Email string `json:"email"`
	Body string `json:"body"`
}
// fetchJSON GETs url and decodes the JSON response body into out.
// Any failure is fatal, matching the script's fail-fast style.
func fetchJSON(url string, out interface{}) {
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err.Error())
	}
	// Close the body so the transport can reuse the connection
	// (the original leaked every response body).
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err.Error())
	}
	if err := json.Unmarshal(body, out); err != nil {
		log.Fatal(err.Error())
	}
}

// main imports user 7's posts and their comments from jsonplaceholder into
// the local postgres database, one transaction per post.
func main() {
	host, user, password, dbname, port, sslmode := "localhost", "postgres", "mysql", "test", "5432", "disable"
	db, err := sql.Open("postgres", fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%s sslmode=%s", host, user, password, dbname, port, sslmode))
	if err != nil {
		log.Fatal(err.Error())
	}
	defer db.Close()
	if err = db.Ping(); err != nil {
		log.Fatal(err)
	}

	var posts []post
	fetchJSON("https://jsonplaceholder.typicode.com/posts?userId=7", &posts)

	for _, post := range posts {
		tx, err := db.Begin()
		if err != nil {
			log.Fatal(err.Error())
		}
		// Parameterized queries: titles/bodies come from a remote service and
		// must not be spliced into SQL text (injection and quoting bugs —
		// any payload containing ' broke the original Sprintf-built query).
		var postId int
		row := tx.QueryRow("INSERT INTO posts(user_id,title,body) VALUES ($1,$2,$3) RETURNING id", 7, post.Title, post.Body)
		if err := row.Scan(&postId); err != nil {
			tx.Rollback()
			log.Fatal(err.Error())
		}

		var comments []comment
		fetchJSON("https://jsonplaceholder.typicode.com/comments?postId="+strconv.Itoa(post.Id), &comments)

		for _, comment := range comments {
			if _, err := tx.Exec("INSERT INTO comments(post_id, name, email, body) VALUES ($1,$2,$3,$4)", postId, comment.Name, comment.Email, comment.Body); err != nil {
				tx.Rollback()
				log.Fatal(err.Error())
			}
		}
		// Commit errors were previously ignored.
		if err := tx.Commit(); err != nil {
			log.Fatal(err.Error())
		}
	}

	// Wait for user input before exiting. fmt.Scan requires a pointer; the
	// original passed the string value, which always errors immediately.
	var a string
	fmt.Scan(&a)
}
|
package main
import (
"fmt"
)
// main reports which of a small set of known values x matches.
func main() {
	x := 423
	// A switch replaces the cascaded if/else-if chain (idiomatic Go).
	switch x {
	case 41:
		fmt.Println("This is 41")
	case 42:
		fmt.Println("This is 42")
	case 43:
		fmt.Println("This is 43")
	default:
		// The original message claimed 40 was handled, but no branch ever
		// checked for 40.
		fmt.Println("The value was not 41, 42 or 43")
	}
}
|
package gluatemplate
import (
"bytes"
"fmt"
"github.com/yuin/gopher-lua"
"text/template"
)
// Loader is the gopher-lua module entry point: it builds the module table
// exposing dostring and dofile and pushes it onto the Lua stack.
// Returns 1 (the number of Lua return values).
func Loader(L *lua.LState) int {
	tb := L.NewTable()
	L.SetFuncs(tb, map[string]lua.LGFunction{
		"dostring": doString,
		"dofile": doFile,
	})
	L.Push(tb)
	return 1
}
// doString renders the template source given as the first Lua argument,
// optionally using a table (second argument) as the data context.
// Pushes the rendered string on success, or nil plus an error message.
func doString(L *lua.LState) int {
	src := L.CheckString(1)
	tmpl, err := template.New("T").Parse(src)
	if err != nil {
		L.Push(lua.LNil)
		L.Push(lua.LString(err.Error()))
		return 2
	}
	var data interface{}
	if L.GetTop() >= 2 {
		data = toGoValue(L.CheckTable(2))
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		L.Push(lua.LNil)
		L.Push(lua.LString(err.Error()))
		return 2
	}
	L.Push(lua.LString(buf.String()))
	return 1
}
// doFile renders the template file named by the first Lua argument,
// optionally using a table (second argument) as the data context.
// Pushes the rendered string on success, or nil plus an error message.
func doFile(L *lua.LState) int {
	name := L.CheckString(1)
	tmpl, err := template.ParseFiles(name)
	if err != nil {
		L.Push(lua.LNil)
		L.Push(lua.LString(err.Error()))
		return 2
	}
	var data interface{}
	if L.GetTop() >= 2 {
		data = toGoValue(L.CheckTable(2))
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		L.Push(lua.LNil)
		L.Push(lua.LString(err.Error()))
		return 2
	}
	L.Push(lua.LString(buf.String()))
	return 1
}
// toGoValue converts a Lua value into its closest Go representation.
// Tables with sequential integer keys (MaxN > 0) become []interface{};
// other tables become map[interface{}]interface{} keyed by the string form
// of each converted key. Scalars map to their Go equivalents.
// This code refers to https://github.com/yuin/gluamapper/blob/master/gluamapper.go
func toGoValue(lv lua.LValue) interface{} {
	switch v := lv.(type) {
	case *lua.LNilType:
		return nil
	case lua.LBool:
		return bool(v)
	case lua.LString:
		return string(v)
	case lua.LNumber:
		return float64(v)
	case *lua.LTable:
		n := v.MaxN()
		if n > 0 { // array
			arr := make([]interface{}, 0, n)
			for i := 1; i <= n; i++ {
				arr = append(arr, toGoValue(v.RawGetInt(i)))
			}
			return arr
		}
		// table
		m := make(map[interface{}]interface{})
		v.ForEach(func(key, value lua.LValue) {
			m[fmt.Sprint(toGoValue(key))] = toGoValue(value)
		})
		return m
	default:
		return v
	}
}
|
package http
// -> session-token string
// <- recurring-payments []RecurringPayment
import (
"github.com/gin-gonic/gin"
"github.com/hokora/bank/util"
"net/http"
)
// RecurringPayment describes one scheduled repeating transfer as decoded
// from the RPC reply packet in sessRPGetAll.
type RecurringPayment struct {
	ID uint64 // payment identifier
	Start int64 // schedule start (presumably a unix timestamp — TODO confirm)
	Stop int64 // schedule end
	Next int64 // next execution time
	Amt float64 // amount per transfer
	From string // sender (filled with the session username)
	To string // recipient
	Interval uint16 // spacing between transfers (units unknown from here)
}
// sessRPGetAll handles the "get all recurring payments" endpoint for the
// current session: it sends the session username to the RP service and
// decodes the reply into a JSON list of RecurringPayment.
func (s *Server) sessRPGetAll(ctx *gin.Context) {
	username := ctx.GetString("username")
	pw := util.NewPacketWriterNoLen(len(username) + 1)
	pw.AppendString(username)
	success, reply, err := s.rpClient.CallReply(PROTO_OUT_RECURRING_PAYMENT_GET_ALL, pw.Pack(), DEFAULT_RESP_TIMEOUT)
	if err != nil {
		ctx.JSON(http.StatusInternalServerError, Resp{"message": "server error"})
		return
	}
	if success {
		pr := util.NewPacketReader(reply)
		// Wire format: uint16 count, then per record: u64 id, i64 start,
		// i64 stop, i64 next, f64 amount, len-prefixed recipient, u16
		// interval. Read order MUST match the writer exactly.
		total := int(pr.ReadUInt16())
		rps := make([]RecurringPayment, total)
		for i := 0; i < total; i++ {
			id := pr.ReadUInt64()
			start := pr.ReadInt64()
			stop := pr.ReadInt64()
			next := pr.ReadInt64()
			amt := pr.ReadFloat64()
			to := string(pr.ReadBytesWithLenUInt8())
			interval := pr.ReadUInt16()
			// From is not on the wire; the session user is always the sender.
			rp := RecurringPayment{id, start, stop, next, amt, username, to, interval}
			rps[i] = rp
		}
		ctx.JSON(http.StatusOK, Resp{"recurring-payments": rps})
	} else {
		ctx.JSON(http.StatusInternalServerError, Resp{"message": "server error"})
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//775. Global and Local Inversions
//We have some permutation A of [0, 1, ..., N - 1], where N is the length of A.
//The number of (global) inversions is the number of i < j with 0 <= i < j < N and A[i] > A[j].
//The number of local inversions is the number of i with 0 <= i < N and A[i] > A[i+1].
//Return true if and only if the number of global inversions is equal to the number of local inversions.
//Example 1:
//Input: A = [1,0,2]
//Output: true
//Explanation: There is 1 global inversion, and 1 local inversion.
//Example 2:
//Input: A = [1,2,0]
//Output: false
//Explanation: There are 2 global inversions, and 1 local inversion.
//Note:
//A will be a permutation of [0, 1, ..., A.length - 1].
//A will have length in range [1, 5000].
//The time limit for this problem has been reduced.
//func isIdealPermutation(A []int) bool {
//}
// Time Is Money |
package email
// SendVerificationEmail sends an account-verification email containing url
// to the given address.
// NOTE(review): stub — currently a no-op that always reports success.
func SendVerificationEmail(email, name, url string) error {
	// @TODO: Implement Email Service Of Choice
	return nil
}
// SendResetPassword sends a password-reset email containing url to the
// given address.
// NOTE(review): stub — currently a no-op that always reports success.
func SendResetPassword(email, name, url string) error {
	// @TODO: Implement Email Service Of Choice
	return nil
}
// SendWelcomeEmail sends a welcome email to the given address.
// NOTE(review): stub and, unlike the two functions above, returns no error —
// consider unifying the signatures when implementing.
func SendWelcomeEmail(email, name string) {
	// @TODO: Implement Email Service Of Choice
}
// SendNotificationEmail sends a notification email with the given subject
// and content to the given address.
// NOTE(review): stub and returns no error — consider unifying signatures
// when implementing.
func SendNotificationEmail(email, name, subject, content string) {
	// @TODO: Implement Email Service Of Choice
}
|
package apperror
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"path"
"runtime"
"strconv"
)
const (
	// Format string used with log.Fatalf when the error-registry JSON
	// cannot be loaded or decoded at startup.
	ErrFailedToDecodeConfigurationFile = "Failed to decode configuration file: %v\n"
)
// V1Error is a structured application error loaded from the errorV1.json
// registry: a stable code, a user-facing message, a developer message
// (which may contain fmt verbs filled in by NewV1Error), and the HTTP
// status to respond with.
type V1Error struct {
	ErrorCode string `json:"error_code"`
	PublicMessage string `json:"public_message"`
	DebugMessage string `json:"debug_message"`
	HttpCode int `json:"http_code"`
}
// errorData is the registry of error templates keyed by error code,
// populated once from errorV1.json in init.
var errorData = make(map[string]*V1Error)
// Error implements the error interface, rendering the public message, debug
// message, and HTTP code as a ';'-separated string.
func (e *V1Error) Error() string {
	return fmt.Sprintf(
		"public_message:%s;debug_message:%s;http_code:%s",
		e.PublicMessage, e.DebugMessage, strconv.Itoa(e.HttpCode))
}
// init loads the error registry from json/errorV1.json (resolved relative
// to this source file) into errorData. Any failure is fatal: the package is
// unusable without its registry.
func init() {
	_, filename, _, _ := runtime.Caller(0)
	errorConfigFilePath := path.Join(path.Dir(filename), "./json/errorV1.json")
	errorConfigFile, err := ioutil.ReadFile(errorConfigFilePath)
	if err != nil {
		// Previously this error was discarded, so a missing file surfaced
		// only as a confusing JSON decode error on empty input.
		log.Fatalf("Failed to read configuration file: %v\n", err)
	}
	if err := json.Unmarshal(errorConfigFile, &errorData); err != nil {
		log.Fatalf(ErrFailedToDecodeConfigurationFile, err)
	}
}
func NewV1Error(code string, args ...interface{}) *V1Error {
err := errorData[code]
err.DebugMessage = fmt.Sprintf(err.DebugMessage, args...)
return err
} |
package intercom
import (
"encoding/json"
"fmt"
)
// Admin represents an Admin in Intercom.
type Admin struct {
	ID json.Number `json:"id"` // numeric id delivered as a JSON number
	Type string `json:"type"` // e.g. "admin" or "nobody_admin"
	Name string `json:"name"`
	Email string `json:"email"`
}
// AdminList represents an object holding a list of Admins.
type AdminList struct {
	Admins []Admin
}
// AdminService handles interactions with the API through an AdminRepository.
type AdminService struct {
	Repository AdminRepository
}
// List lists the Admins associated with your App by delegating to the
// configured repository.
func (c *AdminService) List() (AdminList, error) {
	return c.Repository.list()
}
// IsNobodyAdmin is a helper function to determine if the Admin is 'Nobody'
// (i.e. its Type is the sentinel "nobody_admin").
func (a Admin) IsNobodyAdmin() bool {
	return a.Type == "nobody_admin"
}
// MessageAddress gets the address for a Contact in order to message them,
// using the fixed type "admin" and this Admin's id.
func (a Admin) MessageAddress() MessageAddress {
	return MessageAddress{
		Type: "admin",
		ID: a.ID.String(),
	}
}
// String renders the Admin in a human-readable single-line form for logs.
func (a Admin) String() string {
	return fmt.Sprintf("[intercom] %s { id: %s name: %s, email: %s }", a.Type, a.ID, a.Name, a.Email)
}
|
package main
import "fmt"
// soma returns the sum of its two integer arguments.
func soma(a int, b int) int {
	sum := a + b
	return sum
}
// main demonstrates soma by printing the sum of 5 and 5.
func main() {
	fmt.Println("5+5 =", soma(5, 5))
}
|
package parcels
/*
import (
"crypto/md5"
"encoding/binary"
"encoding/hex"
"encoding/json"
"hash"
"math"
"sort"
"spWebFront/FrontKeeper/infrastructure/core"
"spWebFront/FrontKeeper/infrastructure/log"
"spWebFront/FrontKeeper/server/app/domain/model"
)
type Parcel = model.Parcel
/*struct {
model.Parcel
Relevance float64 `json:"-"` // Релевантность
Group string `json:"group"` // Группа
NameSimple string `json:"name_simple"` // Простое название
NameSearchIndex string `json:"search_index"` // Строка для поиска товара (язык 1)
NameSearchIndex2 string `json:"search_index_lang2"` // Строка для поиска товара (язык 2)
NameSearchIndex3 string `json:"search_index_lang3"` // Строка для поиска товара (язык 3)
NameGroupIndex string `json:"group_index"` // Строка для группировки товара (название без производителя)
InnGroupIndex string `json:"inn_index"` // Строка для группировки товара по inn (название без производителя)
InnSearchIndex string `json:"name_inn"` // Полное название действующего вещества (язык 1)
InnSearchIndex2 string `json:"name_inn_lang2"` // Полное название действующего вещества (язык 2)
InnSearchIndex3 string `json:"name_inn_lang3"` // Полное название действующего вещества (язык 3)
InnSortIndex string `json:"inn_sort_index"` // Строка для сортировки по ИНН
ProvisorBonus float64 `json:"provisor_bonus"` // Бонус провизора
DateExpire int64 `json:"date_expire"` // Срок годности медикамента
BrandFormName string `json:"brand_form_name"` // Название бренда
BrandCompAmountSum float64 `json:"brand_comp_amount_sum"` //
BrandPackVolume float64 `json:"brand_pack_volume"` // Объем фасовки
UnitPackName string `json:"unit_pack_name` // Единицы измерения фасовки
Number int64 `json:"number"` //
// NameSortIndex string `json:"sort_index"` // Строка для сортировки по имени
}* /
type Parcels []*Parcel
func (ps Parcels) Relevance() (relevance float64) {
for i, p := range ps {
if i == 0 || p.Relevance > relevance {
relevance = p.Relevance
}
}
return
}
func (ps Parcels) ToDocuments() []json.RawMessage {
res := make([]json.RawMessage, 0, len(ps))
for _, p := range ps {
if p == nil {
continue
}
res = append(res, core.CloneBytes([]byte(p.Document)))
}
return res
}
type TunerOptions struct {
Sort []*model.Sort `json:"sort"`
Group []string `json:"group"`
Distinct []string `json:"distinct"`
Lang int `json:"lang"`
}
type Tuner interface {
Execute(parcels Parcels, options TunerOptions) Parcels
}
type ParcelSorter interface {
Compile(fields []*model.Sort, lang int) ParcelHandler
Execute(parcels Parcels, comparer ParcelComparer) Parcels
}
type ParcelDistincter interface {
Compile(fields []string, lang int) ParcelHandler
Execute(parcels Parcels, hasher ParcelHasher) Parcels
}
type ParcelGrouper interface {
Compile(fields []string, lang int) ParcelHandler
Execute(parcels Parcels, hasher ParcelHasher) map[string]Parcels
}
type mainTuner struct {
grouper ParcelGrouper
sorter ParcelSorter
distincter ParcelDistincter
}
func (tuner *mainTuner) Execute(
parcels Parcels,
options TunerOptions,
) Parcels {
if len(parcels) <= 1 {
return parcels
}
// if len(sort) == 0 {
// sort = []*model.Sort{
// {
// Field: "margin",
// Desc: true,
// },
// }
// }
// if len(distincts) == 0 {
// distincts = []string{"id_drug"}
// }
groups := tuner.grouper.Compile(options.Group, options.Lang)
distincts := tuner.distincter.Compile(options.Distinct, options.Lang)
sorts := tuner.sorter.Compile(options.Sort, options.Lang)
gs := tuner.grouper.Execute(parcels, groups)
for i, parcels := range gs {
parcels = tuner.sorter.Execute(parcels, sorts)
parcels = tuner.distincter.Execute(parcels, distincts)
gs[i] = parcels
}
gs2 := sortGroups(gs, compileNameReader(options.Lang))
result := make(Parcels, 0, len(parcels))
for _, g := range gs2 {
for _, p := range g.parcels {
result = append(result, p)
}
}
return result
}
func NewTuner(
grouper ParcelGrouper,
sorter ParcelSorter,
distincter ParcelDistincter,
) Tuner {
return &mainTuner{
grouper: grouper,
sorter: sorter,
distincter: distincter,
}
}
func sortGroups(
groups map[string]Parcels,
reader ParcelStringReader,
) parcelGroups {
gs := make(parcelGroups, 0, len(groups))
for _, g := range groups {
if len(g) == 0 {
continue
}
gs = append(
gs,
&parcelGroup{
relevance: g.Relevance(),
parcels: g,
name: reader(g[0]),
},
)
}
sort.Sort(gs)
// for _, g := range gs {
// log.Println("GROUP", g.name, g.relevance)
// for _, p := range g.parcels {
// log.Println("PARCEL", p.Name)
// }
// }
return gs
}
func compileNameReader(
lang int,
) ParcelStringReader {
switch lang {
case 2:
return parcelName2Reader
case 3:
return parcelName3Reader
default:
return parcelNameReader
}
}
func compileRowHandler(
field string,
handler ParcelHandler,
lang int,
) ParcelHandler {
switch field {
case "relevance":
return &floatHandler{
reader: parcelRelevanceReader,
ParcelHandler: handler,
}
case "margin":
return &floatHandler{
reader: parcelMarginReader,
ParcelHandler: handler,
}
case "price", "price_sell_sum":
return &floatHandler{
reader: parcelPriceReader,
ParcelHandler: handler,
}
case "quantity":
return &floatHandler{
reader: parcelQuantityReader,
ParcelHandler: handler,
}
case "date_start", "date":
return &intHandler{
reader: parcelDateStartReader,
ParcelHandler: handler,
}
case "id_drug":
return &stringHandler{
reader: parcelDrugReader,
ParcelHandler: handler,
}
case "group":
return &stringHandler{
reader: parcelGroupReader,
ParcelHandler: handler,
}
case "name_simple":
return &stringHandler{
reader: parcelNameSimpleReader,
ParcelHandler: handler,
}
case "name_group_index":
return &stringHandler{
reader: parcelNameGroupIndexReader,
ParcelHandler: handler,
}
case "inn_group_index":
return &stringHandler{
reader: parcelInnGroupIndexReader,
ParcelHandler: handler,
}
case "inn_sort_index":
return &stringHandler{
reader: parcelInnSortIndexReader,
ParcelHandler: handler,
}
case "provisor_bonus":
return &floatHandler{
reader: parcelProvisorBonusReader,
ParcelHandler: handler,
}
case "date_expire":
return &intHandler{
reader: parcelDateExpireReader,
ParcelHandler: handler,
}
case "brand_forrm_name":
return &stringHandler{
reader: parcelBrandFormNameReader,
ParcelHandler: handler,
}
case "brand_comp_amount_sum":
return &floatHandler{
reader: parcelBrandCompAmountSumReader,
ParcelHandler: handler,
}
case "brand_pack_volume":
return &floatHandler{
reader: parcelBrandPackVolumeReader,
ParcelHandler: handler,
}
case "unit_pack_name":
return &stringHandler{
reader: parcelUnitPackNameReader,
ParcelHandler: handler,
}
case "number":
return &intHandler{
reader: parcelNumberReader,
ParcelHandler: handler,
}
case "search_index_lang1":
return &stringHandler{
reader: parcelNameSearchIndexReader,
ParcelHandler: handler,
}
case "search_index_lang2":
return &stringHandler{
reader: parcelNameSearchIndex2Reader,
ParcelHandler: handler,
}
case "search_index_lang3":
return &stringHandler{
reader: parcelNameSearchIndex2Reader,
ParcelHandler: handler,
}
case "name", "name_long_lang1":
return &stringHandler{
reader: parcelNameReader,
ParcelHandler: handler,
}
case "name2", "name_long_lang2":
return &stringHandler{
reader: parcelName2Reader,
ParcelHandler: handler,
}
case "name3", "name_long_lang3":
return &stringHandler{
reader: parcelName3Reader,
ParcelHandler: handler,
}
case "inn_search_index", "name_inn_lang1":
return &stringHandler{
reader: parcelInnSearchIndexReader,
ParcelHandler: handler,
}
case "name_inn_lang2":
return &stringHandler{
reader: parcelInnSearchIndex2Reader,
ParcelHandler: handler,
}
case "name_inn_lang3":
return &stringHandler{
reader: parcelInnSearchIndex3Reader,
ParcelHandler: handler,
}
case "search_index":
switch lang {
case 2:
return compileRowHandler("search_index_lang2", handler, lang)
case 3:
return compileRowHandler("search_index_lang3", handler, lang)
default:
return compileRowHandler("search_index_lang1", handler, lang)
}
case "name_long":
switch lang {
case 2:
return compileRowHandler("name_long_lang2", handler, lang)
case 3:
return compileRowHandler("name_long_lang3", handler, lang)
default:
return compileRowHandler("name_long_lang1", handler, lang)
}
case "name_inn_lang":
switch lang {
case 2:
return compileRowHandler("name_inn_lang2", handler, lang)
case 3:
return compileRowHandler("name_inn_lang3", handler, lang)
default:
return compileRowHandler("name_inn_lang1", handler, lang)
}
default:
log.Println("Unknown parcel's field", field)
return handler
}
}
type ParcelIntReader func(parcel *Parcel) int64
type ParcelFloatReader func(parcel *Parcel) float64
type ParcelStringReader func(parcel *Parcel) string
var (
parcelEmptyReader = func(parcel *Parcel) string {
return ""
}
parcelDrugReader = func(parcel *Parcel) string {
return parcel.Drug
}
parcelRelevanceReader = func(parcel *Parcel) float64 {
return parcel.Relevance
}
parcelMarginReader = func(parcel *Parcel) float64 {
return parcel.Margin
}
parcelPriceReader = func(parcel *Parcel) float64 {
return parcel.Price
}
parcelQuantityReader = func(parcel *Parcel) float64 {
if parcel.QuantDiv == 0 {
return float64(parcel.QuantNum)
}
return float64(parcel.QuantNum) / float64(parcel.QuantDiv)
}
parcelDateStartReader = func(parcel *Parcel) int64 {
return parcel.DateStart
}
parcelNameReader = func(parcel *Parcel) string {
return parcel.Name
}
parcelName2Reader = func(parcel *Parcel) string {
return parcel.Name2
}
parcelName3Reader = func(parcel *Parcel) string {
return parcel.Name3
}
parcelGroupReader = func(parcel *Parcel) string {
return parcel.Group
}
parcelNameSimpleReader = func(parcel *Parcel) string {
return parcel.NameSimple
}
parcelNameGroupIndexReader = func(parcel *Parcel) string {
return parcel.NameGroupIndex
}
parcelInnGroupIndexReader = func(parcel *Parcel) string {
return parcel.InnGroupIndex
}
parcelInnSearchIndexReader = func(parcel *Parcel) string {
return parcel.InnSearchIndex
}
parcelInnSearchIndex2Reader = func(parcel *Parcel) string {
return parcel.InnSearchIndex2
}
parcelInnSearchIndex3Reader = func(parcel *Parcel) string {
return parcel.InnSearchIndex3
}
parcelInnSortIndexReader = func(parcel *Parcel) string {
return parcel.InnSortIndex
}
parcelProvisorBonusReader = func(parcel *Parcel) float64 {
return parcel.ProvisorBonus
}
parcelDateExpireReader = func(parcel *Parcel) int64 {
return parcel.DateExpire
}
parcelBrandFormNameReader = func(parcel *Parcel) string {
return parcel.BrandFormName
}
parcelBrandCompAmountSumReader = func(parcel *Parcel) float64 {
return parcel.BrandCompAmountSum
}
parcelBrandPackVolumeReader = func(parcel *Parcel) float64 {
return parcel.BrandPackVolume
}
parcelUnitPackNameReader = func(parcel *Parcel) string {
return parcel.UnitPackName
}
parcelNumberReader = func(parcel *Parcel) int64 {
return parcel.Number
}
parcelNameSearchIndexReader = func(parcel *Parcel) string {
return parcel.NameSearchIndex
}
parcelNameSearchIndex2Reader = func(parcel *Parcel) string {
return parcel.NameSearchIndex2
}
parcelNameSearchIndex3Reader = func(parcel *Parcel) string {
return parcel.NameSearchIndex3
}
)
type ParcelHasher interface {
Hash(parcel *Parcel, hash hash.Hash)
}
type ParcelComparer interface {
Compare(a, b *Parcel) int
}
type ParcelHandler interface {
ParcelComparer
ParcelHasher
}
type descHandler struct {
ParcelHandler
}
func (h *descHandler) Compare(a, b *Parcel) int {
val := h.ParcelHandler.Compare(a, b)
return -val
}
type intHandler struct {
ParcelHandler
reader ParcelIntReader
}
func (h *intHandler) Compare(a, b *Parcel) int {
va := h.reader(a)
vb := h.reader(b)
if va < vb {
return -1
}
if va > vb {
return 1
}
return h.ParcelHandler.Compare(a, b)
}
func (h *intHandler) Hash(parcel *Parcel, hash hash.Hash) {
value := h.reader(parcel)
bytes := make([]byte, 8)
binary.LittleEndian.PutUint64(bytes, uint64(value))
hash.Write(bytes)
h.ParcelHandler.Hash(parcel, hash)
}
type floatHandler struct {
ParcelHandler
reader ParcelFloatReader
}
func (h *floatHandler) Compare(a, b *Parcel) int {
va := h.reader(a)
vb := h.reader(b)
if va < vb {
return -1
}
if va > vb {
return 1
}
return h.ParcelHandler.Compare(a, b)
}
func (h *floatHandler) Hash(parcel *Parcel, hash hash.Hash) {
value := h.reader(parcel)
bits := math.Float64bits(value)
bytes := make([]byte, 8)
binary.LittleEndian.PutUint64(bytes, bits)
hash.Write(bytes)
h.ParcelHandler.Hash(parcel, hash)
}
type stringHandler struct {
ParcelHandler
reader ParcelStringReader
}
func (h *stringHandler) Compare(a, b *Parcel) int {
va := h.reader(a)
vb := h.reader(b)
cmp := core.Compare(vb, va)
if cmp != 0 {
return cmp
}
return h.ParcelHandler.Compare(a, b)
}
func (h *stringHandler) Hash(parcel *Parcel, hash hash.Hash) {
value := h.reader(parcel)
bytes := []byte(value)
hash.Write(bytes)
h.ParcelHandler.Hash(parcel, hash)
}
type terminalHandler struct {
reader ParcelStringReader
}
func (h *terminalHandler) Compare(a, b *Parcel) int {
va := h.reader(a)
vb := h.reader(b)
return core.Compare(vb, va)
}
func (h *terminalHandler) Hash(parcel *Parcel, hash hash.Hash) {
}
func NewTerminal() ParcelHandler {
return &terminalHandler{
reader: parcelEmptyReader,
}
}
type parcelGroup struct {
relevance float64
parcels Parcels
name string
}
type parcelGroups []*parcelGroup
func (groups parcelGroups) Len() int {
return len(groups)
}
func (groups parcelGroups) Swap(i, j int) {
groups[i], groups[j] = groups[j], groups[i]
}
func (groups parcelGroups) Less(i, j int) bool {
a := groups[i]
b := groups[j]
if a.relevance < b.relevance {
return false
}
if a.relevance > b.relevance {
return true
}
return core.Compare(a.name, b.name) < 0
}
type parcelSorterHelper struct {
comparer ParcelComparer
parcels Parcels
}
func (helper *parcelSorterHelper) Len() int {
return len(helper.parcels)
}
func (helper *parcelSorterHelper) Swap(i, j int) {
helper.parcels[i], helper.parcels[j] = helper.parcels[j], helper.parcels[i]
}
func (helper *parcelSorterHelper) Less(i, j int) bool {
return helper.comparer.Compare(helper.parcels[i], helper.parcels[j]) < 0
}
type parcelSorter struct {
terminal ParcelHandler
disabled bool
}
func (sorter *parcelSorter) Compile(
fields []*model.Sort,
lang int,
) ParcelHandler {
if len(fields) == 0 {
return sorter.terminal
}
handler := sorter.Compile(fields[1:], lang)
s := fields[0]
h := compileRowHandler(s.Field, handler, lang)
if s.Desc {
return &descHandler{ParcelHandler: h}
}
return h
}
func (sorter *parcelSorter) Execute(
parcels Parcels,
comparer ParcelComparer,
) Parcels {
if sorter.disabled || len(parcels) <= 1 {
return parcels
}
helper := &parcelSorterHelper{
parcels: parcels,
comparer: comparer,
}
sort.Sort(helper)
return helper.parcels
}
func NewParcelSorter(
terminal ParcelHandler,
enabled bool,
) ParcelSorter {
return &parcelSorter{
terminal: terminal,
disabled: !enabled,
}
}
// parcelDistincter removes duplicate parcels, where "duplicate" is
// defined by the hash produced by a compiled handler chain.
type parcelDistincter struct {
	terminal ParcelHandler // end of every compiled chain
	disabled bool          // set from the constructor's !enabled
}
// Compile builds a hashing chain over the given field names, ending in
// the terminal handler; fields[0] becomes the outermost handler.
func (dist *parcelDistincter) Compile(
	fields []string,
	lang int,
) ParcelHandler {
	handler := dist.terminal
	for i := len(fields) - 1; i >= 0; i-- {
		handler = compileRowHandler(fields[i], handler, lang)
	}
	return handler
}
// Execute returns the parcels with duplicates removed, keeping the
// first occurrence. Identity is the hex-encoded MD5 digest produced by
// the hasher chain (MD5 is used for dedup keys only, not security).
func (dist *parcelDistincter) Execute(
	parcels Parcels,
	hasher ParcelHasher,
) Parcels {
	if dist.disabled || len(parcels) <= 1 {
		return parcels
	}
	seen := make(map[string]bool, len(parcels))
	res := make(Parcels, 0, len(parcels))
	for _, p := range parcels {
		digest := md5.New()
		hasher.Hash(p, digest)
		key := hex.EncodeToString(digest.Sum(nil))
		if seen[key] {
			continue
		}
		seen[key] = true
		res = append(res, p)
	}
	return res
}
// NewParcelDistinctrer builds a ParcelDistincter ending its chains with
// terminal; enabled=false makes Execute a pass-through.
// NOTE(review): the exported name contains a typo ("Distinctrer") but
// cannot be renamed without breaking callers.
func NewParcelDistinctrer(
	terminal ParcelHandler,
	enabled bool,
) ParcelDistincter {
	return &parcelDistincter{
		terminal: terminal,
		disabled: !enabled,
	}
}
// parcelGrouper buckets parcels by the hash produced by a compiled
// handler chain.
type parcelGrouper struct {
	terminal ParcelHandler // end of every compiled chain
	disabled bool          // set from the constructor's !enabled
}
// Compile builds a hashing chain over the given field names, ending in
// the terminal handler; fields[0] becomes the outermost handler.
func (grouper *parcelGrouper) Compile(
	fields []string,
	lang int,
) ParcelHandler {
	handler := grouper.terminal
	for i := len(fields) - 1; i >= 0; i-- {
		handler = compileRowHandler(fields[i], handler, lang)
	}
	return handler
}
// Execute buckets parcels by the hex-encoded MD5 digest produced by the
// hasher chain. When disabled, or with fewer than two parcels, all
// parcels land in the single "" group.
func (grouper *parcelGrouper) Execute(
	parcels Parcels,
	hasher ParcelHasher,
) map[string]Parcels {
	if grouper.disabled || len(parcels) <= 1 {
		return map[string]Parcels{"": parcels}
	}
	groups := make(map[string]Parcels, 128)
	for _, p := range parcels {
		digest := md5.New()
		hasher.Hash(p, digest)
		key := hex.EncodeToString(digest.Sum(nil))
		// append on a missing key starts a new bucket.
		groups[key] = append(groups[key], p)
	}
	return groups
}
// NewParcelGrouper builds a ParcelGrouper ending its chains with
// terminal; enabled=false makes Execute return a single "" group.
func NewParcelGrouper(
	terminal ParcelHandler,
	enabled bool,
) ParcelGrouper {
	g := parcelGrouper{terminal: terminal}
	g.disabled = !enabled
	return &g
}
*/ |
/*
You are given an integer n.
Each number from 1 to n is grouped according to the sum of its digits.
Return the number of groups that have the largest size.
Example 1:
Input: n = 13
Output: 4
Explanation: There are 9 groups in total, they are grouped according sum of its digits of numbers from 1 to 13:
[1,10], [2,11], [3,12], [4,13], [5], [6], [7], [8], [9].
There are 4 groups with largest size.
Example 2:
Input: n = 2
Output: 2
Explanation: There are 2 groups [1], [2] of size 1.
Constraints:
1 <= n <= 10^4
*/
package main
// main checks largestgroup against the two worked examples from the
// problem statement.
func main() {
	for input, want := range map[int]int{13: 4, 2: 2} {
		assert(largestgroup(input) == want)
	}
}
// assert panics when x is false; a minimal test helper for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// digitsum returns the sum of the decimal digits of n (n >= 0).
func digitsum(n int) int {
	total := 0
	for n > 0 {
		total += n % 10
		n /= 10
	}
	return total
}
// largestgroup groups 1..n by digit sum and returns how many groups
// share the largest size.
func largestgroup(n int) int {
	counts := make(map[int]int)
	best := 0    // size of the largest group seen so far
	numBest := 0 // how many groups currently have that size
	for i := 1; i <= n; i++ {
		s := digitsum(i)
		counts[s]++
		switch {
		case counts[s] > best:
			best = counts[s]
			numBest = 1
		case counts[s] == best:
			numBest++
		}
	}
	return numBest
}
|
package common
import (
"bufio"
"container/list"
"encoding/json"
log "github.com/cihub/seelog"
"os"
"sync"
)
// taskDealMsg processes a single message. Push and fetch have different
// message handling, so each supplies its own implementation (see
// rocketxclean and rocketxfetch). The returned int is an
// implementation-defined status code.
type taskDealMsg interface {
	DealMsg(msgStruct InputMsg, msgDealCount int) int
}
// Strategies abstracts the task-management strategy. Two exist:
//   - "rs": every host's task list has its own manager and dealers;
//   - "ms": all hosts share one manager and a common dealer pool — on
//     every tick the manager fetches one element from each task list
//     and pushes it to the common dealers.
//
// NOTE(review): GoRecieveMsg is a typo for GoReceiveMsg but is part of
// the exported interface and cannot be renamed here.
type Strategies interface {
	GoRecieveMsg()
	storeTask()
}
// dealElem records the task node currently being processed: e is the
// list element and l is the task list that e belongs to.
type dealElem struct {
	l *list.List
	e *list.Element
}
// taskNode is an InputMsg queued for (re)processing plus its retry
// counter.
type taskNode struct {
	// retryTimes must stay addressable through a pointer: with a value
	// copy (node := e.Value.(taskNode)), node.retryTimes++ would not
	// update the element stored in the list.
	retryTimes int
	InputMsg
}
var (
	// MsgChan carries incoming messages to the dealers (RebuildTask
	// replays persisted messages into it).
	MsgChan chan InputMsg
	// ExitChan presumably signals shutdown — confirm against users.
	ExitChan chan struct{}
	// MsgFeedbackMap holds per-message feedback state — semantics not
	// visible here; confirm against users.
	MsgFeedbackMap sync.Map
)
// storeTaskToDisk persists every task in l to f on exit, one
// JSON-encoded InputMsg per line, so RebuildTask can replay the queue
// after a restart. Entries that fail to marshal or write are logged
// and skipped.
//
// Cleanup: the inner variable previously shadowed the *list.List
// parameter l (l := len(s)), and an unnecessary else wrapped the
// success path.
func storeTaskToDisk(f *os.File, l *list.List) {
	for e := l.Front(); e != nil; e = e.Next() {
		msgStruct := e.Value.(taskNode).InputMsg
		b, bErr := json.Marshal(&msgStruct)
		if bErr != nil {
			log.Info("ERROR ", bErr, " ", msgStruct)
			continue
		}
		line := string(b) + "\n"
		n, wErr := f.WriteString(line)
		// A short write without an error is still a failure.
		if wErr != nil || n != len(line) {
			log.Info("ERROR ", wErr, " ", msgStruct, "n ", n, " Len", len(line)-1)
			continue
		}
		log.Info(string(b))
	}
}
// RebuildTask replays the task list persisted by storeTaskToDisk: it
// reads rebuildFilename line by line, decodes each line into an
// InputMsg, feeds it to MsgChan, and finally removes the file.
//
// BUG FIX: the previous version sent msgStruct to MsgChan even when
// json.Unmarshal failed, replaying a zero/partially-decoded message;
// it also never checked scanner.Err().
func RebuildTask(rebuildFilename string) {
	f, fErr := os.Open(rebuildFilename)
	if fErr != nil {
		log.Info("Error: ", fErr)
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		msgStruct := InputMsg{}
		if jsonErr := json.Unmarshal([]byte(scanner.Text()), &msgStruct); jsonErr != nil {
			log.Info("ERROR: ", jsonErr, "input ", scanner.Text())
			continue
		}
		MsgChan <- msgStruct
		log.Info(scanner.Text())
	}
	if sErr := scanner.Err(); sErr != nil {
		log.Info("ERROR: ", sErr)
	}
	err := os.Remove(rebuildFilename)
	if err != nil {
		log.Info(err)
	}
}
// ChooseStrategy builds the task-management strategy selected by the
// strategy config value: "rs" yields a ReconsumerStrategy; "ms" (and,
// as a logged fallback, any unknown value) yields a
// MaxGorutinesStrategy.
//
// Fixes: gofmt violations (missing spaces) and a spurious "ERROR"
// log that previously fired even for the valid "ms" value; stale
// commented-out call sites removed.
func ChooseStrategy(strategy, rebuildFilename string, maxGorutines, dealInterval, reconsumeListLen int, t taskDealMsg) Strategies {
	if strategy == "rs" {
		rsStrategy := ReconsumerStrategy{}
		rsStrategy.Init(rebuildFilename, maxGorutines, dealInterval, reconsumeListLen, t)
		return &rsStrategy
	}
	if strategy != "ms" {
		log.Info("ERROR: strategy config is not ms and rs")
	}
	msStrategy := MaxGorutinesStrategy{}
	msStrategy.Init(rebuildFilename, maxGorutines, dealInterval, t)
	return &msStrategy
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
"context"
"database/sql"
"slices"
"strings"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/br/pkg/version"
tcontext "github.com/pingcap/tidb/dumpling/context"
clientv3 "go.etcd.io/etcd/client/v3"
)
const tidbServerInformationPath = "/tidb/server/info"
// getPdDDLIDs lists the TiDB DDL-owner IDs registered in PD's etcd
// under tidbServerInformationPath. Each ID is the final "/"-separated
// segment of the key. The etcd read is bounded by a 10s timeout.
func getPdDDLIDs(pCtx context.Context, cli *clientv3.Client) ([]string, error) {
	ctx, cancel := context.WithTimeout(pCtx, 10*time.Second)
	defer cancel()
	resp, err := cli.Get(ctx, tidbServerInformationPath, clientv3.WithPrefix())
	if err != nil {
		return nil, errors.Trace(err)
	}
	ids := make([]string, len(resp.Kvs))
	for i, kv := range resp.Kvs {
		key := string(kv.Key)
		// Suffix after the last '/'; the whole key if it has none.
		ids[i] = key[strings.LastIndex(key, "/")+1:]
	}
	return ids, nil
}
// checkSameCluster reports whether the TiDB behind db and the PD
// endpoints in pdAddrs belong to the same cluster, by comparing the
// sorted DDL-owner ID lists from both sides.
//
// BUG FIX: the etcd client was never closed, leaking its gRPC
// connections on every call.
func checkSameCluster(tctx *tcontext.Context, db *sql.DB, pdAddrs []string) (bool, error) {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:        pdAddrs,
		DialTimeout:      defaultEtcdDialTimeOut,
		AutoSyncInterval: 30 * time.Second,
	})
	if err != nil {
		return false, errors.Trace(err)
	}
	//nolint:errcheck // best-effort cleanup
	defer cli.Close()
	tidbDDLIDs, err := GetTiDBDDLIDs(tctx, db)
	if err != nil {
		return false, err
	}
	pdDDLIDs, err := getPdDDLIDs(tctx, cli)
	if err != nil {
		return false, err
	}
	slices.Sort(tidbDDLIDs)
	slices.Sort(pdDDLIDs)
	return sameStringArray(tidbDDLIDs, pdDDLIDs), nil
}
// sameStringArray reports whether a and b have the same length and
// equal elements in the same order. The file already uses the slices
// package, so the hand-rolled loop is replaced by slices.Equal.
func sameStringArray(a, b []string) bool {
	return slices.Equal(a, b)
}
// string2Map zips a and b into a map from a[i] to b[i].
// b must be at least as long as a (shorter b panics, as before);
// extra elements of b are ignored.
func string2Map(a, b []string) map[string]string {
	m := make(map[string]string, len(a))
	for i := range a {
		m[a[i]] = b[i]
	}
	return m
}
// needRepeatableRead reports whether the dump must use repeatable-read
// isolation; it is unnecessary only for TiDB with snapshot consistency.
func needRepeatableRead(serverType version.ServerType, consistency string) bool {
	return !(consistency == ConsistencyTypeSnapshot && serverType == version.ServerTypeTiDB)
}
// infiniteChan returns a send-only and a receive-only channel joined by
// an unbounded in-memory FIFO: sends on the first never block on the
// consumer of the second. Closing the input flushes all buffered
// values to the output in order and then closes it.
func infiniteChan[T any]() (chan<- T, <-chan T) {
	in, out := make(chan T), make(chan T)
	go func() {
		var pending []T
		// flush drains the queue to out and closes it; called once the
		// input channel has been closed.
		flush := func() {
			for _, v := range pending {
				out <- v
			}
			close(out)
		}
		for {
			if len(pending) == 0 {
				// Nothing buffered: block on input only.
				v, ok := <-in
				if !ok {
					flush()
					return
				}
				pending = append(pending, v)
				continue
			}
			// Buffered values exist: accept new input or hand the
			// oldest buffered value to the consumer, whichever is ready.
			select {
			case v, ok := <-in:
				if !ok {
					flush()
					return
				}
				pending = append(pending, v)
			case out <- pending[0]:
				pending = pending[1:]
			}
		}
	}()
	return in, out
}
|
package observability_test
import (
"context"
"testing"
"time"
"github.com/syncromatics/kafmesh/internal/observability"
watchv1 "github.com/syncromatics/kafmesh/internal/protos/kafmesh/watch/v1"
"gotest.tools/assert"
)
// Test_Watcher verifies that two watchers registered for the same
// component/processor/key both receive an operation broadcast via Send.
func Test_Watcher(t *testing.T) {
	watcher := &observability.Watcher{}
	ctx, cancel := context.WithCancel(context.Background())
	operation := &watchv1.Operation{}
	// NOTE(review): watch1/watch2 are written inside the watcher
	// goroutines and read after cancel() without synchronization —
	// `go test -race` would flag this; correctness relies on
	// Send/cancel ordering inside Watcher. Confirm and consider a
	// channel or sync primitive.
	watch1 := false
	watch2 := false
	go func() {
		watcher.WatchProcessor(ctx, &watchv1.ProcessorRequest{
			Component: "com1",
			Processor: "proc1",
			Key:       "12",
		}, func(m *watchv1.ProcessorResponse) error {
			assert.DeepEqual(t, m, &watchv1.ProcessorResponse{
				Operation: operation,
			})
			watch1 = true
			return nil
		})
	}()
	go func() {
		watcher.WatchProcessor(ctx, &watchv1.ProcessorRequest{
			Component: "com1",
			Processor: "proc1",
			Key:       "12",
		}, func(m *watchv1.ProcessorResponse) error {
			assert.DeepEqual(t, m, &watchv1.ProcessorResponse{
				Operation: operation,
			})
			watch2 = true
			return nil
		})
	}()
	// Poll until both watchers are registered before broadcasting.
	for {
		i, ok := watcher.WatchCount("com1", "proc1", "12")
		if ok && i == 2 {
			break
		}
		time.Sleep(time.Millisecond)
	}
	watcher.Send("com1", "proc1", "12", operation)
	cancel()
	assert.Assert(t, watch1)
	assert.Assert(t, watch2)
}
|
package utils
import (
"fmt"
"os"
"strings"
"github.com/go-sql-driver/mysql"
"github.com/go-xorm/core"
"github.com/go-xorm/xorm"
)
// LoadMySQLConfigEnv builds a MySQL config over TCP from the
// DATABASE_HOST/NAME/USER/PASSWORD environment variables.
func LoadMySQLConfigEnv() *mysql.Config {
	return &mysql.Config{
		Net:                  "tcp",
		Addr:                 os.Getenv("DATABASE_HOST"),
		DBName:               os.Getenv("DATABASE_NAME"),
		User:                 os.Getenv("DATABASE_USER"),
		Passwd:               os.Getenv("DATABASE_PASSWORD"),
		AllowNativePasswords: true,
	}
}
// InitMySQLEngine creates an xorm engine for the given MySQL config,
// applying Gonic name mapping, the charset from conf.Params (default
// utf8mb4), the InnoDB storage engine, and the log level taken from
// the LOG_LEVEL environment variable.
func InitMySQLEngine(conf *mysql.Config) (*xorm.Engine, error) {
	engine, err := xorm.NewEngine("mysql", conf.FormatDSN())
	if err != nil {
		return nil, err
	}
	engine.SetMapper(core.GonicMapper{})
	charset := "utf8mb4"
	if c, ok := conf.Params["charset"]; ok {
		charset = c
	}
	engine.Charset(charset)
	engine.StoreEngine("InnoDb")
	logLevel, err := parseLogLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		return nil, err
	}
	engine.SetLogLevel(logLevel)
	// Echo SQL statements when logging at debug or info.
	engine.ShowSQL(logLevel == core.LOG_DEBUG || logLevel == core.LOG_INFO)
	return engine, nil
}
// parseLogLevel maps a (case-insensitive) level name onto a go-xorm
// log level. Unknown names yield LOG_DEBUG together with an error.
func parseLogLevel(lvl string) (core.LogLevel, error) {
	levels := map[string]core.LogLevel{
		"panic":   core.LOG_ERR,
		"fatal":   core.LOG_ERR,
		"error":   core.LOG_ERR,
		"warn":    core.LOG_WARNING,
		"warning": core.LOG_WARNING,
		"info":    core.LOG_INFO,
		"debug":   core.LOG_DEBUG,
	}
	if level, ok := levels[strings.ToLower(lvl)]; ok {
		return level, nil
	}
	return core.LOG_DEBUG, fmt.Errorf("cannot parse \"%v\" into go-xorm/core.LogLevel", lvl)
}
// EscapeMySQLString escapes the characters that are special inside
// MySQL string literals, guarding string-built queries against SQL
// injection. Prefer parameterized queries where possible.
//
// BUG FIX: the previous implementation applied replacements by ranging
// over a map, whose iteration order is random — when the "\\" pass ran
// after another pass, it re-escaped backslashes that pass had just
// inserted, so output depended on map ordering. A single left-to-right
// scan escapes every input byte exactly once, and the NUL byte is now
// escaped directly (the old two-character "\\0" rule was redundant).
func EscapeMySQLString(value string) string {
	var b strings.Builder
	b.Grow(len(value) + 8)
	for i := 0; i < len(value); i++ {
		switch c := value[i]; c {
		case '\\':
			b.WriteString(`\\`)
		case '\'':
			b.WriteString(`\'`)
		case '"':
			b.WriteString(`\"`)
		case '\n':
			b.WriteString(`\n`)
		case '\r':
			b.WriteString(`\r`)
		case 0:
			b.WriteString(`\0`)
		case '\x1a':
			b.WriteString(`\Z`)
		default:
			b.WriteByte(c)
		}
	}
	return b.String()
}
|
package main
import (
"fmt"
"github.com/spf13/viper"
"github.com/takeru56/cnos/cinii"
"log"
"net/url"
"os"
"strconv"
"github.com/spf13/cobra"
)
// booksFlags holds the command-line flags of the `cnos books` command
// (see init below for each flag's meaning and default).
type booksFlags struct {
	keyword   string
	title     string
	author    string
	publisher string
	yearfrom  int
	yearto    int
	count     int
	sort      int
	lang      int
}

// bf is the flag storage bound to booksCmd by init.
var bf booksFlags
// init registers booksCmd and binds its flags to bf.
// FIX: "pulibsher" typo corrected to "publisher" in the user-facing
// flag description.
func init() {
	rootCmd.AddCommand(booksCmd)
	booksCmd.Flags().StringVarP(&bf.keyword, "keyword", "k", "", "Specifies keyword to be searched")
	booksCmd.Flags().IntVarP(&bf.count, "count", "c", 20, "Specifies the number of search results per page")
	booksCmd.Flags().IntVarP(&bf.sort, "sort", "s", 1, "Specifies the sorting condition [1:sorts by relevance 2:oldest at the top 3:newest at the top]")
	booksCmd.Flags().IntVarP(&bf.lang, "lang", "l", 1, "Specifies the language the search results are displayed in [1:ja 2:en]")
	booksCmd.Flags().StringVarP(&bf.title, "title", "t", "", "Searches by Books & Journals title")
	booksCmd.Flags().StringVarP(&bf.author, "author", "a", "", "Searches by author name")
	booksCmd.Flags().StringVarP(&bf.publisher, "publisher", "p", "", "Searches by publisher")
	booksCmd.Flags().IntVarP(&bf.yearfrom, "yearfrom", "", 0, "Searches by year of publication")
	booksCmd.Flags().IntVarP(&bf.yearto, "yearto", "", 0, "Searches by year of publication")
}
// setQueryValues copies the set flags into the CiNii query string:
// empty strings and zero years are omitted, lang is only sent for
// English (2), and count/sortorder are always sent.
func (bf *booksFlags) setQueryValues(q *url.Values) {
	stringParams := map[string]string{
		"q":         bf.keyword,
		"title":     bf.title,
		"author":    bf.author,
		"publisher": bf.publisher,
	}
	for key, val := range stringParams {
		if val != "" {
			q.Set(key, val)
		}
	}
	intParams := map[string]int{
		"year_from": bf.yearfrom,
		"year_to":   bf.yearto,
	}
	for key, val := range intParams {
		if val != 0 {
			q.Set(key, strconv.Itoa(val))
		}
	}
	if bf.lang == 2 {
		q.Set("lang", "en")
	}
	q.Set("count", strconv.Itoa(bf.count))
	q.Set("sortorder", strconv.Itoa(bf.sort))
}
// booksCmd implements `cnos books`: it queries CiNii Books and prints
// each match's title, author, date and URL.
var booksCmd = &cobra.Command{
	Use:   "books",
	Short: "Search books and journals",
	Long: `Books(cnos books)will search books and journals
on CiNii Books, and return matched title and url.`,
	Run: func(cmd *cobra.Command, args []string) {
		// BUG FIX: this guard previously read af.* — the articles
		// command's flags — instead of bf.*, so the books command was
		// gated on another command's input. Also fixed the "leaset"
		// typo and the copy-pasted "articles" in the error below.
		if bf.keyword == "" && bf.title == "" && bf.author == "" && bf.publisher == "" && bf.yearfrom == 0 && bf.yearto == 0 {
			fmt.Println("fatal: At least one flag without -c -s -f -l required\nRun 'cnos books --help' for usage.")
			os.Exit(1)
		}
		client := cinii.NewClient()
		q := url.Values{}
		bf.setQueryValues(&q)
		if viper.Get("appid") != nil {
			q.Set("appid", viper.GetString("appid"))
		}
		result, err := client.SearchBooks(q)
		if err != nil {
			log.Fatal("fatal: failed to search books\n", err)
		}
		if len(result.Graph[0].Items) == 0 {
			fmt.Println("Search Results: 0")
			return
		}
		fmt.Println("Search Results: ", result.Graph[0].OpensearchTotalResults)
		for i, book := range result.Graph[0].Items {
			fmt.Println("[", i+1, "]")
			fmt.Println("title:", book.Title)
			fmt.Println("author: ", book.DcCreator)
			fmt.Println("date:", book.DcDate)
			fmt.Println("url:", book.Link.ID)
			fmt.Println("") // blank separator line
		}
	},
}
|
package backend
//go:generate mockgen -source=$GOFILE -destination=mock/mock_$GOFILE -package=mock
import (
"errors"
"fmt"
"sort"
"github.com/goropikari/psqlittle/core"
)
// DB is the interface of the DBMS: table lookup, creation and removal.
type DB interface {
	GetTable(string) (Table, error)
	CreateTable(string, core.Cols) error
	DropTable(string) error
}

// Table is the interface of a table, exposing relational-algebra style
// operations. NOTE(review): the DBTable implementation mutates the
// receiver for several of these and returns it — confirm before
// assuming value semantics.
type Table interface {
	Copy() Table
	GetName() string
	GetColNames() core.ColumnNames
	GetRows() []Row
	GetCols() core.Cols
	InsertValues(core.ColumnNames, core.ValuesList) error
	RenameTableName(string)
	Project(core.ColumnNames, []func(Row) (core.Value, error)) (Table, error)
	Where(func(Row) (core.Value, error)) (Table, error)
	CrossJoin(Table) (Table, error)
	OrderBy(core.ColumnNames, []int) (Table, error)
	Limit(int) (Table, error)
	Update(core.ColumnNames, func(Row) (core.Value, error), []func(Row) (core.Value, error)) (Table, error)
	Delete(func(Row) (core.Value, error)) (Table, error)
}

// Row is the interface of a table row.
type Row interface {
	// GetValueByColName is used in ColRefNode when getting value
	GetValueByColName(core.ColumnName) (core.Value, error)
	GetValues() core.Values
	GetColNames() core.ColumnNames
	UpdateValue(core.ColumnName, core.Value)
}
// Database is an in-memory DB implementation: a map from table name to
// table.
type Database struct {
	Tables map[string]*DBTable
}
// NewDatabase returns an empty Database ready to accept tables.
func NewDatabase() *Database {
	db := &Database{}
	db.Tables = map[string]*DBTable{}
	return db
}
// CreateTable registers a new empty table with the given columns,
// failing if a table of that name already exists.
func (db *Database) CreateTable(tableName string, cols core.Cols) error {
	if _, exists := db.Tables[tableName]; exists {
		return fmt.Errorf(`ERROR: relation %v already exist`, tableName)
	}
	names := make(core.ColumnNames, len(cols))
	for i, col := range cols {
		names[i] = col.ColName
	}
	db.Tables[tableName] = &DBTable{
		Name:     tableName,
		ColNames: names,
		Cols:     cols,
		Rows:     DBRows{},
	}
	return nil
}
// GetTable returns the named table, or an error if it does not exist.
func (db *Database) GetTable(tableName string) (Table, error) {
	tb, ok := db.Tables[tableName]
	if !ok {
		return nil, fmt.Errorf(`ERROR: relation "%v" does not exist`, tableName)
	}
	return tb, nil
}
// DropTable removes the named table, or returns an error if it does
// not exist.
func (db *Database) DropTable(tableName string) error {
	if _, ok := db.Tables[tableName]; !ok {
		return fmt.Errorf(`ERROR: relation "%v" does not exist`, tableName)
	}
	delete(db.Tables, tableName)
	return nil
}
// DBRow is one table row: parallel slices of column names and values.
type DBRow struct {
	ColNames core.ColumnNames
	Values   core.Values
}

// DBRows is a list of DBRow pointers; rows are shared, not copied,
// unless Copy is called.
type DBRows []*DBRow

// ErrColumnNotFound is an int-typed sentinel.
// NOTE(review): it is compared against core.Value results in Project,
// not returned as a Go error — confirm the producer actually yields
// this value.
type ErrColumnNotFound int

const (
	ColumnNotFound ErrColumnNotFound = iota
)
// GetValueByColName returns the value stored under the first column
// exactly equal to name, or an error when no column matches.
func (r *DBRow) GetValueByColName(name core.ColumnName) (core.Value, error) {
	for i, colName := range r.ColNames {
		if colName == name {
			return r.Values[i], nil
		}
	}
	return nil, fmt.Errorf(`ERROR: column "%v" does not exist`, name.String())
}
// GetValues returns the row's value slice (shared, not copied).
func (r *DBRow) GetValues() core.Values {
	return r.Values
}

// GetColNames returns the row's column names (shared, not copied).
func (r *DBRow) GetColNames() core.ColumnNames {
	return r.ColNames
}
// UpdateValue sets val on every column whose bare name matches
// name.Name; the table qualifier is deliberately ignored.
func (r *DBRow) UpdateValue(name core.ColumnName, val core.Value) {
	for i := range r.ColNames {
		if r.ColNames[i].Name == name.Name {
			r.Values[i] = val
		}
	}
}
// Copy returns a deep copy of the row: fresh name and value slices.
func (r *DBRow) Copy() *DBRow {
	clone := &DBRow{
		ColNames: make(core.ColumnNames, len(r.ColNames)),
		Values:   make(core.Values, len(r.Values)),
	}
	copy(clone.ColNames, r.ColNames)
	copy(clone.Values, r.Values)
	return clone
}
// Copy returns a deep copy of the row list, copying each row.
func (r DBRows) Copy() DBRows {
	clone := make(DBRows, 0, len(r))
	for _, row := range r {
		clone = append(clone, row.Copy())
	}
	return clone
}
// ColumnID is the index of a column within a table.
type ColumnID int

// getByID returns the value stored at column index i (no bounds check).
func (r *DBRow) getByID(i ColumnID) core.Value {
	return r.Values[i]
}

// ColNameIndexes maps a ColName to its column index.
type ColNameIndexes map[core.ColumnName]int

// Copy returns a shallow copy of the index map.
func (c ColNameIndexes) Copy() ColNameIndexes {
	indexes := make(ColNameIndexes)
	for key, val := range c {
		indexes[key] = val
	}
	return indexes
}
// DBTable is the in-memory Table implementation: a name, column
// metadata, and the row data.
type DBTable struct {
	Name     string
	ColNames core.ColumnNames
	Cols     core.Cols
	Rows     DBRows
}
// Copy returns a deep copy of the table.
// BUG FIX: the previous version omitted the Name field, so every
// copied table silently lost its name.
func (t *DBTable) Copy() Table {
	return &DBTable{
		Name:     t.Name,
		ColNames: t.ColNames.Copy(),
		Cols:     t.Cols.Copy(),
		Rows:     t.Rows.Copy(),
	}
}
// GetName returns the table name.
func (t *DBTable) GetName() string {
	return t.Name
}

// GetColNames returns the table's column names (shared, not copied).
func (t *DBTable) GetColNames() core.ColumnNames {
	return t.ColNames
}

// GetCols returns the table's column definitions (shared, not copied).
func (t *DBTable) GetCols() core.Cols {
	return t.Cols
}

// SetColNames replaces the table's column names.
func (t *DBTable) SetColNames(names core.ColumnNames) {
	t.ColNames = names
}
// GetRows returns the rows as []Row. A *DBRow slice cannot be used as
// []Row directly, so each element is re-wrapped (the rows themselves
// are shared, not copied). Ref: https://stackoverflow.com/a/12994852
func (t *DBTable) GetRows() []Row {
	rows := make([]Row, len(t.Rows))
	for i, row := range t.Rows {
		rows[i] = row
	}
	return rows
}
// InsertValues appends one row per element of valsList. names selects
// which table columns the incoming values map to; an empty names means
// "all columns in table order". Unnamed columns are left as zero
// values.
func (t *DBTable) InsertValues(names core.ColumnNames, valsList core.ValuesList) error {
	if len(names) == 0 {
		names = t.GetColNames()
	}
	colNames := t.GetColNames()
	if err := t.validateInsert(names, valsList); err != nil {
		return err
	}
	// indexes[k] holds the table-column position of the k-th inserted
	// value (every matching column position, as before).
	indexes := make([]int, 0, len(names))
	for _, name := range names {
		for pos, col := range colNames {
			if name == col {
				indexes = append(indexes, pos)
			}
		}
	}
	for _, vals := range valsList {
		row := &DBRow{ColNames: colNames, Values: make(core.Values, len(colNames))}
		for vi, ci := range indexes {
			row.Values[ci] = vals[vi]
		}
		t.Rows = append(t.Rows, row)
	}
	return nil
}
// validateInsert checks that every value tuple has exactly one value
// per named column.
func (t *DBTable) validateInsert(names core.ColumnNames, valuesList core.ValuesList) error {
	for _, vals := range valuesList {
		if len(vals) != len(names) {
			return errors.New("invalid insert elements")
		}
	}
	// TODO: validate value types against the column definitions.
	return nil
}
// RenameTableName renames the table and rewrites the table qualifier
// on every column reference: the table's ColNames, its Cols, and each
// row's ColNames.
func (t *DBTable) RenameTableName(name string) {
	t.Name = name
	for i := range t.ColNames {
		t.ColNames[i].TableName = name
	}
	for i := range t.Cols {
		t.Cols[i].ColName.TableName = name
	}
	for _, row := range t.Rows {
		for j := range row.ColNames {
			row.ColNames[j].TableName = name
		}
	}
}
// Project is method to select columns of table.
// Each resFuncs[k] evaluates one select-list expression against a row;
// core.Wildcard expands to all of the row's values/columns.
// NOTE(review): this mutates the rows and the receiver in place
// (row.Values/row.ColNames are overwritten), so aliases of the original
// rows observe the projection — confirm callers expect that.
func (t *DBTable) Project(TargetColNames core.ColumnNames, resFuncs []func(Row) (core.Value, error)) (Table, error) {
	rows := t.GetRows()
	if len(rows) == 0 {
		return t, nil
	}
	newRows := make(DBRows, 0, len(rows))
	for _, row := range t.Rows {
		colNames := make(core.ColumnNames, 0)
		vals := make(core.Values, 0)
		for k, fn := range resFuncs {
			v, err := fn(row)
			if err != nil {
				return nil, err
			}
			if v != core.Wildcard {
				// ColumnNotFound is a sentinel value produced by the
				// expression evaluator for an unknown column reference.
				if v == ColumnNotFound {
					return nil, fmt.Errorf(`ERROR: column "%v" does not exist`, TargetColNames[k])
				}
				vals = append(vals, v)
				colNames = append(colNames, TargetColNames[k])
			} else {
				// column wildcard
				// Add values
				for _, val := range row.GetValues() {
					if val == nil {
						// Fix me: nil should be converted
						// when the value is inserted.
						vals = append(vals, core.Null)
					} else {
						vals = append(vals, val)
					}
				}
				// Add columns
				for _, name := range t.GetColNames() {
					colNames = append(colNames, name)
				}
			}
		}
		// In-place rewrite of the shared row (see note above).
		row.Values = vals
		row.ColNames = colNames
		newRows = append(newRows, row)
	}
	t.Rows = newRows
	// Adopt the projected column set from the first row.
	tbColNames := make(core.ColumnNames, 0)
	for _, name := range newRows[0].ColNames {
		tbColNames = append(tbColNames, name)
	}
	t.ColNames = tbColNames
	// TODO: implement SetCols if type validation is implemented
	// newTable.SetCols(cols)
	return t, nil
}
// Where keeps only the rows for which condFn evaluates to core.True,
// mutating the receiver in place and returning it.
func (t *DBTable) Where(condFn func(Row) (core.Value, error)) (Table, error) {
	kept := make([]*DBRow, 0)
	for _, row := range t.Rows {
		res, err := condFn(row)
		if err != nil {
			return nil, err
		}
		if res == core.True {
			kept = append(kept, row)
		}
	}
	t.Rows = kept
	return t, nil
}
// CrossJoin returns a new table that is the Cartesian product of the
// receiver and rtb: every left row concatenated with every right row.
func (t *DBTable) CrossJoin(rtb Table) (Table, error) {
	joined := &DBTable{
		ColNames: uniteColNames(t.GetColNames(), rtb.GetColNames()),
		Cols:     uniteCols(t.GetCols(), rtb.GetCols()),
		Rows:     make([]*DBRow, 0),
	}
	for _, left := range t.GetRows() {
		for _, right := range rtb.GetRows() {
			joined.Rows = append(joined.Rows, uniteRow(left, right).(*DBRow))
		}
	}
	return joined, nil
}
// uniteRow concatenates two rows into a new DBRow: r1's values and
// columns first, then r2's.
func uniteRow(r1, r2 Row) Row {
	vals := make(core.Values, 0, len(r1.GetValues())+len(r2.GetValues()))
	vals = append(vals, r1.GetValues()...)
	vals = append(vals, r2.GetValues()...)
	cols := make(core.ColumnNames, 0, len(r1.GetColNames())+len(r2.GetColNames()))
	cols = append(cols, r1.GetColNames()...)
	cols = append(cols, r2.GetColNames()...)
	return &DBRow{
		ColNames: cols,
		Values:   vals,
	}
}
// uniteColNames concatenates the left and right column-name lists into
// a new slice.
func uniteColNames(lcs, rcs core.ColumnNames) core.ColumnNames {
	ns := make(core.ColumnNames, 0, len(lcs)+len(rcs))
	ns = append(ns, lcs...)
	ns = append(ns, rcs...)
	return ns
}
// uniteCols concatenates the left and right column definitions into a
// new slice.
// BUG FIX: the second loop previously ranged over l again, so the
// right table's columns were dropped and the left table's duplicated.
func uniteCols(l, r core.Cols) core.Cols {
	cs := make(core.Cols, 0, len(l)+len(r))
	cs = append(cs, l...)
	cs = append(cs, r...)
	return cs
}
// OrderBy sorts rows by given column names
// NOTE(review): only cols[0]/sortDirs[0] are applied — secondary sort
// keys are validated but ignored; confirm whether multi-key ordering
// is required by callers. Lookup errors inside the comparator are
// ignored, so rows missing the column compare by a nil value.
func (t *DBTable) OrderBy(cols core.ColumnNames, sortDirs []int) (Table, error) {
	if err := validateOrderByColumn(t.ColNames, cols); err != nil {
		return nil, err
	}
	rows := t.Rows
	name := cols[0]
	sortDir := sortDirs[0]
	sort.Slice(rows, func(i, j int) bool {
		l, _ := rows[i].GetValueByColName(name)
		r, _ := rows[j].GetValueByColName(name)
		return core.LessForSort(l, r, sortDir)
	})
	t.Rows = rows
	return t, nil
}
// validateOrderByColumn checks that every non-expression sort target
// names an existing table column.
func validateOrderByColumn(tbCols, targets core.ColumnNames) error {
	for _, target := range targets {
		if (target == core.ColumnName{}) {
			// A zero ColumnName marks an expression, not a column
			// reference; nothing to validate.
			continue
		}
		if !haveColumn(target, tbCols) {
			return fmt.Errorf(`column "%v" does not exist`, target.String())
		}
	}
	return nil
}
// haveColumn reports whether cs contains a column exactly equal to c.
func haveColumn(c core.ColumnName, cs core.ColumnNames) bool {
	for i := range cs {
		if cs[i] == c {
			return true
		}
	}
	return false
}
// Limit returns a table with at most N rows. When the table already
// fits, the receiver itself is returned; otherwise a new table is
// built whose rows wrap the first N rows' (shared) names and values.
func (t *DBTable) Limit(N int) (Table, error) {
	if len(t.Rows) <= N {
		return t, nil
	}
	limited := make([]*DBRow, 0, N)
	for _, row := range t.GetRows()[:N] {
		limited = append(limited, &DBRow{
			ColNames: row.GetColNames(),
			Values:   row.GetValues(),
		})
	}
	return &DBTable{
		ColNames: t.GetColNames(),
		Cols:     t.GetCols(),
		Rows:     limited,
	}, nil
}
// Update assigns, for every row matching condFn, the value produced by
// assignValFns[k] to the column colNames[k]. Rows are mutated in
// place; the returned Table is always nil.
func (t *DBTable) Update(colNames core.ColumnNames, condFn func(Row) (core.Value, error), assignValFns []func(Row) (core.Value, error)) (Table, error) {
	for _, row := range t.Rows {
		matched, err := condFn(row)
		if err != nil {
			return nil, err
		}
		if matched != core.True {
			continue
		}
		for k, name := range colNames {
			v, err := assignValFns[k](row)
			if err != nil {
				return nil, err
			}
			row.UpdateValue(name, v)
		}
	}
	return nil, nil
}
// Delete removes every row for which condFn evaluates to core.True,
// mutating the receiver in place; the returned Table is always nil.
func (t *DBTable) Delete(condFn func(Row) (core.Value, error)) (Table, error) {
	remaining := make([]*DBRow, 0)
	for _, row := range t.Rows {
		matched, err := condFn(row)
		if err != nil {
			return nil, err
		}
		if matched != core.True {
			remaining = append(remaining, row)
		}
	}
	t.Rows = remaining
	return nil, nil
}
// toIndex maps each requested column name onto its ColumnID within the
// table; it returns ErrIndexNotFound if any name matches no column.
//
// BUG FIX: the previous version returned ErrIndexNotFound as soon as a
// name failed to match a single candidate column (else-return inside
// the inner loop), so a lookup only succeeded when the requested names
// mirrored the table's column order exactly.
func (t *DBTable) toIndex(names core.ColumnNames) ([]ColumnID, error) {
	rawNames := t.GetColNames()
	idxs := make([]ColumnID, 0, len(names))
	for _, name := range names {
		found := false
		for k, rawName := range rawNames {
			if name.Equal(rawName) {
				idxs = append(idxs, ColumnID(k))
				found = true
				break
			}
		}
		if !found {
			return nil, ErrIndexNotFound
		}
	}
	return idxs, nil
}
|
package structNovedad
import (
"time"
"github.com/xubiosueldos/conexionBD/Concepto/structConcepto"
"github.com/xubiosueldos/conexionBD/Legajo/structLegajo"
"github.com/xubiosueldos/conexionBD/structGormModel"
)
// Novedad models a payroll "novelty" record linked to an employee file
// (Legajo) and a pay concept (Concepto) via gorm foreign keys.
// NOTE(review): field semantics inferred from the Spanish names —
// confirm against the owning service.
type Novedad struct {
	structGormModel.GormModel
	Nombre      string                   `json:"nombre"`      // name
	Codigo      string                   `json:"codigo"`      // code
	Descripcion string                   `json:"descripcion"` // description
	Activo      int                      `json:"activo"`      // active flag (int, not bool)
	Importe     *float32                 `json:"importe" sql:"type:decimal(19,4);"`  // amount
	Cantidad    float64                  `json:"cantidad" sql:"type:decimal(19,4);"` // quantity
	Fecha       *time.Time               `json:"fecha" gorm:"not null"`              // date
	Legajo      *structLegajo.Legajo     `json:"legajo" gorm:"ForeignKey:Legajoid;association_foreignkey:ID;association_autoupdate:false;not null;PRELOAD:false"`
	Legajoid    *int                     `json:"legajoid" sql:"type:int REFERENCES Legajo(ID)" gorm:"not null"`
	Concepto    *structConcepto.Concepto `json:"concepto" gorm:"ForeignKey:Conceptoid;association_foreignkey:ID;association_autoupdate:false;not null"`
	Conceptoid  *int                     `json:"conceptoid" gorm:"not null"`
}
|
package main
import (
"encoding/json"
"github.com/gorilla/mux"
"log"
"net/http"
)
// Provider is a service provider exposed by the REST API.
type Provider struct {
	ID    string `json:"providerID,omitempty"`
	Name  string `json:"name,omitempty"`
	Tier  string `json:"tier,omitempty"`
	Links *Links `json:"links,omitempty"`
}
type Links struct {
Href string `json:"href,omitempty"`
Rel string `json:"lawnmowers"`
Type string `json:"GET"`
}
var providers []Provider
// GetProviders writes the full provider list as JSON.
// FIX: the encode error was silently discarded; it is now logged.
func GetProviders(w http.ResponseWriter, r *http.Request) {
	if err := json.NewEncoder(w).Encode(providers); err != nil {
		log.Println("encoding providers:", err)
	}
}
// GetProvider writes the provider whose ID matches the {id} path
// parameter.
// FIX: a miss previously produced an empty 200 response; it now
// returns 404.
func GetProvider(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	for _, item := range providers {
		if item.ID == params["id"] {
			json.NewEncoder(w).Encode(item)
			return
		}
	}
	http.NotFound(w, r)
}
// CreateProvider decodes a Provider from the request body, assigns it
// the {id} path parameter, appends it to the store, and echoes the
// full list.
// FIX: the decode error was silently discarded, so a malformed body
// created an empty provider; it now returns 400.
func CreateProvider(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	var provider Provider
	if err := json.NewDecoder(r.Body).Decode(&provider); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	provider.ID = params["id"]
	providers = append(providers, provider)
	json.NewEncoder(w).Encode(providers)
}
// DeleteProvider removes the provider matching the {id} path parameter
// (if any) and writes the remaining list.
//
// BUG FIX: the Encode call sat INSIDE the loop — when the first item
// matched, break skipped it and no body was written at all; otherwise
// the list was encoded once per preceding non-matching item. The
// response is now written exactly once, after the loop.
func DeleteProvider(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	for index, item := range providers {
		if item.ID == params["id"] {
			providers = append(providers[:index], providers[index+1:]...)
			break
		}
	}
	json.NewEncoder(w).Encode(providers)
}
// main seeds the in-memory provider store and serves the REST API on
// port 7555.
func main() {
	providers = append(providers, Provider{ID: "1", Name: "lawncare", Tier: "1", Links: &Links{Href: "/lawnmowers", Rel: "Lawnmowers", Type: "GET"}})
	providers = append(providers, Provider{ID: "2", Name: "window cleaning", Tier: "1", Links: &Links{Href: "/windowcleaners", Rel: "windowcleaners", Type: "GET"}})
	providers = append(providers, Provider{ID: "3", Name: "house cleaning", Tier: "1", Links: &Links{Href: "/housecleaners", Rel: "housecleaners", Type: "GET"}})
	providers = append(providers, Provider{ID: "4", Name: "plumbing", Tier: "2", Links: &Links{Href: "/plumbers", Rel: "plumbers", Type: "GET"}})
	providers = append(providers, Provider{ID: "5", Name: "lawyers", Tier: "5", Links: &Links{Href: "/lawyers", Rel: "lawyers", Type: "GET"}})
	router := mux.NewRouter()
	router.HandleFunc("/providers", GetProviders).Methods("GET")
	router.HandleFunc("/providers/{id}", GetProvider).Methods("GET")
	// BUG FIX: creation was registered with Methods("GET"), colliding
	// with the GET-by-id route above and allowing creation via safe
	// requests; POST is the appropriate method.
	router.HandleFunc("/providers/{id}", CreateProvider).Methods("POST")
	router.HandleFunc("/providers/{id}", DeleteProvider).Methods("DELETE")
	log.Fatal(http.ListenAndServe(":7555", router))
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package armhelpers
import (
"context"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-03-30/compute"
azcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/google/go-cmp/cmp"
)
// TestListVirtualMachineScaleSets checks that listing scale sets
// through the client returns the pages recorded by the HTTP mock.
func TestListVirtualMachineScaleSets(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterListVirtualMachineScaleSets()
	err = mc.Activate()
	if err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()
	env := mc.GetEnvironment()
	azureClient, err := NewAzureClientWithClientSecret(env, subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	// Build the expected list from the same canned JSON the mock serves.
	list := &VirtualMachineScaleSetListValues{}
	err = unmarshalFromString(mc.ResponseListVirtualMachineScaleSets, &list)
	if err != nil {
		t.Error(err)
	}
	listExpected := []azcompute.VirtualMachineScaleSet{}
	if err := DeepCopy(&listExpected, list.Value); err != nil {
		t.Fatal(err)
	}
	// NOTE(review): the error from the initial List call is only
	// examined if page.NotDone() is true — an immediate failure that
	// yields a done page would be silently skipped. Confirm.
	for page, err := azureClient.ListVirtualMachineScaleSets(context.Background(), resourceGroup); page.NotDone(); err = page.Next() {
		if err != nil {
			t.Fatal(err)
		}
		if diff := cmp.Diff(page.Values(), listExpected); diff != "" {
			t.Errorf("Fail to compare, Virtual Machine Scale Set %q", diff)
		}
	}
}
// TestListVirtualMachineScaleSetVMs checks that listing the VMs of a
// scale set returns the pages recorded by the HTTP mock.
func TestListVirtualMachineScaleSetVMs(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterListVirtualMachineScaleSetVMs()
	err = mc.Activate()
	if err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()
	env := mc.GetEnvironment()
	azureClient, err := NewAzureClientWithClientSecret(env, subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	// Build the expected list from the same canned JSON the mock serves.
	list := &VirtualMachineScaleSetVMValues{}
	err = unmarshalFromString(mc.ResponseListVirtualMachineScaleSetVMs, &list)
	if err != nil {
		t.Error(err)
	}
	listExpected := []azcompute.VirtualMachineScaleSetVM{}
	if err = DeepCopy(&listExpected, list.Value); err != nil {
		t.Fatal(err)
	}
	// NOTE(review): same paging caveat as TestListVirtualMachineScaleSets —
	// the initial call's error is only seen when the page is not done.
	for page, err := azureClient.ListVirtualMachineScaleSetVMs(context.Background(), resourceGroup, virtualMachineScaleSetName); page.NotDone(); err = page.Next() {
		if err != nil {
			t.Fatal(err)
		}
		if diff := cmp.Diff(page.Values(), listExpected); diff != "" {
			t.Errorf("Fail to compare, Virtual Machine Scale Set VMs %q", diff)
		}
	}
}
// TestListVirtualMachines checks that listing virtual machines returns
// the pages recorded by the HTTP mock.
func TestListVirtualMachines(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterListVirtualMachines()
	err = mc.Activate()
	if err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()
	env := mc.GetEnvironment()
	azureClient, err := NewAzureClientWithClientSecret(env, subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	// Build the expected list from the same canned JSON the mock serves.
	list := &VirtualMachineVMValues{}
	err = unmarshalFromString(mc.ResponseListVirtualMachines, &list)
	if err != nil {
		t.Error(err)
	}
	listExpected := []azcompute.VirtualMachine{}
	if err = DeepCopy(&listExpected, list.Value); err != nil {
		t.Fatal(err)
	}
	// NOTE(review): same paging caveat as the other list tests above.
	for page, err := azureClient.ListVirtualMachines(context.Background(), resourceGroup); page.NotDone(); err = page.Next() {
		if err != nil {
			t.Fatal(err)
		}
		if diff := cmp.Diff(page.Values(), listExpected); diff != "" {
			t.Errorf("Fail to compare, Virtual Machines %q", diff)
		}
	}
}
// TestGetVirtualMachine verifies GetVirtualMachine against the canned mock
// response, comparing the relevant fields one by one.
func TestGetVirtualMachine(t *testing.T) {
	mock, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mock.RegisterLogin()
	mock.RegisterVirtualMachineEndpoint()
	if err = mock.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mock.DeactivateAndReset()
	client, err := NewAzureClientWithClientSecret(mock.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	vm := compute.VirtualMachine{}
	if err = unmarshalFromString(mock.ResponseGetVirtualMachine, &vm); err != nil {
		t.Error(err)
	}
	want := azcompute.VirtualMachine{}
	if err = DeepCopy(&want, vm); err != nil {
		t.Error(err)
	}
	got, err := client.GetVirtualMachine(context.Background(), resourceGroup, virtualMachineName)
	if err != nil {
		t.Error(err)
	}
	// Compare field by field so a mismatch pinpoints the offending field.
	for _, c := range []struct {
		field     string
		got, want interface{}
	}{
		{"VirtualMachineProperties", got.VirtualMachineProperties, want.VirtualMachineProperties},
		{"Name", got.Name, want.Name},
		{"Tags", got.Tags, want.Tags},
		{"Location", got.Location, want.Location},
	} {
		if diff := cmp.Diff(c.got, c.want); diff != "" {
			t.Errorf("Fail to compare, Virtual Machine %s %q", c.field, diff)
		}
	}
}
// TestDeleteVirtualMachine verifies DeleteVirtualMachine succeeds against
// the mocked VM endpoint and its registered delete operation.
func TestDeleteVirtualMachine(t *testing.T) {
	mock, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mock.RegisterLogin()
	mock.RegisterVirtualMachineEndpoint()
	mock.RegisterDeleteOperation()
	if err = mock.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mock.DeactivateAndReset()
	client, err := NewAzureClientWithClientSecret(mock.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	if err = client.DeleteVirtualMachine(context.Background(), resourceGroup, virtualMachineName); err != nil {
		t.Error(err)
	}
}
// TestGetAvailabilitySet verifies GetAvailabilitySet against the mocked
// endpoint: fault/update domain counts, proximity placement group, and
// location must match the canned mock response.
func TestGetAvailabilitySet(t *testing.T) {
	mc, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mc.RegisterLogin()
	mc.RegisterGetAvailabilitySet()
	err = mc.Activate()
	if err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mc.DeactivateAndReset()
	env := mc.GetEnvironment()
	azureClient, err := NewAzureClientWithClientSecret(env, subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	vmas, err := azureClient.GetAvailabilitySet(context.Background(), resourceGroup, virtualMachineAvailabilitySetName)
	if err != nil {
		t.Fatalf("can't get availability set: %s", err)
	}
	// The values below mirror the canned mock response.
	var expected int32 = 3
	if *vmas.PlatformFaultDomainCount != expected {
		t.Fatalf("expected PlatformFaultDomainCount of %d but got %v", expected, *vmas.PlatformFaultDomainCount)
	}
	if *vmas.PlatformUpdateDomainCount != expected {
		t.Fatalf("expected PlatformUpdateDomainCount of %d but got %v", expected, *vmas.PlatformUpdateDomainCount)
	}
	// No proximity placement group is expected in the mock response.
	if vmas.ProximityPlacementGroup != nil && vmas.ProximityPlacementGroup.ID != nil {
		t.Fatalf("expected ProximityPlacementGroup of %q but got %v", "", *vmas.ProximityPlacementGroup.ID)
	}
	l := "eastus"
	if *vmas.Location != l {
		t.Fatalf("expected Location of %s but got %v", l, *vmas.Location)
	}
}
// TestGetAvailabilitySetFaultDomainCount verifies the platform fault
// domain count reported for a set of availability-set IDs against the
// mocked endpoint.
func TestGetAvailabilitySetFaultDomainCount(t *testing.T) {
	mock, err := NewHTTPMockClient()
	if err != nil {
		t.Fatalf("failed to create HttpMockClient - %s", err)
	}
	mock.RegisterLogin()
	mock.RegisterGetAvailabilitySetFaultDomainCount()
	if err = mock.Activate(); err != nil {
		t.Fatalf("failed to activate HttpMockClient - %s", err)
	}
	defer mock.DeactivateAndReset()
	client, err := NewAzureClientWithClientSecret(mock.GetEnvironment(), subscriptionID, "clientID", "secret")
	if err != nil {
		t.Fatalf("can not get client %s", err)
	}
	got, err := client.GetAvailabilitySetFaultDomainCount(context.Background(), resourceGroup, []string{"id1", "id2"})
	if err != nil {
		t.Fatalf("can't get availability set platform fault domain count: %s", err)
	}
	const want = 3
	if got != want {
		t.Fatalf("platform fault domain count: expected %d but got %d", want, got)
	}
}
|
package main
import (
"fmt"
"os"
"github.com/gyepisam/redux"
)
// cmdInit implements the "redux init" subcommand, which creates or
// reinitializes one or more redo root directories. Its Long help text is
// filled in by init() because it interpolates the redo environment
// variable name from the redux package.
var cmdInit = &Command{
	Run: runInit,
	LinkName: "redo-init",
	UsageLine: "redux init [OPTIONS] [DIRECTORY ...]",
	Short: "Creates or reinitializes one or more redo root directories.",
}
// init wires up cmdInit's long help text, interpolating the name of the
// environment variable that can supply the target directory.
func init() {
	text := `
If one or more DIRECTORY arguments are specified, the command initializes each one.
If no arguments are provided, but an environment variable named %s exists, it is initialized.
If neither arguments nor an environment variable is provided, the current directory is initialized.
`
	cmdInit.Long = fmt.Sprintf(text, redux.REDO_DIR_ENV_NAME)
}
// runInit initializes each directory given in args. With no arguments it
// falls back to the directory named by the redo environment variable and,
// failing that, to the current directory.
func runInit(args []string) error {
	if len(args) == 0 {
		value := os.Getenv(redux.REDO_DIR_ENV_NAME)
		if value != "" {
			args = []string{value}
		} else {
			args = []string{"."}
		}
	}
	for _, dir := range args {
		err := redux.InitDir(dir)
		if err != nil {
			return fmt.Errorf("cannot initialize directory: %s", err)
		}
	}
	return nil
}
|
package mock
import "github.com/10gen/realm-cli/internal/cloud/atlas"
// AtlasClient is a mocked Atlas client
// AtlasClient is a mocked Atlas client
type AtlasClient struct {
	atlas.Client
	// GroupsFn, ClustersFn, and DatalakesFn, when non-nil, override the
	// corresponding methods of the embedded atlas.Client.
	GroupsFn func() ([]atlas.Group, error)
	ClustersFn func(groupID string) ([]atlas.Cluster, error)
	DatalakesFn func(groupID string) ([]atlas.Datalake, error)
}
// Groups calls the mocked Groups implementation if provided,
// otherwise the call falls back to the underlying atlas.Client implementation.
// NOTE: this may panic if the underlying atlas.Client is left undefined
// Groups calls the mocked Groups implementation if provided,
// otherwise the call falls back to the underlying atlas.Client implementation.
// NOTE: this may panic if the underlying atlas.Client is left undefined
func (ac AtlasClient) Groups() ([]atlas.Group, error) {
	if ac.GroupsFn == nil {
		return ac.Client.Groups()
	}
	return ac.GroupsFn()
}
// Clusters calls the mocked Clusters implementation if provided,
// otherwise the call falls back to the underlying atlas.Client implementation.
// NOTE: this may panic if the underlying atlas.Client is left undefined
// Clusters calls the mocked Clusters implementation if provided,
// otherwise the call falls back to the underlying atlas.Client implementation.
// NOTE: this may panic if the underlying atlas.Client is left undefined
func (ac AtlasClient) Clusters(groupID string) ([]atlas.Cluster, error) {
	if ac.ClustersFn == nil {
		return ac.Client.Clusters(groupID)
	}
	return ac.ClustersFn(groupID)
}
// Datalakes calls the mocked Datalakes implementation if provided,
// otherwise the call falls back to the underlying atlas.Client implementation.
// NOTE: this may panic if the underlying atlas.Client is left undefined
// Datalakes calls the mocked Datalakes implementation if provided,
// otherwise the call falls back to the underlying atlas.Client implementation.
// NOTE: this may panic if the underlying atlas.Client is left undefined
func (ac AtlasClient) Datalakes(groupID string) ([]atlas.Datalake, error) {
	if ac.DatalakesFn == nil {
		return ac.Client.Datalakes(groupID)
	}
	return ac.DatalakesFn(groupID)
}
|
package dates_utils
import "time"
// format is the timestamp layout (month-day-year, 24-hour clock).
// NOTE(review): the trailing "Z" here appears to be a literal character,
// not a time-zone directive ("Z07:00") — confirm that is intended.
const format = "01-02-2006 15:04:05Z"
func GetNow() time.Time {
return time.Now().UTC()
}
// GetNowString renders the current UTC time using the package timestamp
// format.
func GetNowString() string {
	now := GetNow()
	return now.Format(format)
}
|
package main
import (
"fmt"
"github.com/cosmos/ethermint/version"
"github.com/spf13/cobra"
)
// var (
// VERSION string
// BUILD_TIME string
// GO_VERSION string
// GIT_BRANCH string
// COMMIT_SHA1 string
// )
// versionCmd implements the "v" subcommand: it prints the build metadata
// (version, build time, git branch/commit, Go version) baked into the
// version package at build time.
var versionCmd = &cobra.Command{
	Use: "v",
	Short: "Print the version number of ethermint",
	Long: `This is ethermint's version`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Printf("version:\t%s \nbuild time:\t%s\ngit branch:\t%s\ngit commit:\t%s\ngo version:\t%s\n", version.VERSION, version.BUILD_TIME, version.GIT_BRANCH, version.COMMIT_SHA1, version.GO_VERSION)
	},
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package testcore
import (
"github.com/iotaledger/wasp/packages/vm/core/testcore/sandbox_tests/test_sandbox_sc"
"testing"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/solo"
"github.com/iotaledger/wasp/packages/vm/core/accounts"
"github.com/iotaledger/wasp/packages/vm/core/blob"
"github.com/iotaledger/wasp/packages/vm/core/root"
"github.com/stretchr/testify/require"
)
// TestRootBasic spins up a solo chain, runs its consistency check, and
// logs the chain state.
func TestRootBasic(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	chain.CheckChain()
	chain.Log.Infof("\n%s\n", chain.String())
}
// TestRootRepeatInit checks that calling the root contract's "init" entry
// point on an already-initialized chain fails.
func TestRootRepeatInit(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	chain.CheckChain()
	_, err := chain.PostRequest(solo.NewCallParams(root.Interface.Name, "init"), nil)
	require.Error(t, err)
}
// TestGetInfo checks that the root contract reports the chain's identity
// and the four core contracts, and that FindContract agrees with the
// registry returned by GetInfo.
func TestGetInfo(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	info, contracts := chain.GetInfo()
	require.EqualValues(t, chain.ChainID, info.ChainID)
	require.EqualValues(t, chain.ChainColor, info.ChainColor)
	require.EqualValues(t, chain.ChainAddress, info.ChainAddress)
	require.EqualValues(t, chain.OriginatorAgentID, info.ChainOwnerID)
	// A fresh chain carries exactly the 4 core contracts.
	require.EqualValues(t, 4, len(contracts))
	_, ok := contracts[root.Interface.Hname()]
	require.True(t, ok)
	recBlob, ok := contracts[blob.Interface.Hname()]
	require.True(t, ok)
	_, ok = contracts[accounts.Interface.Hname()]
	require.True(t, ok)
	// FindContract must return the same record as GetInfo's contract map.
	rec, err := chain.FindContract(blob.Interface.Name)
	require.NoError(t, err)
	require.EqualValues(t, root.EncodeContractRecord(recBlob), root.EncodeContractRecord(rec))
}
// TestDeployExample deploys the example contract and checks that the root
// contract's registry reflects it: name, default description, zero owner
// fee, creator, and program hash.
func TestDeployExample(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	name := "testInc"
	err := chain.DeployContract(nil, name, test_sandbox_sc.Interface.ProgramHash)
	require.NoError(t, err)
	info, contracts := chain.GetInfo()
	require.EqualValues(t, chain.ChainID, info.ChainID)
	require.EqualValues(t, chain.OriginatorAgentID, info.ChainOwnerID)
	// 4 core contracts plus the newly deployed one.
	require.EqualValues(t, 5, len(contracts))
	_, ok := contracts[root.Interface.Hname()]
	require.True(t, ok)
	_, ok = contracts[blob.Interface.Hname()]
	require.True(t, ok)
	_, ok = contracts[accounts.Interface.Hname()]
	require.True(t, ok)
	rec, ok := contracts[coretypes.Hn(name)]
	require.True(t, ok)
	require.EqualValues(t, name, rec.Name)
	require.EqualValues(t, "N/A", rec.Description)
	require.EqualValues(t, 0, rec.OwnerFee)
	require.EqualValues(t, chain.OriginatorAgentID, rec.Creator)
	require.EqualValues(t, test_sandbox_sc.Interface.ProgramHash, rec.ProgramHash)
	// FindContract must agree with the registry entry.
	recFind, err := chain.FindContract(name)
	require.NoError(t, err)
	require.EqualValues(t, root.EncodeContractRecord(recFind), root.EncodeContractRecord(rec))
}
// TestDeployDouble checks that deploying a second contract under an
// already-used name fails and leaves the registry with a single entry for
// that name.
func TestDeployDouble(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	name := "testInc"
	err := chain.DeployContract(nil, name, test_sandbox_sc.Interface.ProgramHash)
	require.NoError(t, err)
	// Second deployment with the same name must be rejected.
	err = chain.DeployContract(nil, name, test_sandbox_sc.Interface.ProgramHash)
	require.Error(t, err)
	info, contracts := chain.GetInfo()
	require.EqualValues(t, chain.ChainID, info.ChainID)
	require.EqualValues(t, chain.OriginatorAgentID, info.ChainOwnerID)
	// Still 4 core contracts plus one deployed contract.
	require.EqualValues(t, 5, len(contracts))
	_, ok := contracts[root.Interface.Hname()]
	require.True(t, ok)
	_, ok = contracts[blob.Interface.Hname()]
	require.True(t, ok)
	_, ok = contracts[accounts.Interface.Hname()]
	require.True(t, ok)
	rec, ok := contracts[coretypes.Hn(name)]
	require.True(t, ok)
	require.EqualValues(t, name, rec.Name)
	require.EqualValues(t, "N/A", rec.Description)
	require.EqualValues(t, 0, rec.OwnerFee)
	require.EqualValues(t, chain.OriginatorAgentID, rec.Creator)
	require.EqualValues(t, test_sandbox_sc.Interface.ProgramHash, rec.ProgramHash)
}
// TestChangeOwnerAuthorized delegates chain ownership to a new owner and
// checks the change takes effect only after the new owner claims it.
func TestChangeOwnerAuthorized(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	newOwner := env.NewSignatureSchemeWithFunds()
	newOwnerAgentID := coretypes.NewAgentIDFromAddress(newOwner.Address())
	req := solo.NewCallParams(root.Interface.Name, root.FuncDelegateChainOwnership, root.ParamChainOwner, newOwnerAgentID)
	_, err := chain.PostRequest(req, nil)
	require.NoError(t, err)
	// Delegation alone must not change the owner yet.
	info, _ := chain.GetInfo()
	require.EqualValues(t, chain.OriginatorAgentID, info.ChainOwnerID)
	req = solo.NewCallParams(root.Interface.Name, root.FuncClaimChainOwnership)
	_, err = chain.PostRequest(req, newOwner)
	require.NoError(t, err)
	// After the claim, the new owner is in effect.
	info, _ = chain.GetInfo()
	require.EqualValues(t, newOwnerAgentID, info.ChainOwnerID)
}
// TestChangeOwnerUnauthorized checks that a non-owner cannot delegate
// chain ownership: the request must fail and the owner must not change.
func TestChangeOwnerUnauthorized(t *testing.T) {
	env := solo.New(t, false, false)
	chain := env.NewChain(nil, "chain1")
	defer chain.WaitForEmptyBacklog()
	intruder := env.NewSignatureSchemeWithFunds()
	intruderAgentID := coretypes.NewAgentIDFromAddress(intruder.Address())
	req := solo.NewCallParams(root.Interface.Name, root.FuncDelegateChainOwnership, root.ParamChainOwner, intruderAgentID)
	_, err := chain.PostRequest(req, intruder)
	require.Error(t, err)
	info, _ := chain.GetInfo()
	require.EqualValues(t, chain.OriginatorAgentID, info.ChainOwnerID)
}
|
package checker
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"log"
"os"
"time"
"github.com/cybertec-postgresql/vip-manager/vipconfig"
client "go.etcd.io/etcd/client/v3"
)
// EtcdLeaderChecker is used to check state of the leader key in Etcd
// EtcdLeaderChecker is used to check state of the leader key in Etcd
type EtcdLeaderChecker struct {
	key string
	nodename string
	// kapi is the etcd v3 KV client used to poll the leader key.
	kapi client.KV
}
// eConf holds the configuration passed to NewEtcdLeaderChecker; it lives
// at package level so the polling loop can read the check interval.
// (Named eConf to avoid a conflict with conf in etcd_leader_checker.go.)
var eConf *vipconfig.Config
// getTransport builds the TLS configuration for the etcd client from the
// CA / client-certificate settings in conf. Each part may be left unset,
// in which case the corresponding tls.Config field keeps its zero value.
func getTransport(conf *vipconfig.Config) (*tls.Config, error) {
	tlsClientConfig := new(tls.Config)
	// Set RootCAs only if a CA certificate file is configured.
	if conf.EtcdCAFile != "" {
		caCert, err := os.ReadFile(conf.EtcdCAFile)
		if err != nil {
			return nil, fmt.Errorf("cannot load CA file: %s", err)
		}
		caCertPool := x509.NewCertPool()
		// Surface an unparseable CA file instead of ignoring it.
		if !caCertPool.AppendCertsFromPEM(caCert) {
			return nil, fmt.Errorf("no certificates could be parsed from CA file %s", conf.EtcdCAFile)
		}
		tlsClientConfig.RootCAs = caCertPool
	}
	// Attach the client certificate whenever both cert and key files are
	// configured. Previously this happened only when a CA file was ALSO
	// present, silently dropping client authentication otherwise.
	if conf.EtcdCertFile != "" && conf.EtcdKeyFile != "" {
		cert, err := tls.LoadX509KeyPair(conf.EtcdCertFile, conf.EtcdKeyFile)
		if err != nil {
			return nil, fmt.Errorf("cannot load client cert or key file: %s", err)
		}
		tlsClientConfig.Certificates = []tls.Certificate{cert}
	}
	return tlsClientConfig, nil
}
// NewEtcdLeaderChecker returns a new instance configured from con. It also
// stores con in the package-level eConf so the polling loop can read the
// check interval.
func NewEtcdLeaderChecker(con *vipconfig.Config) (*EtcdLeaderChecker, error) {
	eConf = con
	e := &EtcdLeaderChecker{key: eConf.Key, nodename: eConf.Nodename}
	tlsConfig, err := getTransport(eConf)
	if err != nil {
		return nil, err
	}
	cfg := client.Config{
		Endpoints: eConf.Endpoints,
		TLS: tlsConfig,
		DialKeepAliveTimeout: time.Second,
		Username: eConf.EtcdUser,
		Password: eConf.EtcdPassword,
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	// Only the KV API is needed for polling the leader key.
	e.kapi = c.KV
	return e, nil
}
// GetChangeNotificationStream polls the leader key in etcd and pushes a
// boolean onto out indicating whether this node currently holds the key.
// It loops until ctx is cancelled and then returns ctx.Err().
func (e *EtcdLeaderChecker) GetChangeNotificationStream(ctx context.Context, out chan<- bool) error {
	var state bool
	var alreadyConnected = false
checkLoop:
	for {
		resp, err := e.kapi.Get(ctx, e.key)
		if err != nil {
			if ctx.Err() != nil {
				break checkLoop
			}
			// Transient etcd failure: report "not leader" and retry after
			// the configured interval.
			log.Printf("etcd error: %s", err)
			out <- false
			time.Sleep(time.Duration(eConf.Interval) * time.Millisecond)
			continue
		}
		if !alreadyConnected {
			log.Printf("etcd checker started up, found key %s", e.key)
			alreadyConnected = true
		}
		// The last matching KV wins; state is true when this node's name
		// equals the key's value.
		for _, kv := range resp.Kvs {
			state = string(kv.Value) == e.nodename
		}
		select {
		case <-ctx.Done():
			break checkLoop
		case out <- state:
			time.Sleep(time.Duration(eConf.Interval) * time.Millisecond)
			continue
		}
	}
	return ctx.Err()
}
|
package main
import (
"errors"
"strings"
"time"
)
// main demonstrates the Thing actor: it changes and emphasizes the printed
// message over a few seconds, then stops the loop goroutine.
func main() {
	t := NewThing("blep")
	time.Sleep(time.Second)
	t.ChangeMessage("bork")
	time.Sleep(time.Second)
	t.EmphasizeMessage(1)
	time.Sleep(time.Second)
	t.EmphasizeMessage(3)
	time.Sleep(time.Second)
	t.Stop()
}
// Thing owns a message printed periodically by a dedicated goroutine
// (see loop). All mutation of msg happens on that goroutine via funcs
// sent over action, so no lock is needed.
type Thing struct {
	msg string
	action chan func() error
}
// NewThing creates a Thing with the initial message and starts its
// processing goroutine.
func NewThing(msg string) *Thing {
	t := &Thing{
		msg: msg,
		action: make(chan func() error),
	}
	go t.loop()
	return t
}
// Stop sends an action whose error return makes loop exit; it blocks
// until the loop accepts the action.
func (t *Thing) Stop() {
	t.action <- func() error {
		return errors.New("stop please")
	}
}
// loop prints the message once a second and applies queued actions; it
// returns when an action yields an error (see Stop).
func (t *Thing) loop() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case fn := <-t.action:
			if err := fn(); err != nil {
				println("returning due to:", err.Error())
				return
			}
		case <-ticker.C:
			println(t.msg)
		}
	}
}
// ChangeMessage asks the Thing's loop goroutine to replace the printed
// message with msg; it blocks until the loop accepts the action.
func (t *Thing) ChangeMessage(msg string) {
	update := func() error {
		t.msg = msg
		return nil
	}
	t.action <- update
}
// EmphasizeMessage appends n exclamation marks to the current message via
// the loop goroutine; it blocks until the loop accepts the action.
func (t *Thing) EmphasizeMessage(n int) {
	bang := strings.Repeat("!", n)
	t.action <- func() error {
		t.msg = t.msg + bang
		return nil
	}
}
|
package RPC
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"time"
)
// Credentials and endpoint for the local bitcoind JSON-RPC server.
// NOTE(review): hard-coded credentials; consider loading them from
// configuration or environment instead.
const RPCUSER = "user"
const RPCPASSWORD = "pwd"
const RPCURL = "http://127.0.0.1:8332"
// Rpc_Requst bundles a prepared HTTP request with the JSON-RPC envelope
// that gets marshalled into its body. (Name kept for compatibility;
// "Request" is the conventional spelling.)
type Rpc_Requst struct {
	Requst *http.Request
	Ahead Rpc_Ahead
}
// Rpc_Ahead is the JSON-RPC 2.0 request envelope.
type Rpc_Ahead struct {
	Id int `json:"id"`
	Jsonrpc string `json:"jsonrpc"`
	Method string `json:"method"`
	Params []interface{} `json:"params"`
}
// NewRpcRequst creates and initializes a JSON-RPC request against RPCURL
// with basic-auth and JSON headers already set. The request ID is seeded
// from the current time's nanoseconds.
func NewRpcRequst() (Rpc_Requst, error) {
	R := Rpc_Requst{
		Requst: nil,
		Ahead: Rpc_Ahead{
			Id:      time.Now().Nanosecond(),
			Jsonrpc: "2.0",
			Params:  nil,
		},
	}
	var err error
	R.Requst, err = http.NewRequest("POST", RPCURL, nil)
	if err != nil {
		// Log and return the error instead of log.Fatal: terminating the
		// whole process is not this helper's decision (and the old return
		// after log.Fatal was unreachable).
		log.Printf("NewRpcRequst: %s", err)
		return R, err
	}
	R.Requst.Header.Add("Authorization", "Basic "+Base64str(RPCUSER+":"+RPCPASSWORD))
	R.Requst.Header.Add("Encoding", "UTF-8")
	R.Requst.Header.Add("Content-Type", "application/json")
	return R, nil
}
// Rpc_DoPost performs the JSON-RPC call `method` with the given params and
// returns the decoded "result" field. A nil/empty params slice is sent as
// an empty JSON array, as bitcoind requires.
func (R Rpc_Requst) Rpc_DoPost(method string, params []interface{}) (interface{}, error) {
	client := http.Client{}
	R.Ahead.Method = method
	if len(params) != 0 {
		R.Ahead.Params = params
	} else {
		R.Ahead.Params = make([]interface{}, 0)
	}
	fmt.Println("params:", R.Ahead.Params)
	// Marshal the request envelope.
	jsonbyte, err := json.Marshal(R.Ahead)
	fmt.Println("send:" + string(jsonbyte))
	if err != nil {
		fmt.Println(err)
		return "", err
	}
	// Install the serialized envelope as the request body.
	read := bytes.NewBuffer(jsonbyte)
	R.Requst.Body = ioutil.NopCloser(read)
	// Execute the HTTP request.
	response, err := client.Do(R.Requst)
	if err != nil {
		fmt.Println("网络请求失败:", err)
		return "", err
	}
	// Close the body so the connection can be reused (was previously
	// leaked).
	defer response.Body.Close()
	// Read the full response body.
	resultbyte, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println("ReadAll Error::", err)
		return "", err
	}
	// Decode the JSON-RPC reply; surface malformed payloads instead of
	// silently returning an empty map (the error was previously ignored).
	result := make(map[string]interface{})
	if err = json.Unmarshal(resultbyte, &result); err != nil {
		fmt.Println("Unmarshal Error::", err)
		return "", err
	}
	fmt.Println(result)
	// Reject non-200 replies, echoing the raw body for diagnosis.
	statuscode := response.StatusCode
	if statuscode != 200 {
		fmt.Println("Post Error! Code:" + string(resultbyte))
		return "", errors.New("Rpc请求失败 error:" + string(resultbyte))
	}
	return result["result"], nil
}
// Base64str returns the standard base64 encoding of str.
func Base64str(str string) string {
	encoded := base64.StdEncoding.EncodeToString([]byte(str))
	return encoded
}
|
package main
import "fmt"
// main demonstrates array declaration, range iteration, grouped variable
// declaration, and array slicing.
func main() {
	var a [3]int
	fmt.Println(a[0])
	fmt.Println(a[len(a)-1])
	for i, v := range a {
		// Printf, not Println: with Println the format verbs were printed
		// literally instead of being substituted.
		fmt.Printf("%d %d \n", i, v)
	}
	//var tet , bb int = 1, 2
	var (
		tet int
		bb  string
	)
	print(tet, bb)
	// Index-keyed array literal; length is 13 (highest index + 1).
	months := [...]string{1: "January", 12: "December"}
	Q2 := months[4:7]
	// Same Println-with-verbs bug fixed here.
	fmt.Printf("%T %T", months, Q2)
}
|
package generate
import (
"github.com/spf13/cobra"
"opendev.org/airship/airshipctl/pkg/environment"
)
// NewGenerateCommand creates a new command for generating secret information
// NewGenerateCommand creates a new command for generating secret information
func NewGenerateCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {
	generateRootCmd := &cobra.Command{
		Use: "generate",
		// TODO(howell): Make this more expressive
		Short: "generates various secrets",
	}
	// Currently the only subcommand: master passphrase generation.
	generateRootCmd.AddCommand(NewGenerateMasterPassphraseCommand(rootSettings))
	return generateRootCmd
}
|
package skylark
import (
"fmt"
"bldy.build/build/label"
"github.com/google/skylark"
"github.com/pkg/errors"
)
// attributors returns a bldyDict mapping each supported attribute type
// name to an attributer that, when called from skylark, constructs the
// corresponding attribute (see attributer.Call).
func attributors() bldyDict {
	attributors := make(bldyDict)
	for _, actionName := range []string{
		"bool",
		"int",
		"int_list",
		"label",
		"label_keyed_string_dict",
		"label_list",
		"license",
		"output",
		"output_list",
		"string",
		"string_dict",
		"string_list",
		"string_list_dict",
	} {
		attributors[actionName] = attributer{actionName}
	}
	return attributors
}
// attributer is a callable skylark value that constructs attributes of a
// single type (attrType).
type attributer struct {
	attrType string
}
// skylark.Value plumbing so attributer can be exposed to skylark code.
func (a attributer) Name() string { return a.attrType }
func (a attributer) Hash() (uint32, error) { return hashString(a.attrType), nil }
func (a attributer) Freeze() {}
func (a attributer) String() string { return a.attrType }
func (a attributer) Type() string { return "attributer" }
func (a attributer) Truth() skylark.Bool { return true }
// Call constructs the attribute value for a.attrType and fills its fields
// from kwargs via unpackStruct. Attribute types listed in the final case
// are not implemented and panic.
func (a attributer) Call(thread *skylark.Thread, args skylark.Tuple, kwargs []skylark.Tuple) (skylark.Value, error) {
	var i Attribute
	x := attr{attrType: a.attrType}
	switch a.attrType {
	case "bool":
		// NOTE(review): uses the plain attr rather than boolAttr — looks
		// intentional since boolAttr adds no fields, but confirm.
		i = &x
	case "int":
		i = &intAttr{attr: x}
	case "int_list":
		i = &intListAttr{attr: x}
	case "label":
		i = &labelAttr{attr: x}
	case "label_keyed_string_dict":
		i = &labelKeyedStringDictAttr{attr: x}
	case "label_list":
		i = &labelListAttr{attr: x}
	case "output":
		i = &outputAttr{attr: x}
	case "license",
		"output_list",
		"string",
		"string_dict",
		"string_list",
		"string_list_dict":
		panic(fmt.Sprintf("%s not implemented", a.attrType))
	}
	if err := unpackStruct(i, kwargs); err != nil {
		return nil, errors.Wrap(err, "attiributor.call")
	}
	return i, nil
}
// Attribute is representation of a definition of an attribute.
// Use the attr module to create an Attribute.
// They are only for use with a rule or an aspect.
// https://docs.bazel.build/versions/master/skylark/lib/Attribute.html
type Attribute interface {
	skylark.Value
	GetDefault() skylark.Value
	HasDefault() bool
}
// CanAllowEmpty is implemented by attributes that may accept an empty
// value (e.g. label_list).
type CanAllowEmpty interface {
	AllowsEmpty() bool
	Empty() skylark.Value
}
// Converts is implemented by attributes that coerce a raw skylark value
// into their canonical representation (e.g. string -> label).
type Converts interface {
	Convert(skylark.Value) (skylark.Value, error)
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#modules.attr
// attr carries the fields common to every attribute type and provides the
// skylark.Value and Attribute plumbing for them.
type attr struct {
	attrType string
	// Common to all Attrs
	Default skylark.Value
	Doc string
	Mandatory bool
}
func (a *attr) Name() string { return a.attrType }
func (a *attr) Hash() (uint32, error) { return hashString(a.attrType), nil }
func (a *attr) Freeze() {}
func (a *attr) String() string { return a.attrType }
func (a *attr) Type() string { return "attr." + a.attrType }
func (a *attr) Truth() skylark.Bool { return true }
func (a *attr) GetDefault() skylark.Value { return a.Default }
func (a *attr) HasDefault() bool { return a.Default != nil }
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#bool
// NOTE(review): attributer.Call constructs a plain attr for "bool", so
// this type appears unused — confirm before removing.
type boolAttr struct {
	attr
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#int
type intAttr struct {
	attr
	Values []int
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#int_list
type intListAttr struct {
	attr
	NonEmpty bool
	AllowEmpty bool
}
// configuration selects which bazel configuration an attribute's
// dependencies are built for.
type configuration string
const (
	Data configuration = "data"
	Host configuration = "host"
	Target configuration = "target"
)
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#label
// labelAttr is an attribute holding a single label dependency.
type labelAttr struct {
	attr
	Executable bool
	AllowFiles bool
	AllowSingleFile bool
	AllowdExtensionsList []string
	Providers [][]string
	SingleFile bool
	Cfg configuration
}
// Convert parses a skylark string into a label value.
func (l *labelAttr) Convert(arg skylark.Value) (skylark.Value, error) {
	lblString, ok := arg.(skylark.String)
	if !ok {
		return nil, fmt.Errorf("attribute should be of type string")
	}
	lbl, err := label.Parse(string(lblString))
	if err != nil {
		return nil, err
	}
	return lbl, nil
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#label_keyed_string_dict
// labelKeyedStringDictAttr is a dict attribute whose keys are labels.
type labelKeyedStringDictAttr struct {
	attr
	Executable bool
	AllowFiles bool
	AllowdExtensionsList []string
	Providers [][]string
	SingleFile bool
	Cfg configuration
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#label_list
// labelListAttr is an attribute holding a list of label dependencies.
type labelListAttr struct {
	attr
	Executable bool
	AllowFiles bool
	AllowEmpty bool
	AllowdExtensionsList []string
	Providers [][]string
	SingleFile bool
	Cfg configuration
}
// AllowsEmpty reports whether an empty list is acceptable for this
// attribute (part of the CanAllowEmpty interface).
func (l *labelListAttr) AllowsEmpty() bool {
	return l.AllowEmpty
}
// Empty returns a fresh empty skylark list when empty values are allowed,
// and nil otherwise.
func (l *labelListAttr) Empty() skylark.Value {
	if !l.AllowEmpty {
		return nil
	}
	return skylark.NewList([]skylark.Value{})
}
// Convert parses a skylark list of strings into a skylark list of labels.
func (l *labelListAttr) Convert(arg skylark.Value) (skylark.Value, error) {
	lblList, ok := arg.(*skylark.List)
	if !ok {
		return nil, fmt.Errorf("attribute should be of type list consisting of strings")
	}
	out := []skylark.Value{}
	iter := lblList.Iterate()
	var item skylark.Value
	for iter.Next(&item) {
		val, ok := skylark.AsString(item)
		if !ok {
			return nil, fmt.Errorf("convert: (type=%T %q) is not a skylark.String", item, val)
		}
		lbl, err := label.Parse(val)
		if err != nil {
			return nil, err
		}
		out = append(out, lbl)
	}
	return skylark.NewList(out), nil
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#license
type licenseAttr struct{ attr }
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#output
type outputAttr struct{ attr }
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#output_list
type outputListAttr struct {
	attr
	NonEmpty bool
	AllowEmpty bool
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#string
type stringAttr struct {
	attr
	Values []string
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#string_dict
type stringDictAttr struct {
	attr
	NonEmpty bool
	AllowEmpty bool
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#string_list
type stringListAttr struct {
	attr
	NonEmpty bool
	AllowEmpty bool
}
// https://docs.bazel.build/versions/master/skylark/lib/attr.html#string_list_dict
// NOTE(review): several of these types are not yet reachable from
// attributer.Call, which panics for their type names.
type stringListDict struct {
	attr
	NonEmpty bool
	AllowEmpty bool
}
|
package main
import (
"fmt"
"github.com/mebiusashan/gcms/internal/config"
)
// main loads the gcms configuration from a hard-coded path and prints the
// configured server port.
func main() {
	cfx, err := config.Read("E:\\git\\gcms\\configs\\gcms.toml")
	// Check the error before dereferencing cfx: on a read failure cfx may
	// be nil, and the previous code panicked instead of reporting it.
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfx.Server.Port)
}
|
package hiragana
// Hiragana returns a map from romaji syllables to the corresponding
// hiragana characters (keys are the romanized pronunciations, values are
// the hiragana — the old comment had the direction reversed).
func Hiragana() map[string]string {
	// A single composite literal replaces ~100 individual assignments;
	// the contents are identical.
	return map[string]string{
		"a": "あ", "i": "い", "u": "う", "e": "え", "o": "お",
		"ka": "か", "ki": "き", "ku": "く", "ke": "け", "ko": "こ",
		"sa": "さ", "shi": "し", "su": "す", "se": "せ", "so": "そ",
		"ta": "た", "chi": "ち", "tsu": "つ", "te": "て", "to": "と",
		"na": "な", "ni": "に", "nu": "ぬ", "ne": "ね", "no": "の",
		"ha": "は", "hi": "ひ", "fu": "ふ", "he": "へ", "ho": "ほ",
		"ma": "ま", "mi": "み", "mu": "む", "me": "め", "mo": "も",
		"ya": "や", "yu": "ゆ", "yo": "よ",
		"ra": "ら", "ri": "り", "ru": "る", "re": "れ", "ro": "ろ",
		"wa": "わ", "wi": "ゐ", "we": "ゑ", "wo": "を",
		"n": "ん",
		"kya": "きゃ", "kyu": "きゅ", "kyo": "きょ",
		"sha": "しゃ", "shu": "しゅ", "sho": "しょ",
		"cha": "ちゃ", "chu": "ちゅ", "cho": "ちょ",
		"nya": "にゃ", "nyu": "にゅ", "nyo": "にょ",
		"hya": "ひゃ", "hyu": "ひゅ", "hyo": "ひょ",
		"mya": "みゃ", "myu": "みゅ", "myo": "みょ",
		"rya": "りゃ", "ryu": "りゅ", "ryo": "りょ",
		"ga": "が", "gi": "ぎ", "gu": "ぐ", "ge": "げ", "go": "ご",
		"za": "ざ", "ji": "じ", "zu": "ず", "ze": "ぜ", "zo": "ぞ",
		"da": "だ", "dji": "ぢ", "dzu": "づ", "de": "で", "do": "ど",
		"ba": "ば", "bi": "び", "bu": "ぶ", "be": "べ", "bo": "ぼ",
		"pa": "ぱ", "pi": "ぴ", "pu": "ぷ", "pe": "ぺ", "po": "ぽ",
		"gya": "ぎゃ", "gyu": "ぎゅ", "gyo": "ぎょ",
		"ja": "じゃ", "ju": "じゅ", "jo": "じょ",
		"bya": "びゃ", "byu": "びゅ", "byo": "びょ",
		"pya": "ぴゃ", "pyu": "ぴゅ", "pyo": "ぴょ",
	}
}
|
package main
import (
"os"
"fmt"
"io"
"strings"
"bufio"
)
// Keeps fmt referenced even when no other fmt call remains (debug aid).
var _ = fmt.Println
// Separator is the text between the checksum and the filename on each
// line. NOTE(review): md5sum-style listings use two spaces; confirm the
// literal here matches the files being parsed.
const Separator = " "
// Reader reads checksum-list entries line by line.
type Reader struct {
	*bufio.Reader
}
// NewReader wraps r in a buffered checksum-list Reader.
func NewReader(r io.Reader) *Reader {
	return &Reader{bufio.NewReader(r)}
}
// ReadEntry reads one "checksum<Separator>filename" line. A line without
// the separator yields (nil, nil), which callers must tolerate; io.EOF is
// returned once the input is exhausted.
func (r *Reader) ReadEntry() (entry *Entry, err error) {
	bytes, _, err := r.ReadLine()
	if err != nil {
		return
	}
	line := string(bytes)
	sep := strings.Index(line, Separator)
	if sep == -1 {
		// Malformed line: skip by returning a nil entry and nil error.
		return
	}
	entry = &Entry{Checksum: line[0:sep], Filename: line[sep+len(Separator):]}
	return
}
// Each invokes f for every well-formed entry until EOF. Reaching EOF ends
// the iteration successfully; any other read error is returned.
func (r *Reader) Each(f func(*Entry)) (err error) {
	for {
		e, readErr := r.ReadEntry()
		if readErr == io.EOF {
			return nil
		}
		if readErr != nil {
			return readErr
		}
		if e != nil {
			f(e)
		}
	}
}
// Load reads checksum entries from filename into a map keyed by file name
// with the checksum as the value.
func Load(filename string) (m map[string]string, err error) {
	file, err := os.Open(filename)
	if err != nil {
		return
	}
	// Close the file when done; it was previously leaked.
	defer file.Close()
	m = make(map[string]string)
	r := NewReader(file)
	err = r.Each(func(e *Entry) {
		m[e.Filename] = e.Checksum
	})
	return m, err
}
|
package Solution
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// TreeNode is a binary tree node.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// maxDepth returns the number of nodes on the longest root-to-leaf path
// (0 for an empty tree).
func maxDepth(root *TreeNode) int {
	return getDepth(root, 0)
}
// getDepth returns the depth of the deepest node in the subtree rooted at
// node, where upDeep counts the ancestors already above node.
func getDepth(node *TreeNode, upDeep int) int {
	if node == nil {
		return upDeep
	}
	left := getDepth(node.Left, upDeep+1)
	right := getDepth(node.Right, upDeep+1)
	if right > left {
		return right
	}
	return left
}
|
// SchnorrSignatureSample project main.go
package main
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"fmt"
"math/big"
"reflect"
"github.com/btcsuite/btcd/btcec"
)
// main generates a secp256k1 key pair, signs a fixed message with the
// Schnorr-style scheme implemented below, verifies it, and prints the
// boolean result.
func main() {
	// a is private key
	a, _ := rand.Int(rand.Reader, btcec.S256().N)
	m := []byte("message")
	// Schnorr Signature > sign
	R, s := sign(m, a)
	// A is public key: A = aG.
	aG := new(btcec.PublicKey)
	aG.X, aG.Y = btcec.S256().ScalarBaseMult(a.Bytes())
	A := aG
	// Schnorr Signature > verify
	result := verify(m, A, R, s)
	fmt.Println(result)
}
// sign produces a Schnorr-style signature (R, s) for message m under
// private key a: R = kG for a random nonce k, and s = k - h(m, R)*a mod N.
// m is message, a is private key.
func sign(m []byte, a *big.Int) (*btcec.PublicKey, *big.Int) {
	// k is the random nonce; the error from rand.Int is ignored here.
	k, _ := rand.Int(rand.Reader, btcec.S256().N)
	// R is random point
	R := new(btcec.PublicKey)
	R.X, R.Y = btcec.S256().ScalarBaseMult(k.Bytes())
	// sign { s = k - h(m, R)a }
	s := (new(big.Int)).Mod((new(big.Int)).Sub(k, (new(big.Int)).Mul(h(m, R), a)), btcec.S256().N)
	return R, s
}
// verify checks the signature by testing sG == R - h(m, R)A; equality is
// decided on the compressed serializations of both points.
// m is message, A is public key, R and s are the values returned in sign.
func verify(m []byte, A, R *btcec.PublicKey, s *big.Int) bool {
	// left side { sG }
	sG := new(btcec.PublicKey)
	sG.X, sG.Y = btcec.S256().ScalarBaseMult(s.Bytes())
	// right side { R - h(m, R)A }
	// -h(m, R)
	h := (new(big.Int)).Mod((new(big.Int)).Mul(big.NewInt(-1), h(m, R)), btcec.S256().N)
	// -h(m, R)A
	hA := new(btcec.PublicKey)
	hA.X, hA.Y = btcec.S256().ScalarMult(A.X, A.Y, h.Bytes())
	// R - h(m, R)A
	P := new(btcec.PublicKey)
	P.X, P.Y = btcec.S256().Add(R.X, R.Y, hA.X, hA.Y)
	return reflect.DeepEqual(sG.SerializeCompressed(), P.SerializeCompressed())
}
// h derives the challenge scalar from the message and nonce point as
// HMAC-SHA256(key = compressed R, data = SHA256(m)), interpreted as a
// big-endian integer.
func h(m []byte, R *btcec.PublicKey) *big.Int {
	// Anything is a hash
	h := sha256.Sum256(m)
	mac := hmac.New(sha256.New, R.SerializeCompressed())
	mac.Write(h[:])
	return (new(big.Int)).SetBytes(mac.Sum(nil))
}
|
package render
import "forum/pkg/model"
//CreateAllForum create forum list response
// It maps every model.Forum to its response shape by delegating to
// CreateForum, so the field mapping lives in exactly one place.
func CreateAllForum(in []*model.Forum) []*model.ForumResponse {
	if len(in) < 1 {
		return []*model.ForumResponse{}
	}
	out := make([]*model.ForumResponse, len(in))
	for i, forum := range in {
		// Reuse CreateForum instead of duplicating the field mapping.
		out[i] = CreateForum(forum)
	}
	return out
}
//CreateForum create show forum response
// It maps a single model.Forum to its API response shape, formatting both
// timestamps as "2006-01-02 15:04:05".
func CreateForum(forum *model.Forum) *model.ForumResponse {
	return &model.ForumResponse{
		ForumID: forum.ForumId,
		Name: forum.Name,
		Intro: forum.Intro,
		Sort: forum.Sort,
		Parent: forum.Parent,
		CreatedAt: forum.CreatedAt.Format("2006-01-02 15:04:05"),
		UpdatedAt: forum.UpdatedAt.Format("2006-01-02 15:04:05"),
	}
}
|
package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"os"
"sort"
"strings"
)
var (
	// fileName is the path to the JSON knowledge base of production rules
	// ("-f" flag). flag.Parse must run before it is dereferenced.
	fileName = flag.String("f", "examples/production.json", "Файл с правилами")
)
// main loads the rule base, prints it, then interactively runs forward
// chaining from user-entered facts and backward chaining from a
// user-entered goal.
func main() {
	// Parse command-line flags; without this call the -f flag is never
	// read and fileName always keeps its default value.
	flag.Parse()

	rules, backRules, err := loadRules(*fileName)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Правила в Базе знаний:")
	for k, v := range rules {
		fmt.Printf("%s -> %v\n", k, v)
	}
	fmt.Println()
	fmt.Println("БЗ в обратном порядке")
	for k, v := range backRules {
		fmt.Println(k, "<-", v)
	}
	fmt.Println()
	reader := bufio.NewReader(os.Stdin)
	fmt.Println()
	fmt.Println("Введите факты:")
	text, _, _ := reader.ReadLine()
	fmt.Println()
	// Forward chaining from the entered facts.
	wq := proccessForward(text, rules, true)
	if len(wq) != 0 {
		fmt.Println()
		fmt.Println("Введите цель для проверки достижимости:")
		text, _, _ = reader.ReadLine()
		fmt.Println()
		fmt.Println("Достижима ли цель?", strings.Contains(wq[len(wq)-1], string(text)))
		fmt.Println()
		fmt.Println("Итоговая рабочая память:")
		fmt.Println(wq[len(wq)-1])
	}
	fmt.Println()
	fmt.Println("Введите факт для проверки:")
	text, _, _ = reader.ReadLine()
	fmt.Println()
	// Backward chaining from the entered goal.
	proccessBack(string(text), backRules)
}
// proccessForward runs forward chaining: it seeds the working queue
// with the facts from baseRulesJSON (a JSON array of strings) and
// repeatedly applies rules (sorted JSON-encoded fact combination ->
// derivable facts) until no new conclusions appear. The returned slice
// is the working queue; its last element is the final working memory.
func proccessForward(baseRulesJSON []byte, rules map[string][]string, print bool) []string {
	// Work on a copy so the caller's rule map stays untouched.
	workRules := make(map[string][]string, len(rules))
	for k, v := range rules {
		workRules[k] = append([]string{}, v...)
	}
	var workQueue []string
	// Seed the working queue with the initial facts.
	var baseRules []string
	if err := json.Unmarshal(baseRulesJSON, &baseRules); err != nil {
		// Previously a malformed fact list was silently ignored and
		// the run produced no conclusions with no explanation.
		fmt.Println("Ошибка разбора фактов:", err)
	}
	for _, rule := range baseRules {
		workQueue = addToWorkQueue(workQueue, rule)
	}
	fmt.Println()
	l := len(workQueue)
	if print {
		fmt.Println("Выводы:")
	}
	for i := 0; i < l; i++ {
		out, ok := workRules[workQueue[i]]
		if !ok {
			continue
		}
		for _, fact := range out {
			if print {
				fmt.Println(workQueue[i], "->", fact)
			}
			workQueue = addToWorkQueue(workQueue, fact)
		}
		// New conclusions extend the scan window.
		l = len(workQueue)
	}
	return workQueue
}
// proccessBack runs backward chaining from goal: it repeatedly expands
// goals via backRules (goal -> JSON-encoded fact combinations deriving
// it) until only underivable base facts remain, printing each applied
// rule and finally the collected base facts.
func proccessBack(goal string, backRules map[string][]string) {
	if _, ok := backRules[goal]; !ok {
		fmt.Println("В БЗ нет правила для вывода факта", goal)
		return
	}
	// Copy the rule map so expansion can consume rules without
	// mutating the caller's data.
	workRules := make(map[string][]string, len(backRules))
	for k, v := range backRules {
		workRules[k] = append([]string{}, v...)
	}
	finalFacts := []string{}
	// Depth-first expansion of goals.
	stack := []string{goal}
	for len(stack) != 0 {
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		rightFacts := workRules[top]
		// Each rule is expanded at most once to avoid cycles.
		delete(workRules, top)
		for _, rightFact := range rightFacts {
			var tmp []string
			json.Unmarshal([]byte(rightFact), &tmp)
			for _, newGoal := range tmp {
				// A fact with no deriving rule is a base fact.
				// NOTE(review): the duplicate check uses substring
				// matching over the joined fact list, so a fact name
				// that is a substring of another (e.g. "a" vs "ab")
				// could be dropped incorrectly — verify fact names
				// are substring-free.
				if _, ok := workRules[newGoal]; !ok && !strings.Contains(strings.Join(finalFacts, " "), newGoal) {
					finalFacts = append(finalFacts, newGoal)
					continue
				}
				stack = append(stack, newGoal)
			}
			fmt.Println(top, "<-", rightFact)
		}
	}
	if len(finalFacts) != 0 {
		fmt.Println()
		fmt.Println("Итоговый вывод:")
		for _, fact := range finalFacts {
			fmt.Println(fact)
		}
	}
}
// addToWorkQueue appends the single fact `rule` (JSON-encoded as a
// one-element array) to the queue, followed by the combination of the
// fact with every entry that was already queued. Combinations are kept
// sorted so that equal fact sets always encode to the same string.
func addToWorkQueue(workQueue []string, rule string) []string {
	existing := len(workQueue)

	// The fact on its own, as a canonical JSON array.
	encoded, _ := json.Marshal([]string{rule})
	workQueue = append(workQueue, string(encoded))

	// Merge the new fact into each previously queued combination.
	for _, entry := range workQueue[:existing] {
		var facts []string
		json.Unmarshal([]byte(entry), &facts)
		facts = append(facts, rule)
		sort.Strings(facts)
		merged, _ := json.Marshal(facts)
		workQueue = append(workQueue, string(merged))
	}
	return workQueue
}
// loadRules reads the JSON rule file and builds two indexes:
// forward (sorted JSON-encoded fact set -> derivable goals) and
// backward (goal -> fact sets that derive it).
func loadRules(filePath string) (map[string][]string, map[string][]string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, nil, fmt.Errorf("open rules file: %v", err)
	}
	// The file handle was previously leaked on every call.
	defer file.Close()
	var rules []fileRule
	if err := json.NewDecoder(file).Decode(&rules); err != nil {
		return nil, nil, fmt.Errorf("parsing rules file: %v", err)
	}
	out := make(map[string][]string, len(rules))
	back := make(map[string][]string, len(rules))
	for _, rule := range rules {
		// Sort the facts so the same combination always encodes to
		// the same map key.
		sort.Strings(rule.Fact)
		data, _ := json.Marshal(rule.Fact)
		out[string(data)] = append(out[string(data)], rule.Goal)
		back[rule.Goal] = append(back[rule.Goal], string(data))
	}
	return out, back, nil
}
|
package main
import "fmt"
// main demonstrates a plain function call and a call to a function that
// returns a value.
func main() {
	foo(2020) // usual function
	b1 := bar("Print")
	fmt.Println(b1)
}
//usual function
//
// foo prints its integer argument followed by a newline.
func foo(x int) {
	fmt.Printf("%d\n", x)
}
//Returning
//
// bar echoes its string argument back to the caller unchanged.
func bar(b string) (s string) {
	s = b
	return
}
|
// GENERATED CODE - DO NOT EDIT!
//
// Generated by:
//
// go run gen_trace.go -o cache.pb.go -pkg testpb -files sourcegraph.com/sqs/grpccache/testpb@test.pb.go
//
// Called via:
//
// go generate
//
package testpb
import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"sourcegraph.com/sqs/grpccache"
)
// CachedTestServer wraps a TestServer so cache-control options set by
// handlers are propagated back to clients as a gRPC trailer.
type CachedTestServer struct{ TestServer }
// TestMethod invokes the wrapped server's TestMethod and, if the
// handler set any cache-control options, attaches them as a response
// trailer so the client side can cache the result.
func (s *CachedTestServer) TestMethod(ctx context.Context, in *TestOp) (*TestResult, error) {
	ctx, cc := grpccache.Internal_WithCacheControl(ctx)
	result, err := s.TestServer.TestMethod(ctx, in)
	if !cc.IsZero() {
		if err := grpccache.Internal_SetCacheControlTrailer(ctx, *cc); err != nil {
			return nil, err
		}
	}
	// Return the handler's result and error unchanged.
	return result, err
}
// CachedTestClient wraps a TestClient with a read-through cache; when
// Cache is nil every call goes straight to the underlying client.
type CachedTestClient struct {
	TestClient
	Cache *grpccache.Cache
}
// TestMethod serves the call from the cache when possible; otherwise it
// forwards to the underlying client and stores the result according to
// the cache-control trailer returned by the server.
func (s *CachedTestClient) TestMethod(ctx context.Context, in *TestOp, opts ...grpc.CallOption) (*TestResult, error) {
	if s.Cache != nil {
		var cachedResult TestResult
		cached, err := s.Cache.Get(ctx, "Test.TestMethod", in, &cachedResult)
		if err != nil {
			return nil, err
		}
		if cached {
			return &cachedResult, nil
		}
	}
	// Cache miss: perform the real call, capturing the trailer that
	// carries the server's cache-control decision.
	var trailer metadata.MD
	result, err := s.TestClient.TestMethod(ctx, in, grpc.Trailer(&trailer))
	if err != nil {
		return nil, err
	}
	if s.Cache != nil {
		if err := s.Cache.Store(ctx, "Test.TestMethod", in, result, trailer); err != nil {
			return nil, err
		}
	}
	return result, nil
}
|
package main
import (
"github.com/mndrix/tap-go"
"github.com/opencontainers/runtime-tools/cgroups"
"github.com/opencontainers/runtime-tools/validation/util"
)
// main builds an OCI spec exercising the blkio cgroup settings
// (weights, per-device weights, and read/write bps/IOPS throttles) and
// validates it against the runtime under test, reporting TAP output.
func main() {
	var weight uint16 = 500
	var leafWeight uint16 = 300
	// Device 8:0 (major:minor) is the target block device.
	var major, minor int64 = 8, 0
	var rate uint64 = 102400
	t := tap.New()
	t.Header(0)
	defer t.AutoPlan()
	g, err := util.GetDefaultGenerator()
	if err != nil {
		util.Fatal(err)
	}
	g.SetLinuxCgroupsPath(cgroups.RelCgroupPath)
	g.SetLinuxResourcesBlockIOWeight(weight)
	g.SetLinuxResourcesBlockIOLeafWeight(leafWeight)
	g.AddLinuxResourcesBlockIOWeightDevice(major, minor, weight)
	g.AddLinuxResourcesBlockIOLeafWeightDevice(major, minor, leafWeight)
	g.AddLinuxResourcesBlockIOThrottleReadBpsDevice(major, minor, rate)
	g.AddLinuxResourcesBlockIOThrottleWriteBpsDevice(major, minor, rate)
	g.AddLinuxResourcesBlockIOThrottleReadIOPSDevice(major, minor, rate)
	g.AddLinuxResourcesBlockIOThrottleWriteIOPSDevice(major, minor, rate)
	err = util.RuntimeOutsideValidate(g, t, util.ValidateLinuxResourcesBlockIO)
	if err != nil {
		t.Fail(err.Error())
	}
}
|
// Package handler registers "/table-of-contents.{js,css}" routes
// on http.DefaultServeMux on init.
package handler
import (
"go/build"
"log"
"net/http"
"path/filepath"
"github.com/shurcooL/go/gopherjs_http"
"github.com/shurcooL/httpfs/httputil"
"github.com/shurcooL/httpfs/vfsutil"
)
// init registers the table-of-contents script and stylesheet routes on
// http.DefaultServeMux.
func init() {
	// HACK: This code registers routes at root on default mux... That's not very nice.
	http.Handle("/table-of-contents.js", httputil.FileHandler{File: gopherjs_http.Package("github.com/shurcooL/frontend/table-of-contents")})
	http.Handle("/table-of-contents.css", httputil.FileHandler{File: vfsutil.File(filepath.Join(importPathToDir("github.com/shurcooL/frontend/table-of-contents"), "style.css"))})
}
// importPathToDir resolves a Go import path to the directory holding
// the package's source, terminating the process if the lookup fails.
func importPathToDir(importPath string) string {
	pkg, err := build.Import(importPath, "", build.FindOnly)
	if err != nil {
		log.Fatalln(err)
	}
	return pkg.Dir
}
|
package main
import "fmt"
// main demonstrates calling a variadic function: 23 binds to i, "abc"
// to s1, and the remaining ints are collected into the slice s.
func main() {
	show(23, "abc", 98, 89, 34)
}
// show prints the Go-syntax representation of each of its arguments on
// its own line: the fixed int, the fixed string, then the variadic int
// slice as a whole.
func show(i int, s1 string, s ...int) {
	for _, arg := range []interface{}{i, s1, s} {
		fmt.Printf("%#v\n", arg)
	}
}
// disp prints the Go-syntax representation of an int slice without a
// trailing newline (demo helper, not called from main).
func disp(i []int) {
	fmt.Printf("%#v", i)
}
|
package concurrent
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestDo exercises the concurrent Do helper: no-op invocations, nil
// task filtering, single and multiple tasks, error aggregation, and
// panic recovery.
func TestDo(t *testing.T) {
	t.Run("empty", func(t *testing.T) {
		require.NoError(t, Do())
	})
	t.Run("with nil", func(t *testing.T) {
		// nil tasks must be tolerated and skipped.
		require.NoError(t, Do(nil))
	})
	t.Run("one", func(t *testing.T) {
		slice := []int{0}
		require.NoError(t, Do(func() error {
			slice[0] = 1
			return nil
		}))
		require.Equal(t, []int{1}, slice)
	})
	t.Run("one with nil", func(t *testing.T) {
		// nil entries around a real task must not affect it.
		slice := []int{0}
		require.NoError(t, Do(nil, func() error {
			slice[0] = 1
			return nil
		}, nil))
		require.Equal(t, []int{1}, slice)
	})
	t.Run("one with error", func(t *testing.T) {
		require.EqualError(t, Do(func() error {
			return errors.New("EXPECTED ERROR")
		}), "1 error occurred: EXPECTED ERROR")
	})
	t.Run("normal", func(t *testing.T) {
		// Each task writes its own slot, so no data race on slice.
		slice := []int{0, 0, 0}
		require.NoError(t, Do(
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[0] = 0
				return nil
			},
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[1] = 1
				return nil
			},
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[2] = 2
				return nil
			},
		))
		require.Equal(t, []int{0, 1, 2}, slice)
	})
	t.Run("error", func(t *testing.T) {
		// All tasks run to completion even when some fail; errors are
		// aggregated into a single message.
		slice := []int{0, 0, 0}
		require.EqualError(t, Do(
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[0] = 0
				return errors.New("error")
			},
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[1] = 1
				return errors.New("error")
			},
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[2] = 2
				return nil
			},
		), `2 errors occurred:
* error
* error
`)
		require.Equal(t, []int{0, 1, 2}, slice)
	})
	t.Run("panic", func(t *testing.T) {
		// Panics inside tasks are recovered and surfaced as an error
		// instead of crashing the process.
		slice := []int{0, 0, 0}
		err := Do(
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[0] = 0
				panic("error")
			},
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[1] = 1
				panic("error")
			},
			func() error {
				time.Sleep(20 * time.Millisecond)
				slice[2] = 2
				return nil
			},
		)
		require.Error(t, err)
		t.Logf("got error: %s", err)
		require.Equal(t, []int{0, 1, 2}, slice)
	})
}
|
package http
import (
"github.com/wcong/ants-go/ants/node"
)
// WelcomeInfo is the payload returned by the welcome endpoint.
type WelcomeInfo struct {
	Message  string
	Greeting string
	Time     string
}
// StartSpiderResult describes the outcome of a start-spider request,
// including which node acted as master.
type StartSpiderResult struct {
	Success    bool
	Message    string
	Spider     string
	Time       string
	MasterNode *node.NodeInfo
}
|
package postal_test
import (
"github.com/cloudfoundry-incubator/notifications/config"
"github.com/cloudfoundry-incubator/notifications/postal"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// FixtureFile is the fixture path, relative to the project root.
const FixtureFile = "/postal/fixtures/test.text"

// Ginkgo spec for postal.FileSystem covering Read and Exists.
var _ = Describe("FileSystem", func() {
	var fs postal.FileSystem
	Describe("Read", func() {
		It("returns a string of the file contents at the specified location", func() {
			env := config.NewEnvironment()
			path := env.RootPath + FixtureFile
			contents, err := fs.Read(path)
			if err != nil {
				panic(err)
			}
			Expect(contents).To(Equal("We have some content\n\n\nAnd some more\n\n"))
		})
	})
	Describe("FileExists", func() {
		var path string
		BeforeEach(func() {
			env := config.NewEnvironment()
			path = env.RootPath + FixtureFile
		})
		It("returns true if the file exists", func() {
			response := fs.Exists(path)
			Expect(response).To(BeTrue())
		})
		It("returns false if the file does not exist", func() {
			// Append a suffix to make the path nonexistent.
			response := fs.Exists(path + "not.There")
			Expect(response).To(BeFalse())
		})
	})
})
|
/*
* Copyright (c) 2020. Ant Group. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package main
var (
	// Version is the build version string; release builds are expected
	// to override it at link time (e.g. -ldflags "-X main.Version=...").
	Version = "development"
)
|
package userroute
// insertRequest is the request body for route insertion: a bare list
// of route params.
type insertRequest []params

// insertRequestConvert unwraps the request type back to its underlying
// params slice.
func insertRequestConvert(r insertRequest) []params {
	return r
}
// listRequest is the request body for listing a user's routes.
type listRequest struct {
	UserId       int  `json:"userId" validate:"required"`
	BelongToUser bool `json:"belongToUser"`
}

// listResponse is the list response body: routes with their groups.
type listResponse []RouteWithGroups

// newListResponse wraps the routes slice in the response type.
func newListResponse(routes []RouteWithGroups) listResponse {
	return routes
}
// deleteRequest is the request body for route deletion.
// NOTE(review): the json tag "groupId" on a field holding a params list
// looks inconsistent — confirm against the API contract.
type deleteRequest struct {
	Params []params `json:"groupId" validate:"dive"`
}
|
package orm
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestColumn_New covers scalar and array column construction and the
// SQL fragments they render.
func TestColumn_New(t *testing.T) {
	t.Run("NewColumn", func(t *testing.T) {
		column, err := NewColumn("test", "integer", "int32", true)
		assert.NoError(t, err)
		assert.NotEmpty(t, column.Type)
		assert.NotEmpty(t, column.String())
		assert.Contains(t, column.String(), "integer")
		// The final true argument requests a NOT NULL constraint.
		assert.Contains(t, column.String(), "not null")
	})
	t.Run("NewColumnArray", func(t *testing.T) {
		var max int64 = 10
		column, err := NewColumnArray("test", "integer", "int32", &max, false)
		assert.NoError(t, err)
		assert.NotEmpty(t, column.Type)
		assert.NotEmpty(t, column.String())
		// The max bound appears in the rendered array type.
		assert.Contains(t, column.String(), "integer[10]")
	})
}
|
package opentrace
import (
"github.com/opentracing/opentracing-go"
"net/http"
)
type (
	// Injector knows how to propagate a trace.
	// The http.Request argument should be treated as immutable: for
	// any modification, work on a copy and replace the pointer.
	Injector interface {
		Inject(tracer opentracing.Tracer, ctx opentracing.SpanContext, r **http.Request) error
	}
	// InjectorFn is a function adapter for the Injector interface.
	InjectorFn func(tracer opentracing.Tracer, ctx opentracing.SpanContext, r **http.Request) error
	// HTTPHeadersInjector is the default Injector; it injects the
	// trace into the request's HTTP headers.
	HTTPHeadersInjector struct{}
)
// Inject implements Injector by invoking the function itself.
func (fn InjectorFn) Inject(tracer opentracing.Tracer, ctx opentracing.SpanContext, r **http.Request) error {
	return fn(tracer, ctx, r)
}
// Inject creates a copy of the http.Request and replaces the pointer in
// the argument, so the caller's original request is never mutated.
func (HTTPHeadersInjector) Inject(tracer opentracing.Tracer, ctx opentracing.SpanContext, r **http.Request) error {
	request := **r
	// Shallow-copy the header map so newly injected keys don't leak
	// into the original request.
	// NOTE(review): the value slices are still shared with the
	// original; this is safe only as long as the carrier replaces
	// values rather than appending in place — confirm against the
	// opentracing HTTPHeadersCarrier implementation.
	header := make(http.Header)
	for k, v := range request.Header {
		header[k] = v
	}
	request.Header = header
	err := tracer.Inject(ctx, opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(request.Header))
	*r = &request
	return err
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package charset
import (
"bytes"
"golang.org/x/text/encoding"
)
// EncodingLatin1Impl is the instance of encodingLatin1.
// TiDB uses utf8 implementation for latin1 charset because of the backward compatibility.
var EncodingLatin1Impl = &encodingLatin1{encodingUTF8{encodingBase{enc: encoding.Nop}}}

// init wires the self-reference used by the embedded base encoding.
func init() {
	EncodingLatin1Impl.self = EncodingLatin1Impl
}
// encodingLatin1 is compatible with latin1 in old versions of TiDB.
// It embeds encodingUTF8 and overrides only the latin1-specific parts.
type encodingLatin1 struct {
	encodingUTF8
}
// Name implements Encoding interface.
func (*encodingLatin1) Name() string {
	return CharsetLatin1
}

// Peek implements Encoding interface.
// Latin1 is single-byte, so the next "character" is always one byte.
func (*encodingLatin1) Peek(src []byte) []byte {
	if len(src) == 0 {
		return src
	}
	return src[:1]
}

// IsValid implements Encoding interface.
// Every byte sequence is valid latin1.
func (*encodingLatin1) IsValid(_ []byte) bool {
	return true
}

// Tp implements Encoding interface.
func (*encodingLatin1) Tp() EncodingTp {
	return EncodingTpLatin1
}

// Transform implements Encoding interface; latin1 bytes pass through
// unchanged and the conversion buffer is unused.
func (*encodingLatin1) Transform(_ *bytes.Buffer, src []byte, _ Op) ([]byte, error) {
	return src, nil
}
|
/*
Package fork sets up process to run in the background.
*/
package fork
import (
"log"
"os"
"os/signal"
"syscall"
"time"
)
// spawnChild re-executes the current binary as a detached child.
// It returns (nil, nil) when running inside the second-stage child and
// (*os.Process, nil) in the parent.
func spawnChild() (*os.Process, error) {
	daemonState := os.Getenv("_DAEMON_STATE")
	switch daemonState {
	case "":
		// First stage: become a session leader with a sane umask, then
		// mark the next exec as stage two.
		syscall.Umask(0022)
		syscall.Setsid()
		os.Setenv("_DAEMON_STATE", "1")
	case "1":
		// Second stage: we are the daemonized child.
		os.Setenv("_DAEMON_STATE", "")
		return nil, nil
	}
	var attrs os.ProcAttr
	// Open /dev/null read-write: it backs the child's stdin, stdout
	// AND stderr. The previous os.Open call was read-only, so every
	// write to the child's stdout/stderr would fail.
	f, err := os.OpenFile("/dev/null", os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	attrs.Files = []*os.File{f, f, f}
	execPath, err := os.Executable()
	if err != nil {
		return nil, err
	}
	p, err := os.StartProcess(execPath, os.Args, &attrs)
	if err != nil {
		return nil, err
	}
	return p, nil
}
// Daemonize spawns a child process and stops parent.
// The parent blocks until the child sends SIGTERM, then exits 0; the
// child waits briefly for the parent's handler to be installed before
// signaling it.
func Daemonize() {
	//create child process
	p, err := spawnChild()
	if err != nil {
		log.Fatal(err.Error())
	}
	//only entered in parent
	if p != nil {
		sigterm := make(chan os.Signal, 1)
		signal.Notify(sigterm, syscall.SIGTERM)
		<-sigterm
		os.Exit(0)
	}
	//parent never gets here: from this point on we are the child
	//give the parent time to install signal handler
	//so we don't kill it prematurely and get ugly message on stdout
	time.Sleep(100 * time.Millisecond)
	//say good bye to parent
	ppid := os.Getppid()
	//we don't want to kill init
	if ppid > 1 {
		syscall.Kill(ppid, syscall.SIGTERM)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"inmemorydb/model"
"net"
)
// db is the in-memory key/value store.
// NOTE(review): it is read and written from one goroutine per
// connection with no lock — a data race under concurrent clients;
// guard it with a sync.Mutex (or use sync.Map).
var db = map[string]string{}

// main accepts TCP connections on :9091 and serves each one on its own
// goroutine.
func main() {
	listener, err := net.Listen("tcp", ":9091")
	if err != nil {
		panic(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		go handleConnection(conn)
	}
}
// handleConnection reads a single JSON model.Message from the
// connection, executes the SET or GET it describes against db, writes
// a model.Response, and closes the connection.
// NOTE(review): only one message is served per connection, and requests
// larger than 4096 bytes are truncated — confirm that matches the
// client protocol.
func handleConnection(conn net.Conn) {
	defer func() {
		fmt.Println("connection closing")
		conn.Close()
	}()
	fmt.Println("connection opened")
	requestBytes := make([]byte, 4096)
	n, err := conn.Read(requestBytes)
	if err != nil {
		return
	}
	// Trim the buffer to the bytes actually read.
	requestTrim := requestBytes[:n]
	data := &model.Message{}
	err = json.Unmarshal(requestTrim, data)
	if err != nil {
		response(conn, false, "")
		return
	}
	if data.Type == model.SET {
		db[data.Key] = data.Value
		response(conn, true, "")
		return
	}
	if data.Type == model.GET {
		getData, ok := db[data.Key]
		if !ok {
			// Missing key reports failure with empty data.
			response(conn, false, "")
			return
		}
		response(conn, true, getData)
	}
}
// response marshals a model.Response with the given status and data and
// writes it to the connection, reporting whether the write succeeded.
func response(conn net.Conn, status bool, data string) bool {
	payload, _ := json.Marshal(model.Response{
		Status: status,
		Data:   data,
	})
	if _, err := conn.Write(payload); err != nil {
		return false
	}
	return true
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package test
import (
"github.com/iotaledger/wasp/packages/coretypes"
)
// Contract identity.
const ScName = "erc20"
const ScDescription = "ERC-20 PoC for IOTA Smart Contracts"
const ScHname = coretypes.Hname(0x200e3733)

// Request parameter keys.
const ParamAccount = "ac"
const ParamAmount = "am"
const ParamCreator = "c"
const ParamDelegation = "d"
const ParamRecipient = "r"
const ParamSupply = "s"

// State variable keys.
const VarBalances = "b"
const VarSupply = "s"

// Entry point names.
const FuncApprove = "approve"
const FuncInit = "init"
const FuncTransfer = "transfer"
const FuncTransferFrom = "transferFrom"
const ViewAllowance = "allowance"
const ViewBalanceOf = "balanceOf"
const ViewTotalSupply = "totalSupply"

// Hashed entry point names (hname values of the strings above).
const HFuncApprove = coretypes.Hname(0xa0661268)
const HFuncInit = coretypes.Hname(0x1f44d644)
const HFuncTransfer = coretypes.Hname(0xa15da184)
const HFuncTransferFrom = coretypes.Hname(0xd5e0a602)
const HViewAllowance = coretypes.Hname(0x5e16006a)
const HViewBalanceOf = coretypes.Hname(0x67ef8df4)
const HViewTotalSupply = coretypes.Hname(0x9505e6ca)
|
package web
import (
"fmt"
"html"
"net/http"
"sync"
"github.com/criteo/graphite-remote-adapter/client"
"github.com/criteo/graphite-remote-adapter/client/graphite"
"github.com/criteo/graphite-remote-adapter/config"
"github.com/criteo/graphite-remote-adapter/ui"
"github.com/criteo/graphite-remote-adapter/utils/template"
"github.com/davecgh/go-spew/spew"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
)
// Metric namespace/subsystem shared by the API metrics below.
const namespace = "remote_adapter"
const apiSubsystem = "api"

var (
	// requestCounter counts requests by handler, status code and method.
	requestCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: apiSubsystem,
			Name:      "requests_total",
			Help:      "A counter for requests to the wrapped handler.",
		},
		[]string{"handler", "code", "method"},
	)
	// requestDuration observes request latency per handler and method.
	requestDuration = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: apiSubsystem,
			Name:      "request_duration_seconds",
			Help:      "A histogram of latencies for requests.",
			Buckets:   []float64{.25, .5, 1, 2.5, 5, 10},
		},
		[]string{"handler", "method"},
	)
	// responseSize observes response payload sizes per handler.
	responseSize = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: apiSubsystem,
			Name:      "response_size_bytes",
			Help:      "A histogram of response sizes for requests.",
			Buckets:   []float64{200, 500, 900, 1500},
		},
		[]string{"handler"},
	)
)
// Handler serves various HTTP endpoints of the remote adapter server.
type Handler struct {
	logger log.Logger
	cfg    *config.Config
	router *mux.Router
	// reloadCh forwards reload requests to the config owner; each
	// request carries a channel on which the result is reported.
	reloadCh chan chan error
	writers  []client.Writer
	readers  []client.Reader
	// lock guards cfg, writers and readers across ApplyConfig and the
	// request handlers.
	lock sync.RWMutex
}
// instrumentHandler wraps handlerFunc with duration, count and
// response-size metrics, all labeled with the given handler name.
func instrumentHandler(name string, handlerFunc http.HandlerFunc) http.Handler {
	return promhttp.InstrumentHandlerDuration(
		requestDuration.MustCurryWith(prometheus.Labels{"handler": name}),
		promhttp.InstrumentHandlerCounter(
			requestCounter.MustCurryWith(prometheus.Labels{"handler": name}),
			promhttp.InstrumentHandlerResponseSize(
				responseSize.MustCurryWith(prometheus.Labels{"handler": name}),
				http.HandlerFunc(handlerFunc),
			),
		),
	)
}
// New initializes a new web Handler, builds the graphite clients and
// registers all routes.
func New(logger log.Logger, cfg *config.Config) *Handler {
	router := mux.NewRouter()
	h := &Handler{
		cfg:      cfg,
		logger:   logger,
		router:   router,
		reloadCh: make(chan chan error),
	}
	h.buildClients()
	// Static assets are compiled into the binary via go-bindata.
	staticFs := http.FileServer(
		&assetfs.AssetFS{Asset: ui.Asset, AssetDir: ui.AssetDir, AssetInfo: ui.AssetInfo, Prefix: ""})
	// Add pprof handler.
	router.PathPrefix("/debug/").Handler(http.DefaultServeMux)
	// Add your routes as needed
	router.Methods("GET").PathPrefix("/static/").Handler(staticFs)
	router.Methods("GET").Path(h.cfg.Web.TelemetryPath).Handler(promhttp.Handler())
	router.Methods("GET").Path("/-/healthy").Handler(instrumentHandler("healthy", h.healthy))
	router.Methods("POST").Path("/-/reload").Handler(instrumentHandler("reload", h.reload))
	router.Methods("GET").Path("/").Handler(instrumentHandler("home", h.home))
	// NOTE(review): the simulation route reuses the "home" metric
	// label; confirm whether it should be labeled "simulation".
	router.Methods("GET").Path("/simulation").Handler(instrumentHandler("home", h.simulation))
	router.Methods("POST").Path("/write").Handler(instrumentHandler("write", h.write))
	router.Methods("POST").Path("/read").Handler(instrumentHandler("read", h.read))
	return h
}
// Reload returns the receive-only channel that signals configuration
// reload requests; the receiver must send the reload result back on
// each inner channel.
func (h *Handler) Reload() <-chan chan error {
	return h.reloadCh
}
// ApplyConfig updates the config field of the Handler struct.
// Existing clients are shut down before new ones are built from the
// incoming configuration.
func (h *Handler) ApplyConfig(cfg *config.Config) error {
	h.lock.Lock()
	defer h.lock.Unlock()
	for _, w := range h.writers {
		w.Shutdown()
	}
	for _, r := range h.readers {
		r.Shutdown()
	}
	h.cfg = cfg
	h.buildClients()
	return nil
}
// buildClients (re)creates the graphite reader/writer clients from the
// current config; the same client serves as both reader and writer.
func (h *Handler) buildClients() {
	level.Info(h.logger).Log("cfg", h.cfg, "msg", "Building clients")
	h.writers = nil
	h.readers = nil
	if c := graphite.NewClient(h.cfg, h.logger); c != nil {
		h.writers = append(h.writers, c)
		h.readers = append(h.readers, c)
	}
	level.Info(h.logger).Log(
		"num_writers", len(h.writers), "num_readers", len(h.readers), "msg", "Built clients")
}
// Run serves the HTTP endpoints; it blocks until the server stops.
func (h *Handler) Run() error {
	level.Info(h.logger).Log("ListenAddress", h.cfg.Web.ListenAddress, "msg", "Listening")
	return http.ListenAndServe(h.cfg.Web.ListenAddress, h.router)
}
// healthy is the liveness endpoint: it answers 200 "OK" once the
// handler lock can be acquired for reading.
func (h *Handler) healthy(w http.ResponseWriter, r *http.Request) {
	h.lock.RLock()
	defer h.lock.RUnlock()
	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "OK")
}
// reload asks the config owner (via the Reload channel) to re-read the
// configuration and reports the outcome to the client.
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
	rc := make(chan error)
	h.reloadCh <- rc
	// Block until the reload has been applied (or failed).
	if err := <-rc; err != nil {
		http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	// Fixed user-facing typo: "succesfully" -> "successfully".
	fmt.Fprintf(w, "Config successfully reloaded.")
}
// home renders the status page: version info plus an HTML-escaped dump
// of the current config and of every reader/writer client.
func (h *Handler) home(w http.ResponseWriter, r *http.Request) {
	status := struct {
		VersionInfo         string
		VersionBuildContext string
		Cfg                 string
		Readers             map[string]string
		Writers             map[string]string
	}{
		VersionInfo:         version.Info(),
		VersionBuildContext: version.BuildContext(),
		// Escape the dumps: they are interpolated into HTML.
		Cfg:     html.EscapeString(spew.Sdump(h.cfg)),
		Readers: map[string]string{},
		Writers: map[string]string{},
	}
	for _, r := range h.readers {
		status.Readers[r.Name()] = html.EscapeString(spew.Sdump(r))
	}
	for _, w := range h.writers {
		status.Writers[w.Name()] = html.EscapeString(spew.Sdump(w))
	}
	bytes, err := template.ExecuteTemplate("status.html", status)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write(bytes)
}
// simulation renders the simulation page template with no data.
func (h *Handler) simulation(w http.ResponseWriter, r *http.Request) {
	bytes, err := template.ExecuteTemplate("simulation.html", nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write(bytes)
}
|
package api
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"github.com/DigitalOnUs/inotx/config"
)
//////// models //////////

var (
	// ErrEmptyInputFile is returned when the uploaded file carries no payload.
	ErrEmptyInputFile = errors.New("Empty file to consulize")
	// ErrorUnsupportedFormat is returned for extensions other than .hcl/.json.
	ErrorUnsupportedFormat = errors.New("Not supported extension")
)

// File is an uploaded or generated document.
type File struct {
	// extension of the file, including the leading dot (".hcl"/".json")
	Extension string `json:"extension,omitempty"`
	// content of the file
	Payload []byte `json:"payload,omitempty"`
}

// Response is the default API response envelope.
type Response struct {
	Consulfile *File   `json:"consulfile,omitempty"`
	Images     []*File `json:"images,omitempty"`
	Code       int32   `json:"code,omitempty"`
	Message    string  `json:"message,omitempty"`
}

///// end models ///////
//Consulize add the values
//
// Consulize decodes a File from the request body, converts it, and
// responds with the JSON embed derived from the consulized document.
func Consulize(w http.ResponseWriter, r *http.Request) {
	var input File
	decoder := json.NewDecoder(r.Body)
	// validations
	if err := decoder.Decode(&input); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if len(input.Payload) < 1 {
		http.Error(w, ErrEmptyInputFile.Error(), http.StatusBadRequest)
		return
	}
	// basic validations
	out, err := convert(input)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Images[1] is the consulized rendering produced by convert.
	res, err := getEmbedByJson(out.Images[1].Payload)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Headers must be set before WriteHeader; previously Content-Type
	// was set after the status was written and silently ignored.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(res)
}
// convert validates the input file, parses it, adds the consul stanzas,
// and returns the rendered output plus before/after JSON renderings
// used for diagram plotting (Images[0]=original, Images[1]=consulized).
func convert(input File) (out Response, err error) {
	out = Response{}
	if input.Extension != ".hcl" && input.Extension != ".json" {
		err = fmt.Errorf("%w : %s", ErrorUnsupportedFormat, input.Extension)
		return
	}
	// Extension without the leading dot, e.g. "hcl".
	ext := input.Extension[1:]
	defaultName := "inputDocument" + input.Extension
	reader := bytes.NewReader(input.Payload)
	document, err := config.Parse(reader, defaultName, ext)
	if err != nil {
		return
	}
	// translate document
	documentWithConsul, err := config.AddConsul(document)
	if err != nil {
		return
	}
	// getting the output hcl/json
	// NOTE(review): the bufio.Writer is never flushed here; this only
	// works if config.Write flushes internally — verify.
	var b bytes.Buffer
	payload := bufio.NewWriter(&b)
	err = config.Write(payload, ext, documentWithConsul)
	if err != nil {
		err = fmt.Errorf("Error generating the consul output file: %w", err)
		return
	}
	// ------------ Image fetching with json only ----------------
	// currently we support both json and hcl, but the arcentry
	// integration is json-based only
	// NOTE(review): same unflushed-bufio.Writer concern applies to
	// config.WriteJSON below.
	getJson := func(doc *config.Root) []byte {
		var buf bytes.Buffer
		writer := bufio.NewWriter(&buf)
		config.WriteJSON(writer, doc)
		return buf.Bytes()
	}
	// It would be better to convert only when the input is hcl and
	// reuse the original otherwise, but for the demo we compute both.
	initial, final := getJson(document), getJson(documentWithConsul)
	// Polo : These are the jsons to plot with Arcentry
	out.Images = []*File{
		&File{
			Extension: ".json", // redundant
			Payload:   initial,
		},
		&File{
			Extension: ".json", // redundant
			Payload:   final,
		},
	}
	// -----------------------------------------------------------
	out.Code = http.StatusOK
	// redundant
	out.Consulfile = &File{
		Extension: input.Extension,
		Payload:   b.Bytes(),
	}
	return
}
|
package main
import (
"fmt"
)
// addBinary returns the sum of two binary numbers given as strings,
// e.g. addBinary("1101", "101") == "10010". Empty inputs yield "".
func addBinary(a string, b string) string {
	i, j := len(a)-1, len(b)-1
	// Digits are produced least-significant first; size the buffer for
	// the longest input plus a possible final carry.
	n := len(a)
	if len(b) > n {
		n = len(b)
	}
	buf := make([]byte, 0, n+1)
	var carry byte
	for i >= 0 || j >= 0 {
		t := carry
		if i >= 0 {
			t += a[i] - '0'
			i--
		}
		if j >= 0 {
			t += b[j] - '0'
			j--
		}
		buf = append(buf, t%2+'0')
		carry = t / 2
	}
	if carry > 0 {
		buf = append(buf, '1')
	}
	// Reverse into most-significant-first order. This replaces the
	// previous fmt.Sprintf string-prepend loop, which reallocated the
	// whole result on every digit (O(n^2)).
	for l, r := 0, len(buf)-1; l < r; l, r = l+1, r-1 {
		buf[l], buf[r] = buf[r], buf[l]
	}
	return string(buf)
}
// main demonstrates addBinary on a sample pair: 13 + 5 = 18 -> "10010".
func main() {
	fmt.Println(addBinary("1101", "101"))
}
|
package base
import (
"backend/base/ws"
"backend/user/models"
"backend/utils/common"
"backend/utils/gredis"
"backend/utils/logging"
"backend/utils/response"
"encoding/json"
"github.com/astaxie/beego/core/validation"
"github.com/gin-gonic/gin"
"net/http"
)
// UserLogin authenticates a user from a JSON username/password body and
// returns a token plus basic profile data on success.
func UserLogin(c *gin.Context) {
	type User struct {
		// The tags were previously written as `json:"...";valid:"..."`;
		// the semicolon breaks Go's space-separated struct-tag syntax,
		// so the valid rules were never visible to the validator.
		Username string `json:"username" valid:"Required; MaxSize(50)"`
		Password string `json:"password" valid:"Required; MaxSize(50)"`
	}
	var user User
	err := c.ShouldBind(&user)
	if err != nil {
		c.JSON(http.StatusOK, gin.H{
			"code":    4001,
			"message": "parameter error",
		})
		// Previously execution fell through and authenticated a
		// zero-value user; stop here on malformed input.
		return
	}
	username := user.Username
	password := user.Password
	valid := validation.Validation{}
	ok, _ := valid.Valid(&user)
	data := make(map[string]interface{})
	code := response.SUCCESS
	if ok {
		success, user := models.CheckPassword(username, password)
		if success {
			token, err := common.GenerateToken(user.Username, user.PasswordDigest, user.ID)
			if err != nil {
				code = response.ErrorAuthToken
			} else {
				data["token"] = token
				data["id"] = user.ID
				data["username"] = user.Username
				data["role"] = user.Role
				code = response.SUCCESS
			}
		} else {
			// Wrong credentials; log any validation context as info.
			for _, err := range valid.Errors {
				logging.Info(err.Key, err.Message)
			}
			code = response.ErrorAuthToken
		}
	} else {
		// Validation failed; report each violated rule.
		for _, err := range valid.Errors {
			logging.Warn(err.Key, err.Message)
		}
	}
	result := response.Response{}
	result.Code = code
	result.Msg = response.Msg[code]
	result.Data = data
	c.JSON(http.StatusOK, result)
}
// Redis loads the serialized hub from redis and returns it as JSON.
func Redis(context *gin.Context) {
	b, _ := gredis.Get("hub")
	// json.Unmarshal needs a pointer target and returns only an error;
	// previously it was handed a value (so it always failed) and the
	// error itself was sent back to the client as "data".
	var hub ws.Hub
	if err := json.Unmarshal(b, &hub); err != nil {
		context.JSON(http.StatusInternalServerError, gin.H{
			"error": err.Error(),
		})
		return
	}
	context.JSON(http.StatusOK, gin.H{
		"data": hub,
	})
}
|
package compoundsplitting
import (
"bufio"
"os"
"strconv"
"strings"
)
// ContextionaryDict is a dictionary filter for the splitting algorithm
// based on the words in the contextionary.
type ContextionaryDict struct {
	// dict maps each known word to its occurrence count.
	dict map[string]int
}
// NewContextionaryDict loads a dictionary file created by the
// preprocessing procedures. Each line has the form "word,occurrence".
func NewContextionaryDict(contextionaryDictFile string) (*ContextionaryDict, error) {
	file, err := os.Open(contextionaryDictFile)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	dict := &ContextionaryDict{
		dict: make(map[string]int, 400000),
	}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		split := strings.Split(line, ",")
		// Skip malformed lines (e.g. a trailing empty line): indexing
		// split[1] unconditionally used to panic on them.
		if len(split) < 2 {
			continue
		}
		occurrence, err := strconv.Atoi(split[1])
		if err != nil {
			return nil, err
		}
		dict.dict[split[0]] = occurrence
	}
	// Surface read errors that terminated the scan early; these were
	// previously swallowed silently.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return dict, nil
}
// Contains reports whether word is present in the contextionary.
func (cd *ContextionaryDict) Contains(word string) bool {
	_, exists := cd.dict[word]
	return exists
}
// Score prefers long and few words: the score is the total character
// count of the splitting plus a bonus for splittings with fewer parts.
// Rationale: compound words are, on average, most similar to splittings
// that share most of their characters with the compound.
func (cd *ContextionaryDict) Score(phrase []string) float64 {
	total := 0
	for _, word := range phrase {
		total += len(word)
	}
	// Boost splittings made of fewer words.
	switch len(phrase) {
	case 2:
		total += 3
	case 3:
		total++
	}
	return float64(total)
}
// DictMock is a Dictionary stand-in used for unit testing.
type DictMock struct {
	// scores maps a word to its mock score.
	scores map[string]float64
}

// Contains reports whether the mock knows the word.
func (dm *DictMock) Contains(word string) bool {
	_, exists := dm.scores[word]
	return exists
}

// Score sums the mock scores of all words in the phrase; unknown words
// contribute zero.
func (dm *DictMock) Score(phrase []string) float64 {
	score := 0.0
	for _, word := range phrase {
		score += dm.scores[word]
	}
	return score
}
|
package main
import (
"log"
"os"
)
// maxConnRetries caps connection retry attempts.
const maxConnRetries = 5

// reqEnv returns the value of the named environment variable,
// terminating the process with a fatal log message when it is unset or
// empty.
func reqEnv(name string) string {
	val := os.Getenv(name)
	if val == "" {
		log.Fatalf("please set the %s environment variable", name)
	}
	return val
}
// main is an intentionally empty entry point; the program's behavior is
// not implemented yet.
func main() {
}
|
package model
import (
"ginTest/dao"
"github.com/pkg/errors"
"time"
)
// Orders is an order record.
//
// Field meanings (from the original notes): OrderCode is the order
// number, GoodsName/GoodsUnit/GoodsNumbers describe the goods,
// TotalAmount is the total price, SupplierName the supplier, and
// PayStatus is 0 for unpaid, 1 for paid.
type Orders struct {
	// Id and CreateTime are managed server-side and hidden from JSON.
	// The previous tag `json:"_"` does NOT hide a field — it renames
	// it to "_" (and both fields claimed that key); `json:"-"` is the
	// ignore marker.
	Id           int    `json:"-"`
	OrderCode    string `json:"order_code" binding:"required"`
	GoodsName    string `json:"goods_name" binding:"required"`
	GoodsUnit    string `json:"goods_unit" binding:"required"`
	GoodsNumbers int    `json:"goods_numbers" binding:"required"`
	TotalAmount  int    `json:"total_amount" binding:"required"`
	SupplierName string `json:"supplier_name" binding:"required"`
	PayStatus    int    `json:"pay_status" binding:"required"`
	CreateTime   string `json:"-"`
}
// FindSameOrderCode returns how many existing orders already use the
// given order code.
func FindSameOrderCode(orderCode string) (n int64) {
	var order []Orders
	// The query previously passed an empty condition string, so the
	// orderCode argument was never applied to the lookup; use the same
	// order_code condition as the sibling queries in this file.
	n = dao.DB.Where("order_code=?", orderCode).Find(&order).RowsAffected
	return
}
// AddOrders stamps the order with the current creation time and inserts
// it; any DB failure is mapped to a generic "add to mysql failed" error.
func AddOrders(order Orders) (err error) {
	order.CreateTime = time.Now().Format("2006-01-02 15:04:05")
	if dbErr := dao.DB.Create(&order).Error; dbErr != nil {
		return errors.New("add to mysql failed")
	}
	return nil
}
// FindAllBill loads every order; a failed query yields a "not find" error.
func FindAllBill() (order []Orders, err error) {
	if dbErr := dao.DB.Find(&order).Error; dbErr != nil {
		return nil, errors.New("not find")
	}
	return order, nil
}
// FindOneBill fetches the order whose order_code equals id (debug logging
// enabled); a failed query yields a "find failed" error.
func FindOneBill(id string) (order Orders, err error) {
	if dbErr := dao.DB.Debug().Where("order_code=?", id).Find(&order).Error; dbErr != nil {
		return order, errors.New("find failed")
	}
	return order, nil
}
// UpdateOneBill loads the first order whose order_code equals id so the
// caller can modify and save it; a failed query yields "modify failed".
func UpdateOneBill(id string) (order Orders, err error) {
	if dbErr := dao.DB.Debug().Where("order_code=?", id).First(&order).Error; dbErr != nil {
		return order, errors.New("modify failed")
	}
	return order, nil
}
// SaveOneBill persists all fields of order via gorm Save.
// NOTE(review): Save receives the struct by value; gorm conventionally
// takes a pointer (&order) — confirm this updates rather than erroring.
func SaveOneBill(order Orders) (err error) {
	if dbErr := dao.DB.Debug().Save(order).Error; dbErr != nil {
		return errors.New("save failed")
	}
	return nil
}
// DeleteOneBill removes the orders whose order_code equals id.
func DeleteOneBill(id string) (err error) {
	var order Orders
	if dbErr := dao.DB.Where("order_code=?", id).Delete(&order).Error; dbErr != nil {
		return errors.New("delete failed")
	}
	return nil
}
func FindMoreBill(goodsName ,supplier string,payStatus int)(order []Orders,err error){
db:=dao.DB
if len(goodsName)!=0{
db=db.Where("goods_name=?",goodsName)
}
if len(supplier)!=0{
db=db.Where("supplier_name=?",supplier)
}
if payStatus!=0{
db=db.Where("pay_status=?",payStatus)
}
if err=db.Debug().Find(&order).Error;err!=nil{
return nil,errors.New("find failed")
}
return order,nil
} |
package main
import "fmt"
// a receives the user-supplied number; package-level so fmt.Scan can fill
// it on each loop iteration.
var a int

// main repeatedly reads an integer from stdin and prints its Fibonacci
// number.
func main() {
	for {
		fmt.Println("Введите чило")
		_, err := fmt.Scan(&a)
		if err != nil {
			// NOTE(review): on a scan error the offending token stays in the
			// input buffer, so this loop can spin printing the same error —
			// consider draining the token or exiting; confirm desired behavior.
			fmt.Printf("error => %s", err)
		}
		fmt.Println(fib(a))
	}
}
// fib returns the n-th Fibonacci number (fib(0)=0, fib(1)=1).
//
// The original allocated a fresh memo map on every call, so nothing was
// cached across the recursion and the runtime was exponential; it also
// recursed forever for negative n. This version is iterative: O(n) time,
// O(1) space, and clamps n < 0 to 0.
func fib(n int) int {
	if n <= 0 {
		return 0
	}
	prev, cur := 0, 1
	for i := 2; i <= n; i++ {
		prev, cur = cur, prev+cur
	}
	return cur
}
|
package fishy
import (
"fmt"
"github.com/getcouragenow/core-bs/sdk/pkg/common/gitutil"
"github.com/getcouragenow/core-bs/sdk/pkg/common/osutil"
"github.com/getcouragenow/core-bs/sdk/pkg/oses"
log "github.com/sirupsen/logrus"
"os"
)
const (
bsFishes = "github.com/getcouragenow/core-fish"
)
// GoFishInstallation holds the names, paths, repo locations, and version
// used to install, initialize, and remove the gofish package manager for
// one user/OS environment.
type GoFishInstallation struct {
	BinName string
	OrgName string
	Repo string
	BinPath string
	SrcPath string
	FishRepo string
	Platform string
	Version string
	OSName string
	// userDir is the user's root directory; unexported because it is only
	// needed internally when building cleanup paths.
	userDir string
}
// NewGoFishInstall builds a GoFishInstallation for the given user OS
// environment, defaulting GOPATH to <root>/workspace/go/ when the
// environment has none.
func NewGoFishInstall(u *oses.UserOsEnv) *GoFishInstallation {
	binName := "gofish"
	orgName := "fishworks"
	gitRepo := fmt.Sprintf("github.com/%s/%s", orgName, binName)
	goPath := u.GetGoEnv().GoPath()
	if goPath == "" {
		// NOTE(review): the freshly set GOPATH is not re-read below —
		// u.GetGoPath() may still return the previous (empty) value for
		// BinPath/SrcPath; confirm against oses' implementation.
		os.Setenv("GOPATH", fmt.Sprintf("%s/%s", u.GetOsProperties().GetRoot(), "workspace/go/"))
	}
	return &GoFishInstallation{
		Platform: u.GetOsProperties().GetOsInfo().GetPlatform(),
		BinName: binName,
		OrgName: orgName,
		Repo: gitRepo,
		BinPath: u.GetGoPath() + "/bin",
		SrcPath: u.GetGoPath() + "/gofish",
		FishRepo: "https://github.com/getcouragenow/core-fish",
		Version: "v0.11.0",
		OSName: u.GetOsProperties().GetOsInfo().GetOsName(),
		userDir: u.GetOsProperties().GetRoot(),
	}
}
// InstallGoFish removes any previous gofish installation and then runs
// the install script.
func (g *GoFishInstallation) InstallGoFish() error {
	log.Infof("Installing gofish to GOPATH dir")
	g.cleanGoFishGit() // wipe any previous install first; always returns nil
	return g.runInstallScript()
}
// GofishInit runs `gofish init` on the host, discarding its output.
func (g *GoFishInstallation) GofishInit() error {
	if _, err := osutil.RunUnixCmd(`gofish`, `init`); err != nil {
		return err
	}
	return nil
}
// SetFishRig points gofish at the project's rig by exporting GOFISH_RIGS
// and GOFISH_DEFAULT_RIG.
//
// Fix: the error from the first os.Setenv was silently discarded; both
// environment writes are now checked.
func (g *GoFishInstallation) SetFishRig() error {
	if err := os.Setenv("GOFISH_RIGS", g.SrcPath); err != nil {
		return err
	}
	return os.Setenv("GOFISH_DEFAULT_RIG", fmt.Sprintf("%s/%s", g.SrcPath, "core-fish"))
}
// InitGoFish exports the rig environment variables and clones the fish
// repository into the source path.
func (g *GoFishInstallation) InitGoFish() error {
	err := g.SetFishRig()
	if err != nil {
		return err
	}
	return gitutil.GitClone(g.FishRepo, g.SrcPath)
}
// UninstallGoFish removes every artifact the installer created.
func (g *GoFishInstallation) UninstallGoFish() error {
	return g.cleanGoFishGit()
}
// cleanGoFishGit best-effort removes all gofish artifacts: the system
// install dir, the user's dot-directory, the cloned source, and the
// installed binary. Removal errors are deliberately ignored (the paths
// may not exist); the function always returns nil.
func (g *GoFishInstallation) cleanGoFishGit() error {
	os.RemoveAll("/usr/local/gofish")
	os.RemoveAll(fmt.Sprintf("%s/.%s", g.userDir, g.BinName))
	gitutil.GitRemove(g.SrcPath)
	gitutil.GitRemove(g.BinPath + "/" + g.BinName)
	return nil
}
|
// Package handlers provides http handlers for HPKE.
package handlers
import (
"bytes"
"fmt"
"hash/fnv"
"net/http"
"strconv"
"time"
"github.com/rs/cors"
"github.com/pomerium/pomerium/internal/httputil"
"github.com/pomerium/pomerium/internal/urlutil"
"github.com/pomerium/pomerium/pkg/hpke"
)
// HPKEPublicKeyPath is the path to the HPKE public key.
// Re-exported from urlutil so handler consumers need not import it.
const HPKEPublicKeyPath = urlutil.HPKEPublicKeyPath
// HPKEPublicKeyHandler returns a handler which returns the HPKE public
// key. The key bytes are served as an octet stream with a short max-age
// and an ETag derived from an FNV-64 hash of the key, and CORS is open
// to all origins.
func HPKEPublicKeyHandler(publicKey *hpke.PublicKey) http.Handler {
	h := httputil.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
		raw := publicKey.Bytes()

		// Cheap content fingerprint for the ETag header.
		digest := fnv.New64()
		_, _ = digest.Write(raw)

		w.Header().Set("Cache-Control", "max-age=60")
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Length", strconv.Itoa(len(raw)))
		w.Header().Set("ETag", fmt.Sprintf(`"%x"`, digest.Sum64()))
		http.ServeContent(w, r, "hpke-public-key", time.Time{}, bytes.NewReader(raw))
		return nil
	})
	return cors.AllowAll().Handler(h)
}
|
package cookiestore
import (
"crypto/rand"
"fmt"
"net/http"
"time"
"github.com/byuoitav/auth/session"
"github.com/golang-jwt/jwt/v4"
)
// Store issues and validates JWT-backed session cookies.
type Store struct {
	ttl int // inactivity limit in minutes; 0 disables the check
	maxAge int // absolute session lifetime in minutes
	key []byte // HMAC-SHA256 signing key for the session JWTs
}
// NewStore creates a Store with a randomly generated 64-byte signing key,
// a two-hour inactivity TTL, and a one-week max age; opts may override
// any of these. Panics if crypto/rand cannot supply key material.
func NewStore(opts ...Option) *Store {
	key := make([]byte, 64)
	if _, err := rand.Read(key); err != nil {
		panic(fmt.Sprintf("Couldn't autogenerate signing key: %s", err))
	}
	store := &Store{
		ttl:    120,   // Two hours
		maxAge: 10080, // One week
		key:    key,
	}
	for _, apply := range opts {
		apply(store)
	}
	return store
}
// new creates a fresh named session whose exp claim is set maxAge minutes
// in the future, RFC3339-encoded.
func (s *Store) new(name string) *session.Session {
	sess := session.NewSession(s, name)
	expiry := time.Now().Add(time.Duration(s.maxAge) * time.Minute)
	sess.Values["exp"] = expiry.Format(time.RFC3339)
	return sess
}
// Get returns the session stored in the request's named cookie. Any
// failure (missing cookie, bad signature, expired, inactive too long)
// falls back to a brand-new session; the error describes why, so callers
// may treat a non-nil error as "session was reset".
//
// Validation order: signature → exp (absolute lifetime) → iat
// (inactivity window, only when ttl > 0).
func (s *Store) Get(r *http.Request, name string) (*session.Session, error) {
	// Check for existing session
	j, err := r.Cookie(name)
	if err != nil {
		// No existing session, create a new one
		return s.new(name), nil
	}
	// Validate the existing session
	token, err := jwt.Parse(j.Value, func(T *jwt.Token) (interface{}, error) {
		// Pin the algorithm to HS256 to block alg-substitution attacks.
		if T.Method.Alg() != "HS256" {
			// Invalid signing method, return new session
			return "", fmt.Errorf("Invalid signing method %v", T.Method.Alg())
		}
		return []byte(s.key), nil
	})
	if err != nil {
		// Signature invalid, return new session
		return s.new(name), fmt.Errorf("Session cookie invalid: %s", err)
	}
	// Check that the session hasn't passed max age
	exp, ok := token.Claims.(jwt.MapClaims)["exp"]
	if !ok {
		// No expiration claim, new session
		return s.new(name), fmt.Errorf("Session cookie lacks exp claim")
	}
	//jwt has an expiration time
	// NOTE(review): exp.(string) panics if a validly-signed token carries a
	// non-string exp; only tokens signed with our random key reach here, so
	// this holds as long as Save always writes RFC3339 strings — confirm.
	t, err := time.Parse(time.RFC3339, exp.(string))
	if err != nil {
		// Token has no parsable expiration date restart
		return s.new(name), fmt.Errorf("Session cookie exp claim unparsable")
	}
	// if the jwt is expired
	if t.Before(time.Now()) {
		return s.new(name), fmt.Errorf("Session cookie expired")
	}
	// If we care to check for inactivity
	if s.ttl > 0 {
		// Check that the session hasn't hit the inactivity limit
		iat, ok := token.Claims.(jwt.MapClaims)["iat"]
		if !ok {
			// No expiration claim, new session
			return s.new(name), fmt.Errorf("Session cookie lacks iat claim")
		}
		//jwt has an issued at time
		it, err := time.Parse(time.RFC3339, iat.(string))
		if err != nil {
			// Token has no parsable expiration date restart
			return s.new(name), fmt.Errorf("Session cookie iat claim unparsable")
		}
		// if the jwt has passed inactivity window
		if time.Since(it) > time.Duration(s.ttl)*time.Minute {
			return s.new(name), fmt.Errorf("Session inactivity limit passed")
		}
	}
	// Load valid session
	se := s.new(name)
	se.IsNew = false
	se.Values = token.Claims.(jwt.MapClaims)
	return se, nil
}
// Save serializes the session's Values into a signed HS256 JWT, refreshes
// the iat claim to now (resetting the inactivity window), and writes it
// back as a cookie scoped to the request's hostname. The exp claim is not
// refreshed here — it is carried over from Values as set by new().
func (s *Store) Save(r *http.Request, w http.ResponseWriter, se *session.Session) error {
	// Populate the claims
	claims := jwt.MapClaims{}
	for k, v := range se.Values {
		claims[k] = v
	}
	// Update iat (issued at) claim to now
	claims["iat"] = time.Now().Format(time.RFC3339)
	// Create and sign token
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	// NOTE(review): s.key is already []byte, so the conversion below is a
	// no-op copy; harmless but redundant.
	signedToken, err := token.SignedString([]byte(s.key))
	if err != nil {
		return fmt.Errorf("failed to sign token: %w", err)
	}
	// Derive the cookie domain from the hostname of the requested URL
	domain := r.URL.Hostname()
	// Write cookie
	// NOTE(review): HttpOnly=false exposes the session JWT to page scripts
	// and Secure=false allows it over plain HTTP — confirm this is
	// intentional for this deployment.
	sessionCookie := &http.Cookie{
		Name: se.Name(),
		Value: signedToken,
		HttpOnly: false,
		Secure: false,
		Path: "/",
		Domain: domain,
	}
	http.SetCookie(w, sessionCookie)
	return nil
}
// Drop expires the named session cookie on the client.
//
// Fix: the original only set an empty Value, which does not reliably
// delete a cookie; browsers require MaxAge<0 (emitted as Max-Age=0)
// and/or a past Expires date to remove it.
func (s *Store) Drop(r *http.Request, w http.ResponseWriter, name string) error {
	c := &http.Cookie{
		Name:     name,
		Value:    "",
		HttpOnly: false,
		Secure:   false,
		Path:     "/",
		Domain:   r.URL.Hostname(),
		MaxAge:   -1,
		Expires:  time.Unix(0, 0),
	}
	http.SetCookie(w, c)
	return nil
}
|
package skeleton
import (
"bytes"
"crypto/md5"
//"encoding/base64"
"encoding/binary"
"errors"
"io"
"io/ioutil"
"log"
"net/http"
"strconv"
"github.com/golang/protobuf/proto"
pbd "yunjing.me/phoenix/pbd/go"
oaccount "webapi/account"
osession "webapi/session"
)
const (
	kMinPacketSize = 2 + 4 + 2 + md5.Size // minimum packet size: rn(2) + uid(4) + pid(2) + MD5 signature
	kTokenSize = 24 // access-token length in bytes
	kAPISecret = "Lo01v8!P" // shared secret appended to the signed bytes
)
// MessageHandler processes one decoded protocol message for a session and
// role, returning an error, the reply protocol id, and the reply message.
type MessageHandler func(*Skeleton, *osession.Session, *oaccount.Role, proto.Message) (error, uint16, proto.Message)

// Packet is the decoded wire form of one client request.
type Packet struct {
	rn uint16 // random number (anti-replay nonce)
	uid uint32 // role id; 0 means unauthenticated
	token []byte // access token; present only when uid != 0
	pid uint16 // protocol id
	payload []byte // protocol body (may be empty)
	sign []byte // MD5 signature over the preceding fields + kAPISecret
}
// CheckSign verifies the packet's MD5 signature.
//
// The signed byte string is, in order: rn(2, LE) | uid(4, LE) | token
// (only when uid != 0) | pid(2, LE) | payload (if any) | kAPISecret.
// Returns false for an authenticated packet (uid != 0) that lacks its
// token, or when the MD5 of the assembled bytes differs from self.sign.
func (self *Packet) CheckSign() bool {
	// Assemble the exact byte sequence that the client hashed.
	idx := 0
	total := 6 + 2 + len([]byte(kAPISecret))
	if self.uid != 0 && (self.token != nil && len(self.token) > 0) {
		total += len(self.token)
	}
	if self.payload != nil && len(self.payload) > 0 {
		total += len(self.payload)
	}
	raw := make([]byte, total)
	binary.LittleEndian.PutUint16(raw, self.rn)
	idx += 2
	binary.LittleEndian.PutUint32(raw[2:], self.uid)
	idx += 4
	// hash.Write(raw)
	if self.uid != 0 {
		// Authenticated packets must carry a token.
		if self.token == nil || len(self.token) == 0 {
			return false
		}
		copy(raw[idx:], self.token)
		idx += len(self.token)
	}
	// raw1 := make([]byte, 2)
	binary.LittleEndian.PutUint16(raw[idx:], self.pid)
	idx += 2
	// hash.Write(raw1)
	if self.payload != nil && len(self.payload) > 0 {
		// hash.Write(self.payload)
		copy(raw[idx:], self.payload)
		idx += len(self.payload)
	}
	// hash.Write([]byte(kAPISecret))
	copy(raw[idx:], []byte(kAPISecret))
	idx += len([]byte(kAPISecret))
	hash := md5.New()
	hash.Write(raw)
	verify := hash.Sum(nil)
	//log.Printf("%v", raw)
	//log.Printf("%v, %v", verify, self.sign)
	return bytes.Equal(verify, self.sign)
}
// recv reads and decodes one Packet from the HTTP request body.
//
// The body is size-limited to 10 MiB unless it is already wrapped in a
// maxBytesReader. Layout (little-endian): rn(2) | uid(4) | [token(24)
// when uid != 0] | pid(2) | payload | sign(md5.Size). The token, payload,
// and sign slices alias the request buffer (no copies).
func recv(r *http.Request) (error, *Packet) {
	var reader io.Reader = r.Body
	maxFormSize := int64(1<<63 - 1)
	if _, ok := r.Body.(*maxBytesReader); !ok {
		// Not pre-limited: cap at 10 MiB (+1 so overflow is detectable).
		maxFormSize = int64(10 << 20)
		reader = io.LimitReader(r.Body, maxFormSize+1)
	}
	b, e := ioutil.ReadAll(reader)
	if e != nil {
		log.Printf("读取字节流时出错: %v", e)
		return e, nil
	}
	l := len(b)
	if l == 0 {
		log.Printf("读取字节流时包体过小: %d", l)
		return errors.New("http trunk too short"), nil
	}
	if int64(l) > maxFormSize {
		log.Printf("读取字节流时包体过大: %d", l)
		return errors.New("http trunk too large"), nil
	}
	if int64(l) < kMinPacketSize {
		log.Printf("读取字节流时包体过小1: %d", l)
		return errors.New("http trunk too short"), nil
	}
	// log.Printf("%v", b)
	packet := new(Packet)
	packet.rn = binary.LittleEndian.Uint16(b[:])
	packet.uid = binary.LittleEndian.Uint32(b[2:])
	if packet.uid != 0 {
		// Authenticated: a 24-byte token sits between uid and pid.
		if l < (kMinPacketSize + kTokenSize) {
			log.Printf("读取字节流时包体积过小2: %d", l)
			return errors.New("http trunk too short"), nil
		}
		packet.token = b[6 : 6+kTokenSize]
		packet.pid = binary.LittleEndian.Uint16(b[6+kTokenSize:])
		if (8 + kTokenSize) < (l - md5.Size) {
			packet.payload = b[8+kTokenSize : l-md5.Size]
		}
	} else {
		packet.pid = binary.LittleEndian.Uint16(b[6:])
		if 8 < (l - md5.Size) {
			packet.payload = b[8:(l - md5.Size)]
		}
	}
	// The trailing md5.Size bytes are always the signature.
	packet.sign = b[l-md5.Size : l]
	return nil, packet
}
// send marshals payload and writes it to w prefixed with the 2-byte
// little-endian protocol id, with octet-stream headers set.
func send(w http.ResponseWriter, pid uint16, payload proto.Message) {
	raw1, err := proto.Marshal(payload)
	if err != nil {
		// Fix: the marshal error was silently discarded; surface it in the
		// log while keeping the original behavior of sending an empty body.
		log.Printf("marshal payload failed: %v", err)
	}
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", strconv.Itoa(len(raw1)+2))
	raw2 := make([]byte, len(raw1)+2)
	binary.LittleEndian.PutUint16(raw2, pid)
	copy(raw2[2:], raw1)
	w.Write(raw2)
}
// doServerInternalErrorSend replies with a generic SERVER_INVALID error
// protocol message.
// NOTE(review): the err argument is accepted but never used — presumably
// intended for logging; confirm and either log it or drop the parameter.
func doServerInternalErrorSend(w http.ResponseWriter, err error) {
	errorID := uint32(pbd.ECODE_SERVER_INVALID)
	payload := &pbd.Error{
		Code: &errorID,
	}
	send(w, uint16(pbd.MSG_ERROR), payload)
}
// ----------------------------------------------------------------------------
|
package service_test
import (
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
"testing"
"time"
"github.com/go-ocf/cloud/resource-aggregate/cqrs"
"github.com/go-ocf/kit/codec/cbor"
"github.com/go-ocf/kit/codec/json"
"github.com/go-ocf/cloud/authorization/provider"
c2cTest "github.com/go-ocf/cloud/cloud2cloud-gateway/test"
"github.com/go-ocf/cloud/cloud2cloud-gateway/uri"
"github.com/go-ocf/cloud/grpc-gateway/pb"
grpcTest "github.com/go-ocf/cloud/grpc-gateway/test"
"github.com/go-ocf/go-coap"
kitNetGrpc "github.com/go-ocf/kit/net/grpc"
"github.com/go-ocf/sdk/schema/cloud"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const TEST_TIMEOUT = time.Second * 20
// TestRequestHandler_RetrieveResource is a table-driven integration test:
// it boots the cloud2cloud test environment, onboards a simulated device,
// and retrieves its resources over HTTP, checking status code, content
// type, and decoded body for JSON, CBOR, not-found, and bad-Accept cases.
func TestRequestHandler_RetrieveResource(t *testing.T) {
	deviceID := grpcTest.MustFindDeviceByName(grpcTest.TestDeviceName)
	type args struct {
		uri string
		accept string
	}
	tests := []struct {
		name string
		args args
		wantContentType string
		wantCode int
		want interface{}
	}{
		{
			name: "JSON: " + uri.Devices + "/" + deviceID + cloud.StatusHref,
			args: args{
				uri: uri.Devices + "/" + deviceID + cloud.StatusHref,
				accept: coap.AppJSON.String(),
			},
			wantCode: http.StatusOK,
			wantContentType: coap.AppJSON.String(),
			want: map[interface{}]interface{}{
				"rt": []interface{}{"x.cloud.device.status"},
				"if": []interface{}{"oic.if.baseline"},
				"online": true,
			},
		},
		{
			name: "CBOR: " + uri.Devices + "/" + deviceID + cloud.StatusHref,
			args: args{
				uri: uri.Devices + "/" + deviceID + cloud.StatusHref,
				accept: coap.AppOcfCbor.String(),
			},
			wantCode: http.StatusOK,
			wantContentType: coap.AppOcfCbor.String(),
			want: map[interface{}]interface{}{
				"rt": []interface{}{"x.cloud.device.status"},
				"if": []interface{}{"oic.if.baseline"},
				"online": true,
			},
		},
		{
			name: "JSON: " + uri.Devices + "/" + deviceID + "/light/1",
			args: args{
				uri: uri.Devices + "/" + deviceID + "/light/1",
				accept: coap.AppJSON.String(),
			},
			wantCode: http.StatusOK,
			wantContentType: coap.AppJSON.String(),
			want: map[interface{}]interface{}{
				"name": "Light",
				"power": uint64(0),
				"state": false,
			},
		},
		{
			name: "CBOR: " + uri.Devices + "/" + deviceID + "/light/1",
			args: args{
				uri: uri.Devices + "/" + deviceID + "/light/1",
				accept: coap.AppOcfCbor.String(),
			},
			wantCode: http.StatusOK,
			wantContentType: coap.AppOcfCbor.String(),
			want: map[interface{}]interface{}{
				"name": "Light",
				"power": uint64(0),
				"state": false,
			},
		},
		{
			name: "notFound",
			args: args{
				uri: uri.Devices + "/" + deviceID + "/notFound",
				accept: coap.AppJSON.String(),
			},
			wantCode: http.StatusNotFound,
			wantContentType: "text/plain",
			want: "cannot retrieve resource: cannot retrieve resource(deviceID: " + deviceID + ", Href: /notFound): cannot retrieve resource(" + cqrs.MakeResourceId(deviceID, "/notFound") + "): cannot retrieve resources values: rpc error: code = NotFound desc = cannot retrieve resources values: not found",
		},
		{
			name: "invalidAccept",
			args: args{
				uri: uri.Devices + "/" + deviceID + "/light/1",
				accept: "application/invalid",
			},
			wantCode: http.StatusBadRequest,
			wantContentType: "text/plain",
			want: "cannot retrieve resource: cannot retrieve: invalid accept header([application/invalid])",
		},
		{
			// Double slash in the path must still route to the device status.
			name: "JSON: " + uri.Devices + "//" + deviceID + cloud.StatusHref + "/",
			args: args{
				uri: uri.Devices + "//" + deviceID + cloud.StatusHref + "/",
				accept: coap.AppJSON.String(),
			},
			wantCode: http.StatusOK,
			wantContentType: coap.AppJSON.String(),
			want: map[interface{}]interface{}{
				"rt": []interface{}{"x.cloud.device.status"},
				"if": []interface{}{"oic.if.baseline"},
				"online": true,
			},
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), TEST_TIMEOUT)
	defer cancel()
	ctx = kitNetGrpc.CtxWithToken(ctx, provider.UserToken)
	// Boot the whole cloud2cloud gateway test environment.
	tearDown := c2cTest.SetUp(ctx, t)
	defer tearDown()
	conn, err := grpc.Dial(grpcTest.GRPC_HOST, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{
		RootCAs: grpcTest.GetRootCertificatePool(t),
	})))
	require.NoError(t, err)
	c := pb.NewGrpcGatewayClient(conn)
	defer conn.Close()
	// Onboard the simulated device so its resources are retrievable.
	shutdownDevSim := grpcTest.OnboardDevSim(ctx, t, c, deviceID, grpcTest.GW_HOST, grpcTest.GetAllBackendResourceLinks())
	defer shutdownDevSim()
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := c2cTest.NewRequest(http.MethodGet, tt.args.uri, nil).AddHeader("Accept", tt.args.accept).Build(ctx, t)
			resp := c2cTest.DoHTTPRequest(t, req)
			assert.Equal(t, tt.wantCode, resp.StatusCode)
			defer resp.Body.Close()
			require.Equal(t, tt.wantContentType, resp.Header.Get("Content-Type"))
			if tt.want != nil {
				var got interface{}
				// Pick the decoder matching the expected content type;
				// default errors out for unexpected types.
				readFrom := func(w io.Reader, v interface{}) error {
					return fmt.Errorf("not supported")
				}
				switch tt.wantContentType {
				case coap.AppJSON.String():
					readFrom = json.ReadFrom
				case coap.AppCBOR.String(), coap.AppOcfCbor.String():
					readFrom = cbor.ReadFrom
				case "text/plain":
					readFrom = func(w io.Reader, v interface{}) error {
						b, err := ioutil.ReadAll(w)
						if err != nil {
							return err
						}
						val := reflect.ValueOf(v)
						if val.Kind() != reflect.Ptr {
							return fmt.Errorf("some: check must be a pointer")
						}
						val.Elem().Set(reflect.ValueOf(string(b)))
						return nil
					}
				}
				err = readFrom(resp.Body, &got)
				require.NoError(t, err)
				require.Equal(t, tt.want, got)
			}
		})
	}
}
|
package commands
import (
"fmt"
"github.com/dghubble/go-twitter/twitter"
"github.com/dora1998/snail-bot/repository"
"regexp"
"time"
)
// Command pairs a command name with the function invoked when a tweet
// mentions it.
type Command struct {
	Name string
	HandleFunc func(body string, username string, statusId int64, repo Repository)
}

// CommandHandler resolves incoming tweet text to commands, backed by a
// task repository and a twitter client for replies.
type CommandHandler struct {
	repository Repository
	twitterClient TwitterClient
}

// Repository abstracts task storage for command handlers.
type Repository interface {
	Add(body string, deadline time.Time, createdBy string) *repository.Task
	Remove(id string) error
	GetAllTasks() []repository.Task
	GetTaskById(id string) *repository.Task
	GetTaskByBody(body string) *repository.Task
}

// TwitterClient abstracts the subset of the Twitter API the bot uses.
type TwitterClient interface {
	Tweet(msg string) (*twitter.Tweet, error)
	Reply(msg string, tweetId int64) (*twitter.Tweet, error)
	CreateFavorite(tweetId int64) error
	IsFollowing(screenName string) bool
	TweetLongText(text string, headText string) ([]*twitter.Tweet, error)
}
// NewCommandHandler wires a repository and a twitter client into a
// command handler.
func NewCommandHandler(repo Repository, twitterClient TwitterClient) *CommandHandler {
	h := &CommandHandler{repository: repo, twitterClient: twitterClient}
	return h
}
// commandPattern splits an incoming mention into a command name and an
// optional body. Compiled once at package level — the original called
// regexp.MustCompile on every Resolve invocation.
var commandPattern = regexp.MustCompile(`^(\S+)(\s(.+))*$`)

// Resolve parses "<command> [body]" out of text and dispatches to the
// matching handler ("追加" add / "削除" remove / "一覧" list). It returns
// an error when text does not match the pattern or names no known command.
func (h *CommandHandler) Resolve(text string, username string, statusId int64) error {
	res := commandPattern.FindStringSubmatch(text)
	if res == nil {
		return fmt.Errorf("failed resolve (incorrect pattern)")
	}
	// res[3] is the optional body; empty when only a command was given.
	commandName, commandBody := res[1], res[3]
	fmt.Printf("%s: %s\n", commandName, commandBody)
	switch commandName {
	case "追加":
		h.add(commandBody, username, statusId)
	case "削除":
		h.remove(commandBody, username, statusId)
	case "一覧":
		h.list(statusId)
	default:
		return fmt.Errorf("failed resolve (no match)")
	}
	return nil
}
|
package main
import (
adventutilities "AdventOfCode/utils"
"log"
)
// solvePuzzleN counts trees ('#') hit while descending the grid with the
// given slope (move `right`, then `down`), wrapping horizontally
// (Advent of Code 2020 day 3).
//
// Generalized: the original only supported down values of 1 and 2 via a
// hard-coded n%2 check (flagged by its own comment as "strictly not a
// good solution"); any positive down now works by skipping rows with
// n%down != 0. Behavior for down=1 and down=2 is unchanged.
func solvePuzzleN(lines []string, right int, down int) (numTrees int) {
	lenY := len(lines)
	log.Println("numrows = ", lenY)
	s := lines[0]
	lenX := len(s)
	log.Println("numcols = ", lenX)
	numTrees = 0
	xPos := 0
	log.Println("right =", right)
	log.Println("down =", down)
	for n, line := range lines {
		// Skip rows the slope jumps over; xPos must not advance on them.
		if down > 1 && n%down != 0 {
			continue
		}
		for i := 0; i < right; i++ {
			// Wrap around: the tree pattern repeats horizontally.
			if xPos == lenX {
				xPos = 0
				log.Println("on line", n+1, " - resetting xPos")
			}
			runes := []rune(line)
			charToMatch := string(runes[xPos : xPos+1])
			// Only the square at the start of the move (i==0) is the one
			// actually landed on for this row.
			if charToMatch == "#" && i == 0 {
				numTrees++
				//log.Println(" new tree on line",n+1, " : num trees:",numTrees)
			}
			//log.Println("on line ", n+1,"xPos is", xPos+1, "got a",charToMatch)
			xPos++
		}
	}
	return numTrees
}
// solvePuzzle1 solves part one: slope right 3, down 1.
func solvePuzzle1(lines []string) (numTrees int) {
	numTrees = solvePuzzleN(lines, 3, 1)
	return
}
// checkResult logs whether actual equals expected and reports the outcome.
func checkResult(testName string, actual int, expected int) (success bool) {
	if actual != expected {
		log.Println("Test:", testName, "failed, expected", expected, "got", actual)
		return false
	}
	log.Println("Test:", testName, "successful, actual", actual, "== expected", expected)
	return true
}
// main runs all the day-3 slopes over the input file, checks each count
// against its expected value, and prints the product of all five counts.
// NOTE(review): the expected values (2, 200/7, 3, 4, 2, 336) look tied to
// a specific input file; the local checkResult above appears unused in
// favor of adventutilities.CheckResult — confirm which is intended.
func main() {
	//need to work out how to write tests in Go, but leaving for another day
	test := false
	log.Println("test:",test)
	fileName := "data/inputs_03_12.txt"
	if(test){
		fileName = "data/testinputs_03_12.txt"
	}
	lines, err := adventutilities.ReadStringsFromFile(fileName)
	adventutilities.Check(err)
	oneRightOneDownResult := solvePuzzleN(lines,1,1)
	log.Println("solvePuzzleN (right 1, down 1: numTrees:",oneRightOneDownResult)
	adventutilities.CheckResult("oneRightOneDown", oneRightOneDownResult, 2)
	threeRightOneDownExpected := 200
	if(test){
		threeRightOneDownExpected = 7
	}
	threeRightOneDownResult := solvePuzzle1(lines)
	log.Println("solvePuzzle1 (threeRightOneDown, numTrees:",threeRightOneDownResult)
	adventutilities.CheckResult("threeRightOneDown", threeRightOneDownResult, threeRightOneDownExpected)
	fiveRightOneDownResult := solvePuzzleN(lines,5,1)
	log.Println("solvePuzzleN (right 5, down 1: numTrees:",fiveRightOneDownResult)
	adventutilities.CheckResult("fiveRightOneDown", fiveRightOneDownResult, 3)
	sevenRightOneDownResult := solvePuzzleN(lines,7,1)
	log.Println("solvePuzzleN (right 7, down 1: numTrees:",sevenRightOneDownResult)
	adventutilities.CheckResult("sevenRightOneDown", sevenRightOneDownResult, 4)
	oneRightTwoDownResult := solvePuzzleN(lines,1,2)
	log.Println("solvePuzzleN (right 1, down 2: numTrees:",oneRightTwoDownResult)
	adventutilities.CheckResult("oneRightTwoDown", oneRightTwoDownResult, 2)
	product := oneRightOneDownResult * threeRightOneDownResult * fiveRightOneDownResult * sevenRightOneDownResult * oneRightTwoDownResult
	log.Println("Product:",product)
	adventutilities.CheckResult("Product", product, 336)
}
|
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)
// statusHandler is an http.Handler that writes an empty response using itself
// as the response status code.
type statusHandler int

// ServeHTTP writes only the stored status code; the pointer receiver lets
// tests mutate the status between requests.
func (h *statusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(int(*h))
}
// TestIsTagged checks that isTagged mirrors the HTTP status of the probed
// URL: false on 404, true on 200.
func TestIsTagged(t *testing.T) {
	// Set up a fake "Google Code" web server reporting 404 not found.
	status := statusHandler(http.StatusNotFound)
	s := httptest.NewServer(&status)
	defer s.Close()
	if isTagged(s.URL) {
		t.Fatal("isTagged == true, want false")
	}
	// Change fake server status to 200 OK and try again.
	status = http.StatusOK
	if !isTagged(s.URL) {
		t.Fatal("isTagged == false, want true")
	}
}
// TestIntegration drives the server's poll loop deterministically by
// replacing pollSleep/pollDone with channel-based hooks: the first request
// (while the fake upstream returns 404) must answer "No.", and after the
// upstream flips to 200 and the poller observes it, the answer is "YES!".
func TestIntegration(t *testing.T) {
	status := statusHandler(http.StatusNotFound)
	ts := httptest.NewServer(&status)
	defer ts.Close()
	// Replace the pollSleep with a closure that we can block and unblock.
	sleep := make(chan bool)
	pollSleep = func(time.Duration) {
		sleep <- true
		sleep <- true
	}
	// Replace pollDone with a closure that will tell us when the poller is
	// exiting.
	done := make(chan bool)
	pollDone = func() { done <- true }
	// Put things as they were when the test finishes.
	defer func() {
		pollSleep = time.Sleep
		pollDone = func() {}
	}()
	s := NewServer("1.x", ts.URL, 1*time.Millisecond)
	<-sleep // Wait for poll loop to start sleeping.
	// Make first request to the server.
	r, _ := http.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	s.ServeHTTP(w, r)
	if b := w.Body.String(); !strings.Contains(b, "No.") {
		t.Fatalf("body = %s, want no", b)
	}
	status = http.StatusOK
	<-sleep // Permit poll loop to stop sleeping.
	<-done  // Wait for poller to see the "OK" status and exit.
	// Make second request to the server.
	w = httptest.NewRecorder()
	s.ServeHTTP(w, r)
	if b := w.Body.String(); !strings.Contains(b, "YES!") {
		t.Fatalf("body = %q, want yes", b)
	}
}
|
package main
import (
"fmt"
"math/rand"
"net"
"os"
"time"
)
// Standard Channels
// VERSION_ROLLING_BITS = 16
// NONCE_BITS = 32

// MessageFrame is the framing envelope for one stratum-v2 style message.
type MessageFrame struct {
	ExtensionType uint16
	MsgType uint8
	// MsgLength uint24
	Payload []byte
}

// SignatureNoiseMessage carries certificate validity data for the noise
// handshake; the signature field is not yet implemented.
type SignatureNoiseMessage struct {
	Version uint16
	ValidFrom uint32
	NotValidAfter uint32
	// Signature ed25519
}

// Certificate describes a signed endpoint identity; key and signature
// fields are not yet implemented.
type Certificate struct {
	Version uint16
	ValidFrom uint32
	NotValidAfter uint32
	// PublicKey PUBKEY
	// AuthorityPublicKey PUBKEY
	// Signature ed25519
}
// main starts a TCP listener on the port given as the first CLI argument
// and spawns HandleConnection for every accepted connection.
func main() {
	arguments := os.Args
	if len(arguments) == 1 {
		fmt.Println("Please provide a port number!")
		return
	}
	// Check for config
	// Check for pool
	// Start server
	PORT := ":" + arguments[1]
	l, err := net.Listen("tcp4", PORT)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer l.Close()
	// NOTE(review): rand.Seed is deprecated since Go 1.20 (the generator is
	// auto-seeded); harmless here, remove when the module targets 1.20+.
	rand.Seed(time.Now().Unix())
	for {
		c, err := l.Accept()
		if err != nil {
			// Accept errors terminate the server loop entirely.
			fmt.Println(err)
			return
		}
		go HandleConnection(c)
	}
}
|
package chatroom
import ("testing")
/*
// Acceptance test
func Test_our_chatroom_is_live(t *testing.T) {
var chatroom *Chatroom = new (Chatroom)
bigJim := ClientMock{"Big Jim", chatroom, noMsg}
barbie:= ClientMock{"Barbie", chatroom, noMsg}
bigJim.enter()
barbie.enter()
if (bigJim.popLastMessage() != "Barbie entered the room") {
t.Error("Big Jim should be notified of Barbie's arrival");
}
bigJim.says("I am stronger than you Barbie");
if (barbie.popLastMessage() != "Big Jim: I am stronger than you Barbie") {
t.Error("Barbie should receive Big Jim's messages");
}
if (bigJim.popLastMessage() != "Big Jim: I am stronger than you Barbie") {
t.Error("Big Jim should receive his own messages");
}
bigJim.leaves();
if (barbie.popLastMessage() != "Big Jim left the room") {
t.Error("Barbie should be notified of Big Jim's departure");
}
barbie.says("Better of alone");
if (bigJim.popLastMessage() != "") {
t.Error("Big Jim should not receive messages after leaving");
}
}
*/
/*
// Test plan
enter
messages
own messages
multithreading of messages
exit
no more messages after exit
*/
// ClientMock is a chatroom-client test double: it records the most recent
// notification so tests can inspect and consume it.
type ClientMock struct {
	_name string
	chatroom *Chatroom
	lastMessage string
}

// notify stores the delivered message for later inspection.
func (client *ClientMock) notify(message string) {
	client.lastMessage = message
}

// enter joins the mock's chatroom.
func (client *ClientMock) enter() {
	client.chatroom.enter(client)
}

// name returns the mock client's display name.
func (client *ClientMock) name() string {
	return client._name
}

// noMsg is the sentinel stored when no message is pending.
var noMsg string = "vide"

// popLastMessage returns the last received message and resets the slot
// back to the noMsg sentinel.
func (client *ClientMock) popLastMessage() string {
	msg := client.lastMessage
	client.lastMessage = noMsg
	return msg
}
// Test_clients_can_enter_the_chatroom verifies that a client is notified
// of its own arrival when entering.
func Test_clients_can_enter_the_chatroom(t *testing.T) {
	var chatroom *Chatroom = newChatroom()
	bigJim := ClientMock{"Big Jim", chatroom, noMsg}
	bigJim.enter()
	if (bigJim.popLastMessage() != "Big Jim entered the room") {
		t.Error("FAIL: Big Jim should be notified of his arrival")
	}
}
// Test_our_chatroom_is_live verifies that an existing occupant is notified
// when a second client enters the room.
func Test_our_chatroom_is_live(t *testing.T) {
	var chatroom *Chatroom = newChatroom()
	bigJim := ClientMock{"Big Jim", chatroom, noMsg}
	barbie := ClientMock{"Barbie", chatroom, noMsg}
	bigJim.enter()
	barbie.enter()
	if (bigJim.popLastMessage() != "Barbie entered the room") {
		t.Error("FAIL: Big Jim should be notified of Barbie's arrival");
	}
}
package main

import "strings"

// encrypt applies a Vigenère-style shift to text using key, operating on
// the lowercased input; characters outside the alphabet En pass through
// unchanged.
// NOTE(review): the key index advances on every character — including ones
// that are not enciphered — and key is assumed lowercase; confirm both
// against the intended cipher variant.
func encrypt(text string, key string) string {
	chars := []rune(strings.ToLower(text))
	for idx, ch := range chars {
		if !strings.ContainsRune(En, ch) {
			continue
		}
		shift := rune(key[idx%len(key)]) - a
		chars[idx] = ((ch - a + shift) % Alphabet) + a
	}
	return string(chars)
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-26 17:00
* Description:
*****************************************************************/
package regcenter
import (
"fmt"
"github.com/go-xe2/x/crypto/xmd5"
"github.com/go-xe2/x/encoding/xparser"
"github.com/go-xe2/x/encoding/xyaml"
"github.com/go-xe2/x/os/xfile"
"github.com/go-xe2/x/os/xlog"
"github.com/go-xe2/x/type/t"
"github.com/go-xe2/xthrift/pdl"
"io/ioutil"
"os"
)
// loadFile parses one host registry file into p.items, tagging every
// parsed entry with fileId so entries can be removed when the file later
// changes or disappears.
//
// It returns the file's current MD5. When the supplied md5 still matches,
// the file is unchanged and parsing is skipped; a missing file is not an
// error ("", nil).
//
// Fix: file.Close() is now deferred immediately after a successful Open;
// previously the ReadAll error path returned before the defer was
// registered, leaking the file handle.
func (p *THostStore) loadFile(fileName string, md5 string, fileId int) (fileMd5 string, err error) {
	xlog.Debug("load host file:", fileName)
	if !xfile.Exists(fileName) {
		return "", nil
	}
	if curMd5, err := xmd5.EncryptFile(fileName); err != nil {
		return "", err
	} else {
		if md5 != curMd5 {
			fileMd5 = curMd5
		} else {
			// File unchanged; nothing to do.
			return md5, nil
		}
	}
	file, err := xfile.OpenWithFlag(fileName, os.O_RDONLY)
	if err != nil {
		return fileMd5, err
	}
	defer file.Close()
	fileData, err := ioutil.ReadAll(file)
	if err != nil {
		return fileMd5, err
	}
	parser, err := xparser.LoadContent(fileData)
	if err != nil {
		return fileMd5, err
	}
	// Drop entries previously loaded from this file before re-adding.
	if fileId >= 0 {
		p.removeNodeByFileId(fileId)
	}
	mp := parser.ToMap()
	for k, v := range mp {
		items, ok := v.([]interface{})
		if !ok {
			continue
		}
		var hostItems map[string]*THostStoreToken
		if tmp, ok := p.items[k]; ok {
			hostItems = tmp
		} else {
			hostItems = make(map[string]*THostStoreToken)
		}
		for _, item := range items {
			host := ""
			port := 0
			project := ""
			itemMp, itemOk := item.(map[string]interface{})
			if !itemOk {
				continue
			}
			if s, ok := itemMp["host"].(string); ok {
				host = s
			}
			if n, ok := itemMp["port"]; ok {
				port = t.Int(n)
			}
			if s, ok := itemMp["project"].(string); ok {
				project = s
			}
			// host and port are both mandatory for a usable endpoint.
			if host == "" || port == 0 {
				continue
			}
			hostItems[fmt.Sprintf("%s:%d", host, port)] = &THostStoreToken{Project: project, Host: host, Port: port, fileId: fileId, Ext: 0}
		}
		if len(hostItems) > 0 {
			p.items[k] = hostItems
		} else {
			delete(p.items, k)
		}
	}
	return fileMd5, nil
}
// loadHostFileMd5 restores the per-file MD5 cache from host_file.md5 in
// the save path; a missing cache file is not an error.
func (p *THostStore) loadHostFileMd5() error {
	md5File := xfile.Join(p.savePath, "host_file.md5")
	if !xfile.Exists(md5File) {
		return nil
	}
	raw := xfile.GetBinContents(md5File)
	p.filesMd5.Clear()
	var entries map[string]interface{}
	if err := xyaml.DecodeTo(raw, &entries); err != nil {
		xlog.Debug("load file md5 error:", err)
		return err
	}
	for name, digest := range entries {
		if s, ok := digest.(string); ok {
			p.filesMd5.Set(name, s)
		}
	}
	return nil
}
// saveHostFileMd5 serializes the per-file MD5 cache to host_file.md5 in
// the save path, truncating any previous contents.
func (p *THostStore) saveHostFileMd5() error {
	encoded, err := xyaml.Encode(p.filesMd5)
	if err != nil {
		return err
	}
	target := xfile.Join(p.savePath, "host_file.md5")
	f, err := xfile.OpenWithFlag(target, os.O_CREATE|os.O_TRUNC|os.O_RDWR)
	if err != nil {
		xlog.Debug("open md5 file error:", err)
		return err
	}
	defer f.Close()
	if _, err := f.Write(encoded); err != nil {
		xlog.Debug("save file md5 error:", err)
		return err
	}
	return nil
}
// Load rebuilds the whole store from the host-file directory: it resets
// all in-memory state, scans for files matching *<fileExt>, assigns each
// a fresh file id, parses it via loadFile, and finally persists the
// refreshed MD5 cache. Per-file parse errors are logged but do not abort
// the load.
func (p *THostStore) Load() error {
	if !xfile.Exists(p.HostFilePath()) {
		if err := xfile.Mkdir(p.HostFilePath()); err != nil {
			return err
		}
	}
	files, err := xfile.ScanDir(p.HostFilePath(), fmt.Sprintf("*%s", p.fileExt), true)
	if err != nil {
		xlog.Debug("scanDir error:", err)
		return err
	}
	// Reset existing data.
	p.items = make(map[string]map[string]*THostStoreToken)
	p.fileIds = make(map[string]int)
	p.maxFileId = 0
	// Loading the cached md5 file is currently disabled, so every file is
	// re-parsed on Load.
	//if err := p.loadHostFileMd5(); err != nil {
	//	xlog.Debug("loadFileMd5 error:", err)
	//	return err
	//}
	p.filesMd5.Clear()
	for _, fileName := range files {
		// Assign a file id.
		baseName := xfile.Basename(fileName)
		fileId := p.maxFileId
		p.maxFileId++
		p.fileIds[baseName] = fileId
		var md5 = ""
		if p.filesMd5.Contains(baseName) {
			md5 = p.filesMd5.Get(baseName)
		}
		if curMd5, err := p.loadFile(fileName, md5, fileId); err != nil {
			xlog.Error(err)
		} else {
			if curMd5 != md5 {
				p.filesMd5.Set(baseName, curMd5)
			}
		}
	}
	return p.saveHostFileMd5()
}
// AddHostWithProject registers host:port for every service exposed by
// proj; a nil project is a no-op.
func (p *THostStore) AddHostWithProject(proj *pdl.FileProject, host string, port int, ext ...int) {
	if proj == nil {
		return
	}
	for svc := range proj.AllServices() {
		p.AddHost(proj.GetProjectName(), svc, host, port, ext...)
	}
}
// AddHost registers the host:port endpoint under the given service name,
// marking it unsaved and associating it with the project's file id
// (allocating a new id for an unknown project file).
// NOTE(review): id allocation here is post-increment (maxFileId++ then
// use) while Load uses the value before incrementing — ids from the two
// paths are offset by one; confirm this asymmetry is intended.
func (p *THostStore) AddHost(project string, svcFullName string, host string, port int, ext ...int) {
	k := fmt.Sprintf("%s:%d", host, port)
	node, ok := p.items[svcFullName]
	fileName := fmt.Sprintf("%s%s", project, p.fileExt)
	fileId := 0
	if n, ok := p.fileIds[fileName]; ok {
		fileId = n
	} else {
		p.maxFileId++
		fileId = p.maxFileId
		p.fileIds[fileName] = fileId
	}
	// Optional ext value; defaults to 0.
	nExt := 0
	if len(ext) > 0 {
		nExt = ext[0]
	}
	if ok {
		// Existing service entry: only add if this endpoint is new.
		if _, ok := node[k]; !ok {
			node[fmt.Sprintf("%s:%d", host, port)] = &THostStoreToken{Project: project, Host: host, Port: port, isSaved: false, fileId: fileId, Ext: nExt}
		}
	} else {
		node = make(map[string]*THostStoreToken)
		node[k] = &THostStoreToken{Project: project, Host: host, Port: port, isSaved: false, fileId: fileId, Ext: nExt}
	}
	p.items[svcFullName] = node
}
// HasProject reports whether any registered host token belongs to the
// given project. Fix: replaces the isRef flag + double break pattern with
// an idiomatic early return.
func (p *THostStore) HasProject(project string) bool {
	for _, nodes := range p.items {
		for _, node := range nodes {
			if node.Project == project {
				return true
			}
		}
	}
	return false
}
// RemoveHost deregisters the host:port endpoint from every service and,
// if anything was removed, rewrites the host files via Save.
// Fixes: removed the dead `project` variable (collected but never used)
// and the loop variable that shadowed the `host` parameter; the linear
// key scan is replaced by a direct map lookup.
func (p *THostStore) RemoveHost(host string, port int) error {
	k := fmt.Sprintf("%s:%d", host, port)
	isModify := false
	for _, nodes := range p.items {
		if _, ok := nodes[k]; ok {
			delete(nodes, k)
			isModify = true
		}
	}
	if isModify {
		return p.Save()
	}
	return nil
}
// RemoveProject deletes every host token belonging to the given project
// and persists the change when anything was removed.
// Fix: removed the dead `if p.savePath == ""` branch — both paths after a
// successful Save returned nil, so the check had no effect.
func (p *THostStore) RemoveProject(project string) error {
	isModify := false
	for _, nodes := range p.items {
		for k, node := range nodes {
			if node.Project == project {
				delete(nodes, k)
				isModify = true
			}
		}
	}
	if isModify {
		return p.Save()
	}
	return nil
}
// removeNodeByFileId drops every host token that originated from the
// host file with the given id.
func (p *THostStore) removeNodeByFileId(fileId int) {
	for _, nodes := range p.items {
		for key, token := range nodes {
			if token.fileId == fileId {
				delete(nodes, key)
			}
		}
	}
}
// RemoveFile forgets all tokens loaded from fileName, drops its md5 and
// file-id bookkeeping, and deletes the file from disk. The file watcher
// is paused for the duration so the deletion does not retrigger a reload.
// Fix: an unrecognized file previously defaulted to fileId 0, which could
// delete tokens belonging to an unrelated file; it now returns early.
func (p *THostStore) RemoveFile(fileName string) {
	p.DisableFileWatch()
	defer func() {
		if err := p.EnableFileWatch(); err != nil {
			xlog.Error(err)
		}
	}()
	baseName := xfile.Basename(fileName)
	fileId, ok := p.fileIds[baseName]
	if !ok {
		// Unknown file: nothing was loaded from it, nothing to remove.
		return
	}
	isModify := false
	for _, nodes := range p.items {
		for k, node := range nodes {
			if node.fileId == fileId {
				delete(nodes, k)
				isModify = true
			}
		}
	}
	if isModify {
		p.filesMd5.Remove(baseName)
		delete(p.fileIds, baseName)
		pathName := xfile.Join(p.savePath, fileName)
		if xfile.Exists(pathName) {
			if err := xfile.Remove(pathName); err != nil {
				xlog.Error(err)
			}
		}
	}
}
// saveFile serializes items as YAML into fileName under savePath and
// records the md5 of the written content. The file watcher is paused
// while writing so our own write does not trigger a reload.
func (p *THostStore) saveFile(fileName string, items map[string][]*THostStoreToken) error {
	p.DisableFileWatch()
	defer func() {
		if err := p.EnableFileWatch(); err != nil {
			xlog.Error(err)
		}
	}()
	if !xfile.Exists(p.savePath) {
		if err := xfile.Mkdir(p.savePath); err != nil {
			return err
		}
	}
	data, err := xyaml.Encode(items)
	if err != nil {
		return err
	}
	target := xfile.Join(p.savePath, fileName)
	f, err := xfile.OpenWithFlag(target, os.O_CREATE|os.O_TRUNC|os.O_RDWR)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		return err
	}
	sum, err := xmd5.Encrypt(data)
	if err != nil {
		return err
	}
	p.filesMd5.Set(fileName, sum)
	return nil
}
// Save writes every tracked host token back to its originating file,
// grouped by file id. Failures on individual files are logged and skipped
// so one bad file does not abort the rest; Save itself always returns nil.
// Fixes: the `if s, ok := ...; !ok { continue } else { ... }` lookup is
// replaced with an idiomatic comma-ok lookup, and fileName is scoped to
// the loop instead of being hoisted and reset each iteration.
func (p *THostStore) Save() error {
	// Group tokens per file id, then per service name.
	files := make(map[int]map[string][]*THostStoreToken)
	for svcName, nodes := range p.items {
		for _, token := range nodes {
			file, ok := files[token.fileId]
			if !ok {
				file = make(map[string][]*THostStoreToken)
				files[token.fileId] = file
			}
			file[svcName] = append(file[svcName], token)
		}
	}
	// Invert fileIds (name -> id) into id -> name for the lookup below.
	fileNames := make(map[int]string, len(p.fileIds))
	for name, id := range p.fileIds {
		fileNames[id] = name
	}
	// Suspend the file watcher while files are rewritten on disk.
	p.DisableFileWatch()
	defer func() {
		if err := p.EnableFileWatch(); err != nil {
			xlog.Error(err)
		}
	}()
	for fileId, items := range files {
		fileName, ok := fileNames[fileId]
		if !ok {
			// No registered file name for this id; nothing to write.
			continue
		}
		if err := p.saveFile(fileName, items); err != nil {
			xlog.Error(err)
		}
	}
	return nil
}
// AllHosts returns a snapshot of every service's host tokens, keyed by
// full service name. The slices are fresh; the tokens are shared pointers.
func (p *THostStore) AllHosts() map[string][]*THostStoreToken {
	result := make(map[string][]*THostStoreToken, len(p.items))
	for svc, nodes := range p.items {
		tokens := make([]*THostStoreToken, 0, len(nodes))
		for _, token := range nodes {
			tokens = append(tokens, token)
		}
		result[svc] = tokens
	}
	return result
}
// FileHosts returns the host tokens that were loaded from the host file
// with the given id, grouped by full service name.
func (p *THostStore) FileHosts(fileId int) map[string][]*THostStoreToken {
	result := make(map[string][]*THostStoreToken)
	for svc, nodes := range p.items {
		for _, token := range nodes {
			if token.fileId != fileId {
				continue
			}
			result[svc] = append(result[svc], token)
		}
	}
	return result
}
// FileHostsByName resolves fileName to its file id and returns that
// file's host tokens; nil when the file is unknown.
func (p *THostStore) FileHostsByName(fileName string) map[string][]*THostStoreToken {
	fileId, ok := p.fileIds[fileName]
	if !ok {
		return nil
	}
	return p.FileHosts(fileId)
}
// AllFileID returns the file-name -> file-id table.
// NOTE(review): this exposes the live internal map — a caller mutating it
// would corrupt store state; confirm callers treat it as read-only.
func (p *THostStore) AllFileID() map[string]int {
	return p.fileIds
}
// AllFileMd5 returns a file-name -> md5 map of the loaded host files.
func (p *THostStore) AllFileMd5() map[string]string {
	return p.filesMd5.Map()
}
// GetSvcHosts returns the host tokens registered for the given full
// service name; an empty (non-nil) slice when the service is unknown.
func (p *THostStore) GetSvcHosts(fullSvcName string) []*THostStoreToken {
	result := make([]*THostStoreToken, 0)
	// Ranging over the nil map returned for a missing key yields nothing,
	// so the unknown-service case needs no separate branch.
	for _, token := range p.items[fullSvcName] {
		result = append(result, token)
	}
	return result
}
|
package remove
import (
"errors"
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/pkg/util/factory"
"github.com/devspace-cloud/devspace/pkg/util/message"
"github.com/spf13/cobra"
)
// imageCmd holds the flags for the `devspace remove image` subcommand.
type imageCmd struct {
	*flags.GlobalFlags
	// RemoveAll removes every configured image when --all is set.
	RemoveAll bool
}
// newImageCmd builds the cobra command for `devspace remove image`.
// Fix: the local *cobra.Command variable used to shadow the imageCmd type;
// it is renamed so the type name stays unambiguous inside this function.
func newImageCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command {
	cmd := &imageCmd{GlobalFlags: globalFlags}
	removeImageCmd := &cobra.Command{
		Use:   "image",
		Short: "Removes one or all images from the devspace",
		Long: `
#######################################################
############ devspace remove image ####################
#######################################################
Removes one or all images from a devspace:
devspace remove image default
devspace remove image --all
#######################################################
	`,
		Args: cobra.MaximumNArgs(1),
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			return cmd.RunRemoveImage(f, cobraCmd, args)
		}}
	removeImageCmd.Flags().BoolVar(&cmd.RemoveAll, "all", false, "Remove all images")
	return removeImageCmd
}
// RunRemoveImage executes the remove image command logic: it locates the
// devspace config, removes one image (args[0]) or all (--all), and saves
// the updated config.
// Fix: cobra.MaximumNArgs(1) also allows zero arguments, so the success
// message now guards args[0] instead of indexing it unconditionally,
// which could panic.
func (cmd *imageCmd) RunRemoveImage(f factory.Factory, cobraCmd *cobra.Command, args []string) error {
	// Set config root
	log := f.GetLog()
	configLoader := f.NewConfigLoader(cmd.ToConfigOptions(), log)
	configExists, err := configLoader.SetDevSpaceRoot()
	if err != nil {
		return err
	}
	if !configExists {
		return errors.New(message.ConfigNotFound)
	}
	config, err := configLoader.LoadWithoutProfile()
	if err != nil {
		return err
	}
	// Delegate the actual removal, then persist the modified config.
	configureManager := f.NewConfigureManager(config, log)
	err = configureManager.RemoveImage(cmd.RemoveAll, args)
	if err != nil {
		return err
	}
	err = configLoader.Save(config)
	if err != nil {
		return err
	}
	if cmd.RemoveAll {
		log.Done("Successfully removed all images")
	} else if len(args) > 0 {
		log.Donef("Successfully removed image %s", args[0])
	}
	return nil
}
|
package routes
import (
"aplicacoes/projeto-zumbie/config"
"aplicacoes/projeto-zumbie/controller"
"fmt"
"log"
"net/http"
"github.com/gorilla/mux"
)
// portaAplicacao holds the listen address (set to ":3000" in HandleFunc).
var portaAplicacao string
// HandleFunc wires the API routes into a mux router, verifies the database
// connection, and serves HTTP on port 3000 (blocks until the server exits).
func HandleFunc() {
	router := mux.NewRouter()
	config.TryConn()
	portaAplicacao = ":3000"
	fmt.Println("Aplicação ON: porta => ", portaAplicacao)
	// Table-driven route registration keeps each endpoint on one line.
	endpoints := []struct {
		path    string
		handler func(http.ResponseWriter, *http.Request)
		method  string
	}{
		{"/api/", controller.HomeAPI, "GET"},
		{"/api/sobreviventes", controller.BuscarTodosSobrevivente, "GET"},
		{"/api/adicionar/sobrevivente", controller.AdicionarNovoSobrevivente, "POST"},
		{"/api/sobrevivente/{sobrevivente1}/{sobrevivente2}", controller.BuscarSobreviventes, "GET"},
		{"/api/trocar", controller.RealizarTroca, "POST"},
	}
	for _, e := range endpoints {
		router.HandleFunc(e.path, e.handler).Methods(e.method)
	}
	log.Fatal(http.ListenAndServe(portaAplicacao, router))
}
|
package main
import "fmt"
// st1 and st2 are structurally identical, which is what makes the explicit
// conversion st2(s1) in main legal under Go's conversion rules.
type st1 struct {
	Id int
	Name string
}
// st2 mirrors st1 field-for-field (same names, types, and order).
type st2 struct {
	Id int
	Name string
}
// main demonstrates converting between two structurally identical struct
// types and prints both values.
func main() {
	source := st1{Id: 12, Name: "abc2"}
	converted := st2(source)
	fmt.Println("", source, converted)
}
|
package pov
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/google/uuid"
"github.com/qlcchain/go-qlc/common"
"github.com/qlcchain/go-qlc/common/event"
"github.com/qlcchain/go-qlc/common/types"
"github.com/qlcchain/go-qlc/config"
"github.com/qlcchain/go-qlc/ledger"
"github.com/qlcchain/go-qlc/mock"
"github.com/qlcchain/go-qlc/trie"
)
// povProcessorMockData bundles everything a PoV processor test needs:
// config, event bus, ledger, and the mocked chain/verifier/syncer.
type povProcessorMockData struct {
	config *config.Config
	eb event.EventBus
	ledger ledger.Store
	chain PovProcessorChainReader
	verifier PovProcessorVerifier
	syncer PovProcessorSyncer
}
// mockPovProcessorChainReader is an in-memory chain backed by a hash map.
type mockPovProcessorChainReader struct {
	blocks map[types.Hash]*types.PovBlock
}
// HasBestBlock reports whether a block with the given hash is stored.
// The height parameter is required by the interface but unused by the mock.
// Fix: collapses `if cond { return true } return false` into a direct
// boolean return.
func (c *mockPovProcessorChainReader) HasBestBlock(hash types.Hash, height uint64) bool {
	return c.blocks[hash] != nil
}
// GetBlockByHash returns the stored block for the hash, serving the
// genesis block specially; nil when unknown.
// Fix: a missing map key already yields a nil *types.PovBlock, so the
// explicit nil check after the lookup was redundant.
func (c *mockPovProcessorChainReader) GetBlockByHash(hash types.Hash) *types.PovBlock {
	genesisBlk := common.GenesisPovBlock()
	if hash == genesisBlk.Hash {
		return &genesisBlk
	}
	return c.blocks[hash]
}
// InsertBlock stores the block by hash; the state trie is ignored by the
// mock and insertion never fails.
func (c *mockPovProcessorChainReader) InsertBlock(block *types.PovBlock, stateTrie *trie.Trie) error {
	c.blocks[block.Hash] = block
	return nil
}
// mockPovProcessorVerifier accepts every block unconditionally.
type mockPovProcessorVerifier struct{}
// VerifyFull returns an empty (passing) verification stat for any block.
func (v *mockPovProcessorVerifier) VerifyFull(block *types.PovBlock) *PovVerifyStat {
	return &PovVerifyStat{}
}
// mockPovProcessorSyncer swallows all sync requests (no-ops).
type mockPovProcessorSyncer struct{}
func (s *mockPovProcessorSyncer) requestBlocksByHashes(reqBlkHashes []*types.Hash, peerID string) {}
func (s *mockPovProcessorSyncer) requestTxsByHashes(reqTxHashes []*types.Hash, peerID string) {}
// setupPovProcessorTestCase builds the mock data for a PoV processor test
// under a unique temp directory and returns a teardown function that
// closes the ledger and event bus and removes the directory.
func setupPovProcessorTestCase(t *testing.T) (func(t *testing.T), *povProcessorMockData) {
	t.Parallel()
	md := &povProcessorMockData{
		chain: &mockPovProcessorChainReader{
			blocks: make(map[types.Hash]*types.PovBlock),
		},
		verifier: &mockPovProcessorVerifier{},
		syncer: &mockPovProcessorSyncer{},
	}
	// Unique per-test directory so parallel tests do not collide.
	uid := uuid.New().String()
	rootDir := filepath.Join(config.QlcTestDataDir(), uid)
	md.config, _ = config.DefaultConfig(rootDir)
	lDir := filepath.Join(rootDir, "ledger")
	_ = os.RemoveAll(lDir)
	md.ledger = ledger.NewLedger(lDir)
	md.eb = event.GetEventBus(lDir)
	// Teardown: close resources in reverse-ish order, then delete files.
	return func(t *testing.T) {
		err := md.ledger.DBStore().Close()
		if err != nil {
			t.Fatal(err)
		}
		err = os.RemoveAll(rootDir)
		if err != nil {
			t.Fatal(err)
		}
		err = md.eb.Close()
		if err != nil {
			t.Fatal(err)
		}
	}, md
}
// TestPovProcessor_AddBlock feeds three chained blocks to the processor
// and verifies each one lands in the mock chain.
func TestPovProcessor_AddBlock(t *testing.T) {
	teardownTestCase, md := setupPovProcessorTestCase(t)
	defer teardownTestCase(t)
	processor := NewPovBlockProcessor(md.eb, md.ledger, md.chain, md.verifier, md.syncer)
	processor.Init()
	processor.Start()
	genesisBlk := common.GenesisPovBlock()
	// Generate blk1 -> blk2 -> blk3 on top of genesis and submit each.
	prev := &genesisBlk
	var blks []*types.PovBlock
	for i := 0; i < 3; i++ {
		blk, _ := mock.GeneratePovBlock(prev, 0)
		processor.AddBlock(blk, types.PovBlockFromRemoteBroadcast, "test")
		blks = append(blks, blk)
		prev = blk
	}
	// Give the processor's worker goroutine time to insert the blocks.
	time.Sleep(time.Second)
	for i, blk := range blks {
		if md.chain.GetBlockByHash(blk.GetHash()) == nil {
			t.Fatalf("failed to add block%d %s", i+1, blk.GetHash())
		}
	}
	processor.Stop()
}
// TestPovProcessor_OrphanBlock submits blk3 and blk4 while withholding
// their ancestor blk2, checks they stay orphaned, then supplies blk2 and
// verifies the whole chain gets connected.
func TestPovProcessor_OrphanBlock(t *testing.T) {
	teardownTestCase, md := setupPovProcessorTestCase(t)
	defer teardownTestCase(t)
	processor := NewPovBlockProcessor(md.eb, md.ledger, md.chain, md.verifier, md.syncer)
	processor.Init()
	processor.Start()
	processor.onPovSyncState(common.Syncdone)
	genesisBlk := common.GenesisPovBlock()
	blk1, _ := mock.GeneratePovBlock(&genesisBlk, 0)
	processor.AddBlock(blk1, types.PovBlockFromRemoteBroadcast, "test")
	// blk2 is generated but deliberately NOT submitted yet, so blk3/blk4
	// arrive with a missing parent and must be held as orphans.
	blk2, _ := mock.GeneratePovBlock(blk1, 0)
	blk3, _ := mock.GeneratePovBlock(blk2, 0)
	processor.AddBlock(blk3, types.PovBlockFromRemoteBroadcast, "test")
	blk4, _ := mock.GeneratePovBlock(blk3, 0)
	processor.AddBlock(blk4, types.PovBlockFromRemoteBroadcast, "test")
	// Let the worker goroutine process the queue, then fire the orphan
	// maintenance timers manually.
	time.Sleep(time.Second)
	processor.onRequestOrphanBlocksTimer()
	processor.onCheckOrphanBlocksTimer()
	retBlk1 := md.chain.GetBlockByHash(blk1.GetHash())
	if retBlk1 == nil {
		t.Fatalf("failed to add block1 %s", blk1.GetHash())
	}
	// blk3 must still be orphaned because blk2 is missing.
	retBlk3 := md.chain.GetBlockByHash(blk3.GetHash())
	if retBlk3 != nil {
		t.Fatalf("block3 %s is not orphan", blk3.GetHash())
	}
	// Supplying blk2 should unblock blk3 and blk4.
	processor.AddBlock(blk2, types.PovBlockFromRemoteBroadcast, "test")
	time.Sleep(time.Second)
	retBlk2 := md.chain.GetBlockByHash(blk2.GetHash())
	if retBlk2 == nil {
		t.Fatalf("failed to add block2 %s", blk2.GetHash())
	}
	retBlk3 = md.chain.GetBlockByHash(blk3.GetHash())
	if retBlk3 == nil {
		t.Fatalf("failed to add block3 %s", blk3.GetHash())
	}
	retBlk4 := md.chain.GetBlockByHash(blk4.GetHash())
	if retBlk4 == nil {
		t.Fatalf("failed to add block4 %s", blk4.GetHash())
	}
	processor.Stop()
}
|
package main
import (
"fmt"
"github.com/nsf/termbox-go"
)
// Scene is one screen of the game (e.g. a menu or a board): it reacts to
// termbox events and renders itself.
type Scene interface {
	React(g *Game, e termbox.Event) error
	SetView()
}
// Game holds the overall game state: win tallies, the run flag, the
// active scene, and which mark moves first.
type Game struct {
	noCircleWin int
	noCrossWin int
	running bool
	scene Scene
	firstTurn Cell
}
// NewGame returns a Game showing the menu scene, not yet running.
func NewGame() *Game {
	g := &Game{}
	g.scene = NewMenu()
	g.running = false
	return g
}
// Init starts termbox, clears the screen with default colors, and flushes
// the first (blank) frame.
func (g *Game) Init() error {
	err := termbox.Init()
	if err == nil {
		err = termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	}
	if err != nil {
		return err
	}
	return termbox.Flush()
}
// Close stops the main loop and shuts down termbox.
func (g *Game) Close() {
	g.running = false
	termbox.Close()
}
// SetFirstTurn records which mark moves first.
func (g *Game) SetFirstTurn(c Cell) {
	g.firstTurn = c
}
// GetFirstTurn reports which mark moves first.
func (g *Game) GetFirstTurn() Cell {
	return g.firstTurn
}
// SetScene swaps the active scene.
func (g *Game) SetScene(s Scene) {
	g.scene = s
}
// CountUpCircleWin increments circle's win tally.
func (g *Game) CountUpCircleWin() {
	g.noCircleWin++
}
// CountUpCrossWin increments cross's win tally.
func (g *Game) CountUpCrossWin() {
	g.noCrossWin++
}
// setBaseView draws the chrome shared by every scene: title, exit hint,
// win counters (once a game has been decided), and the board frame.
// The x/y offsets position text relative to the screen center; they are
// tuned to the string widths — presumably half the text length, TODO
// confirm before changing any label.
func (g *Game) setBaseView() {
	x, y := tbxCenterXY()
	tbxSetText(x-9, y-4, "Circle Cross Game",
		termbox.ColorGreen, termbox.ColorDefault)
	tbxSetText(x-14, y+4, "(Press q or Ctrl-c to exit)",
		termbox.ColorGreen, termbox.ColorDefault)
	// Scores are hidden until at least one side has won a game.
	if !(g.noCircleWin == 0 && g.noCrossWin == 0) {
		tbxSetText(x-12, y-1, fmt.Sprintf("Circle: %d", g.noCircleWin),
			termbox.ColorGreen, termbox.ColorDefault)
		tbxSetText(x-12, y+1, fmt.Sprintf(" Cross: %d", g.noCrossWin),
			termbox.ColorGreen, termbox.ColorDefault)
	}
	tbxSetFrame(x-2, y-2, x+2, y+2, termbox.ColorGreen)
}
// setView clears the screen, draws the shared chrome plus the active
// scene, and flushes the frame to the terminal.
func (g *Game) setView() error {
	if err := termbox.Clear(termbox.ColorDefault, termbox.ColorDefault); err != nil {
		return err
	}
	g.setBaseView()
	g.scene.SetView()
	return termbox.Flush()
}
// react forwards the event to the active scene.
func (g *Game) react(e termbox.Event) error {
	return g.scene.React(g, e)
}
// Main runs the render/poll/react loop until a scene stops the game
// (running becomes false) or an error occurs.
func (g *Game) Main() error {
	g.running = true
	for {
		if !g.running {
			return nil
		}
		if err := g.setView(); err != nil {
			return err
		}
		if err := g.react(termbox.PollEvent()); err != nil {
			return err
		}
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"time"
)
// main prints a timestamp every second, either to stdout or to the file
// named by -file. The loop never terminates on its own.
func main() {
	filename := flag.String("file", "", "log file (default: stdout)")
	flag.Parse()
	outFile := os.Stdout
	if name := *filename; name != "" {
		f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModePerm)
		if err != nil {
			log.Fatal(err)
		}
		outFile = f
	}
	fmt.Fprintln(outFile, "Starting timer...")
	ticker := time.Tick(1 * time.Second)
	for now := range ticker {
		fmt.Fprintf(outFile, "%v \n", now)
	}
}
|
// Copyright 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This test suite includes test cases to verify basic functionality
// in most common configurations
// +build runalways runoncewin
package e2e
import (
"github.com/vmware/vsphere-storage-for-docker/tests/utils/dockercli"
"github.com/vmware/vsphere-storage-for-docker/tests/utils/inputparams"
"github.com/vmware/vsphere-storage-for-docker/tests/utils/misc"
"github.com/vmware/vsphere-storage-for-docker/tests/utils/verification"
. "gopkg.in/check.v1"
)
// BasicTestSuite carries the shared test fixtures: the resolved test
// config, the ESX host, up to two docker hosts, and per-test unique
// volume/container names.
type BasicTestSuite struct {
	config *inputparams.TestConfig
	esx string
	vm1 string
	vm2 string
	vm1Name string
	vm2Name string
	volName1 string
	volName2 string
	containerName string
}
// SetUpSuite loads the test configuration once per suite, skipping the
// whole suite when none is available, and caches the ESX host plus the
// first (and optional second) docker host.
func (s *BasicTestSuite) SetUpSuite(c *C) {
	s.config = inputparams.GetTestConfig()
	if s.config == nil {
		c.Skip("Unable to retrieve test config, skipping basic tests")
	}
	cfg := s.config
	s.esx = cfg.EsxHost
	s.vm1, s.vm1Name = cfg.DockerHosts[0], cfg.DockerHostNames[0]
	if len(cfg.DockerHosts) == 2 {
		s.vm2, s.vm2Name = cfg.DockerHosts[1], cfg.DockerHostNames[1]
	}
}
// SetUpTest generates fresh, test-unique volume and container names so
// tests cannot collide on shared datastores.
func (s *BasicTestSuite) SetUpTest(c *C) {
	s.volName1 = inputparams.GetUniqueVolumeName(c.TestName())
	s.volName2 = inputparams.GetUniqueVolumeName(c.TestName())
	s.containerName = inputparams.GetUniqueContainerName(c.TestName())
}
// Register the suite with gocheck.
var _ = Suite(&BasicTestSuite{})
// Test volume lifecycle management on different datastores:
// VM1 - created on local VMFS datastore
// VM2 - created on shared VMFS datastore
// VM3 - created on shared VSAN datastore (TODO: currently not available)
//
// Test steps:
// 1. Create a volume, re-create the volume, verify the create is idempotent
// 2. Verify the volume is available
// 3. Attach the volume
// 4. Verify volume status is attached
// 5. Remove the volume (expect fail)
// 6. Remove the container
// 7. Verify volume status is detached
// 8. Remove the volume
// 9. Verify the volume is unavailable
func (s *BasicTestSuite) TestVolumeLifecycle(c *C) {
	misc.LogTestStart(c.TestName())
	for _, host := range s.config.DockerHosts {
		// Steps 1-2: create twice (idempotency) and verify availability.
		out, err := dockercli.CreateVolume(host, s.volName1)
		c.Assert(err, IsNil, Commentf(out))
		out, err = dockercli.CreateVolume(host, s.volName1)
		c.Assert(err, IsNil, Commentf(out))
		accessible := verification.CheckVolumeAvailability(host, s.volName1)
		c.Assert(accessible, Equals, true, Commentf("Volume %s is not available", s.volName1))
		// Steps 3-4: attach to a container and verify attached status.
		out, err = dockercli.AttachVolume(host, s.volName1, s.containerName)
		c.Assert(err, IsNil, Commentf(out))
		status := verification.VerifyAttachedStatus(s.volName1, host, s.esx)
		c.Assert(status, Equals, true, Commentf("Volume %s is not attached", s.volName1))
		// Step 5: deleting an attached volume must fail.
		out, err = dockercli.DeleteVolume(host, s.volName1)
		c.Assert(err, Not(IsNil), Commentf(out))
		// Steps 6-7: remove the container and verify detachment.
		out, err = dockercli.RemoveContainer(host, s.containerName)
		c.Assert(err, IsNil, Commentf(out))
		status = verification.VerifyDetachedStatus(s.volName1, host, s.esx)
		c.Assert(status, Equals, true, Commentf("Volume %s is still attached", s.volName1))
		// Steps 8-9: delete the volume and verify it is gone.
		out, err = dockercli.DeleteVolume(host, s.volName1)
		c.Assert(err, IsNil, Commentf(out))
		accessible = verification.CheckVolumeAvailability(host, s.volName1)
		c.Assert(accessible, Equals, false, Commentf("Volume %s is still available", s.volName1))
	}
	misc.LogTestEnd(c.TestName())
}
|
// @Description jwt
// @Author jiangyang
// @Created 2020/11/17 4:12 PM
package jwt
import (
"time"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/pkg/errors"
)
// DefaultExpireDuration is the token lifetime (30 days) used when
// CreateToken is called with a zero duration.
const DefaultExpireDuration = time.Hour * 24 * 30
var (
	// Sentinel errors returned by ParseToken.
	// NOTE(review): the capitalized, sentence-style messages violate Go
	// error-string convention, but changing them could break any consumer
	// matching on the text — leave as-is.
	ErrTokenExpired = errors.New("Token is expired")
	ErrTokenNotValidYet = errors.New("Token not active yet")
	ErrTokenMalformed = errors.New("That's not even a token")
	ErrTokenInvalid = errors.New("Couldn't handle this token")
	// SignKey is the HMAC signing secret; override it via Init.
	SignKey = []byte("243223ffslsfsldfl412fdsfsdf")
)
// Business is a sample custom payload carried inside the token claims.
type Business struct {
	UID uint `json:"uid"`
	Role uint `json:"role"`
}
// CustomClaims wraps an arbitrary Business payload together with the
// standard JWT claims (expiry etc.).
type CustomClaims struct {
	Business interface{}
	jwtgo.StandardClaims
}
// TokenResp is the API response shape: the signed token plus its expiry
// formatted as "2006-01-02 15:04:05".
type TokenResp struct {
	Token string `json:"token"`
	ExpiredAt string `json:"expired_at"`
}
// Init replaces the package-level HMAC signing secret. Call before any
// CreateToken/ParseToken; tokens signed with the old key will no longer verify.
func Init(key string) {
	SignKey = []byte(key)
}
// CreateToken signs an HS256 JWT carrying bus as the custom Business
// payload. A zero expires falls back to DefaultExpireDuration.
func CreateToken(bus interface{}, expires time.Duration) (*TokenResp, error) {
	lifetime := DefaultExpireDuration
	if expires != 0 {
		lifetime = expires
	}
	expiresAt := time.Now().Add(lifetime).Unix()
	claims := &CustomClaims{
		Business: bus,
		StandardClaims: jwtgo.StandardClaims{
			ExpiresAt: expiresAt,
		},
	}
	tokenStr, err := jwtgo.NewWithClaims(jwtgo.SigningMethodHS256, claims).SignedString(SignKey)
	if err != nil {
		return nil, err
	}
	resp := &TokenResp{
		Token: tokenStr,
		ExpiredAt: time.Unix(expiresAt, 0).Format("2006-01-02 15:04:05"),
	}
	return resp, nil
}
// ParseToken verifies tokenString against SignKey and returns the embedded
// Business payload, mapping validation failures to the package's sentinel
// errors.
// Security fix: the key callback now rejects tokens whose declared signing
// method is not HMAC. Previously the HMAC secret was returned for any
// algorithm in the token header, enabling algorithm-confusion attacks.
func ParseToken(tokenString string) (interface{}, error) {
	customClaims := CustomClaims{}
	token, err := jwtgo.ParseWithClaims(tokenString, &customClaims, func(token *jwtgo.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwtgo.SigningMethodHMAC); !ok {
			return nil, ErrTokenInvalid
		}
		return SignKey, nil
	})
	if err != nil {
		if ve, ok := err.(*jwtgo.ValidationError); ok {
			switch {
			case ve.Errors&jwtgo.ValidationErrorMalformed != 0:
				return nil, ErrTokenMalformed
			case ve.Errors&jwtgo.ValidationErrorExpired != 0:
				return nil, ErrTokenExpired
			case ve.Errors&jwtgo.ValidationErrorNotValidYet != 0:
				return nil, ErrTokenNotValidYet
			default:
				return nil, ErrTokenInvalid
			}
		}
	}
	// Non-ValidationError failures fall through to this generic check.
	if token == nil || !token.Valid {
		return nil, ErrTokenInvalid
	}
	return customClaims.Business, nil
}
|
package core
import (
"errors"
"flag"
"fmt"
)
// MakeInputData parses the CLI flags into raw arguments, validates them,
// and converts them into an InputData.
// Fix: the result of constructInputData was discarded and the function
// always returned nil with errors.New("undefined"); it now returns the
// constructed data.
func MakeInputData() (iData *InputData, err error) {
	arguments := makeRawArgumentString()
	fmt.Printf("%+v\n", *arguments)
	if passed, err := arguments.validate(); !passed {
		fmt.Println("raw input data validation did not passed")
		return nil, err
	}
	return arguments.constructInputData()
}
// constructInputData converts validated raw arguments into an InputData.
// Fix: the freshly built value was discarded by an explicit `return nil,
// nil`; it is now returned to the caller.
func (r *RawInputData) constructInputData() (iData *InputData, err error) {
	iData = &InputData{}
	return iData, nil
}
// makeRawArgumentString registers the CLI flags, parses os.Args, and
// returns the raw (unvalidated) inputs.
// NOTE: flag registration is a process-global side effect — calling this
// twice would panic on duplicate flag names.
func makeRawArgumentString() *RawInputData {
	rInputData := &RawInputData{}
	flag.StringVar(&rInputData.Mode, "Mode", "PublicKeyValidation", "What would you like to verify? [PrivateToPublic,...]")
	flag.StringVar(&rInputData.PrivateKey, "PrivateKey", "", "Path to private key")
	flag.StringVar(&rInputData.PublicKey, "PublicKey", "", "Path to public key")
	flag.Parse()
	return rInputData
}
// validate resolves the Mode string into a ModeState (stored on r) and
// runs the mode-specific path validation.
func (r *RawInputData) validate() (passed bool, err error) {
	state, err := validateModeState(r.Mode)
	if err != nil {
		// naked return: passed stays false, err carries the mode error
		return
	}
	r.State = state
	switch state {
	case PublicKeyValidation:
		// NOTE(review): PublicKeyValidation validates r.PrivateKey rather
		// than r.PublicKey — looks like a copy/paste slip; confirm intent
		// before relying on this.
		passed, err = state.validatePath(r.PrivateKey)
	default:
		err = errors.New("unknown program Mode type")
	}
	return passed, err
}
// validateModeState maps the -Mode flag string onto its ModeState.
// Empty or unrecognized modes produce a descriptive error and a zero state.
func validateModeState(mode string) (ModeState, error) {
	var zero ModeState
	if mode == "" {
		fmt.Println("Program Mode string is empty. It has to be one one of the ", ProgramState)
		return zero, errors.New("program Mode string is empty")
	}
	if pState, ok := ProgramState[mode]; ok {
		return pState, nil
	}
	return zero, errors.New("program Mode has not been found in the available set")
}
// validatePath checks each path for emptiness, OS-appropriate format
// (regexp), and file integrity. A regexp-compilation panic is recovered
// and surfaced as an error.
// Fix: errors.New(fmt.Sprintf(...)) replaced with fmt.Errorf (flagged by
// go vet / staticcheck S1028).
func (m *ModeState) validatePath(paths ...string) (isValid bool, err error) {
	// this is because it has to catch errors of regexp compilation
	defer func() {
		if rec := recover(); rec != nil {
			err = fmt.Errorf("%v", rec)
			return
		}
	}()
	lpType := getRegExpByOS()
	for _, path := range paths {
		if len(path) == 0 {
			fmt.Println("Program Mode string is empty. It has to be one one of the ", ProgramState)
			return false, errors.New("program Mode string is empty")
		}
		if isPathValid := lpType.RegExpValidation(path); !isPathValid {
			fmt.Println("RegExpValidation did not pass for ", path)
			return false, errors.New("file path format is not appropriate")
		}
		if isFileValid := FileValidation(path); !isFileValid {
			fmt.Println("FileValidation did not pass for ", path)
			return false, errors.New("file is broken or invalid")
		}
	}
	return true, nil
}
// getRegExpByOS selects the path-validation regexp set for the current OS.
// Fix: the branches were inverted — the Windows regexp was returned when
// running on POSIX and vice versa. If the inversion was deliberate,
// IsItRunInPosix is misnamed; confirm against its definition.
func getRegExpByOS() RegExpStore {
	if IsItRunInPosix() {
		return PosixPathRegex
	}
	return WinPathRegex
}
|
// Copyright (c) 2019 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package main
import (
"fmt"
"github.com/vladimirvivien/gexe"
ci "github.com/vmware-tanzu/crash-diagnostics/.ci/common"
)
// main cross-compiles the crash-diagnostics binary for every OS/arch pair,
// passing version metadata via ldflags.
func main() {
	targetArches := []string{"amd64"}
	targetOSes := []string{"darwin", "linux"}
	e := gexe.New()
	e.SetEnv("PKG_ROOT", ci.PkgRoot)
	e.SetEnv("VERSION", ci.Version)
	e.SetEnv("GIT_SHA", ci.GitSHA)
	e.SetEnv("LDFLAGS", `"-X ${PKG_ROOT}/buildinfo.Version=${VERSION} -X ${PKG_ROOT}/buildinfo.GitSHA=${GIT_SHA}"`)
	for _, goarch := range targetArches {
		for _, goos := range targetOSes {
			out := fmt.Sprintf(".build/%s/%s/crash-diagnostics", goarch, goos)
			gobuild(goarch, goos, e.Val("LDFLAGS"), out)
		}
	}
}
// gobuild compiles the project for the given GOOS/GOARCH into binary,
// with CGO disabled and the supplied ldflags.
func gobuild(arch, os, ldflags, binary string) {
	b := gexe.New()
	b.Conf.SetPanicOnErr(true)
	b.SetVar("arch", arch)
	b.SetVar("os", os)
	b.SetVar("ldflags", ldflags)
	b.SetVar("binary", binary)
	result := b.Envs("CGO_ENABLED=0 GOOS=$os GOARCH=$arch").Run("go build -o $binary -ldflags $ldflags .")
	// NOTE(review): any non-empty Run output is treated as failure —
	// presumably gexe.Run returns the command's combined output here;
	// confirm a successful `go build` really produces no output.
	if result != "" {
		fmt.Printf("Build for %s/%s failed: %s\n", arch, os, result)
		return
	}
	fmt.Printf("Build %s/%s OK: %s\n", arch, os, binary)
}
|
package leetcode
import (
"fmt"
"math"
)
// calculator memoizes contiguous range sums of a slice, keyed by the
// string "i-j" for the inclusive index range [i, j].
type calculator struct {
	values map[string]int
}
// sum returns nums[i]+...+nums[j]. When the sum of [i, j-1] is cached it
// is extended by nums[j]; otherwise the range is summed directly. The
// result is cached under "i-j" before returning.
func (c *calculator) sum(nums []int, i, j int) int {
	prevKey := fmt.Sprintf("%d-%d", i, j-1)
	total := 0
	if cached, ok := c.values[prevKey]; ok {
		total = cached + nums[j]
	} else {
		for idx := i; idx <= j; idx++ {
			total += nums[idx]
		}
	}
	c.values[fmt.Sprintf("%d-%d", i, j)] = total
	return total
}
// maxSubArrayLow is the brute-force maximum-subarray variant: it tries
// every (start, end) pair, using a memoizing calculator to extend range
// sums incrementally. Returns math.MinInt64 for empty input.
func maxSubArrayLow(nums []int) int {
	memo := calculator{values: make(map[string]int)}
	best := math.MinInt64
	for start := 0; start < len(nums); start++ {
		for end := start; end < len(nums); end++ {
			if s := memo.sum(nums, start, end); s > best {
				best = s
			}
		}
	}
	return best
}
// maxSubArrayRaw finds the maximum contiguous-subarray sum with Kadane's
// running-sum scan: keep extending while the running sum is useful, reset
// it once it drops below zero. Returns math.MinInt64 for empty input.
func maxSubArrayRaw(nums []int) int {
	best := math.MinInt64
	running := 0
	for _, v := range nums {
		running += v
		if running > best {
			best = running
		}
		if running < 0 {
			running = 0
		}
	}
	return best
}
// maxSubArray solves maximum contiguous-subarray sum by dynamic
// programming: the best sum ending at each index either extends the
// previous best (if positive) or restarts at the current element.
// Returns math.MinInt64 for empty input.
func maxSubArray(nums []int) int {
	best := math.MinInt64
	endingHere := 0
	for idx, v := range nums {
		if idx > 0 && endingHere > 0 {
			endingHere += v
		} else {
			endingHere = v
		}
		if endingHere > best {
			best = endingHere
		}
	}
	return best
}
|
package main
import (
"fmt"
"os/exec"
)
// main opens an SSH tunnel forwarding local `port` through the configured
// bastion host, blocking until ssh exits; any error is printed.
// Fix: replaced the non-idiomatic `var x string; x = ...` pairs with
// short variable declarations.
func main() {
	conn := "binario_test@tunnel.us-2.checkpoint.security" //CHANGE connection information
	port := "7777" //CHANGE port to be redirected (localhost)
	c := exec.Command("ssh", "-i", "cp.pem", "-NL", port+":tunnel:1", conn)
	if err := c.Run(); err != nil {
		fmt.Println("Eroorrrrr ... ", err)
	}
}
|
package containersnapshot
import (
"context"
"testing"
"time"
atomv1alpha1 "github.com/supremind/container-snapshot/pkg/apis/atom/v1alpha1"
"github.com/supremind/container-snapshot/pkg/constants"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// TestContainerSnapshot hooks the Ginkgo suite into `go test`.
func TestContainerSnapshot(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Containersnapshot Suite")
}
var _ = Describe("snapshot operator", func() {
var (
namespace = "example-ns"
snpKey = types.NamespacedName{Name: "example-snapshot", Namespace: namespace}
now = metav1.Now()
ctx = context.Background()
re = &ReconcileContainerSnapshot{
workerImage: "worker-image:latest",
workerImagePullSecret: "worker-image-pull-secret",
}
simpleSnapshot *atomv1alpha1.ContainerSnapshot
sourcePod *corev1.Pod
)
BeforeEach(func() {
simpleSnapshot = &atomv1alpha1.ContainerSnapshot{
ObjectMeta: metav1.ObjectMeta{Name: "example-snapshot", Namespace: namespace},
Spec: atomv1alpha1.ContainerSnapshotSpec{
PodName: "source-pod",
ContainerName: "source-container",
Image: "reg.example.com/snapshots/example-snapshot:v0.0.1",
ImagePushSecrets: []corev1.LocalObjectReference{{
Name: "my-docker-secret",
}},
},
}
sourcePod = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "source-pod", Namespace: namespace},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "source-container",
Image: "source-image:latest",
},
{
Name: "sidecar-container",
Image: "sidecar-image:latest",
},
},
NodeName: "example-node",
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
StartTime: &metav1.Time{Time: now.Add(-1 * time.Minute)},
ContainerStatuses: []corev1.ContainerStatus{
{
Name: "source-container",
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{
StartedAt: metav1.Time{Time: now.Add(-1 * time.Minute)},
},
},
Ready: true,
Image: "source-image:latest",
ImageID: "docker-pullable:///source-image@sha256:xxxx-source-image",
ContainerID: "docker://xxxx-source-image",
},
{
Name: "sidecar-container",
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{
StartedAt: metav1.Time{Time: now.Add(-1 * time.Minute)},
},
},
Ready: true,
Image: "sidecar-image:latest",
ImageID: "docker-pullable:///sidecar-image@sha256:xxxx-sidecar-image",
ContainerID: "docker://xxxx-sidecar-image",
},
},
},
}
// Register operator types with the runtime scheme.
re.scheme = scheme.Scheme
re.scheme.AddKnownTypes(atomv1alpha1.SchemeGroupVersion, simpleSnapshot)
// Create a fake client to mock API calls.
re.client = &indexFakeClient{fake.NewFakeClientWithScheme(re.scheme)}
})
Context("creating snapshot", func() {
var uid types.UID
JustBeforeEach(func() {
Expect(re.client.Create(ctx, sourcePod)).Should(Succeed())
Expect(re.client.Create(ctx, simpleSnapshot)).Should(Succeed())
snp, e := getSnapshot(ctx, re.client, snpKey)
Expect(e).Should(Succeed())
uid = snp.UID
})
Context("for running source pod", func() {
JustBeforeEach(func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
})
It("should succeed", func() {
Expect(getWorkerState(ctx, re.client, snpKey)).Should(Equal(atomv1alpha1.WorkerCreated))
})
It("should create a worker pod", func() {
out, e := re.getWorkerPod(ctx, namespace, uid)
Expect(e).Should(BeNil())
Expect(out).ShouldNot(BeNil())
Expect(out.Name).Should(HavePrefix("example-snapshot-"))
Expect(out.Namespace).Should(Equal(namespace))
Expect(out.Spec.Containers).Should(HaveLen(1))
Expect(out.Spec.NodeName).Should(Equal("example-node"))
container := out.Spec.Containers[0]
Expect(container.Image).Should(Equal(re.workerImage))
Expect(container.Command).Should(Equal([]string{"container-snapshot-worker"}))
Expect(container.Args).Should(Equal([]string{
"--container", "xxxx-source-image",
"--image", "reg.example.com/snapshots/example-snapshot:v0.0.1",
"--snapshot", "example-snapshot",
}))
})
})
Context("for pending source pod", func() {
BeforeEach(func() {
sourcePod.Status.Phase = corev1.PodPending
})
JustBeforeEach(func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{RequeueAfter: retryLater}))
})
It("should not update worker state", func() {
Expect(getWorkerState(ctx, re.client, snpKey)).Should(BeEmpty())
})
It("should not create any worker pod", func() {
Consistently(func() error {
_, e := re.getWorkerPod(ctx, namespace, uid)
return e
}()).Should(HaveOccurred())
})
})
Context("for failed source pod", func() {
BeforeEach(func() {
sourcePod.Status.Phase = corev1.PodFailed
})
JustBeforeEach(func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
})
It("should fail", func() {
Expect(getWorkerState(ctx, re.client, snpKey)).Should(Equal(atomv1alpha1.WorkerFailed))
})
It("should not create any worker pod", func() {
Consistently(func() error {
_, e := re.getWorkerPod(ctx, namespace, uid)
return e
}()).Should(HaveOccurred())
})
})
Context("for succeeded source pod", func() {
BeforeEach(func() {
sourcePod.Status.Phase = corev1.PodSucceeded
})
JustBeforeEach(func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
})
It("should fail", func() {
Expect(getWorkerState(ctx, re.client, snpKey)).Should(Equal(atomv1alpha1.WorkerFailed))
})
It("should not create any worker pod", func() {
Consistently(func() error {
_, e := re.getWorkerPod(ctx, namespace, uid)
return e
}()).Should(HaveOccurred())
})
})
})
Context("updating snapshot", func() {
var worker *corev1.Pod
BeforeEach(func() {
Expect(re.client.Create(ctx, sourcePod)).Should(Succeed())
Expect(re.client.Create(ctx, simpleSnapshot)).Should(Succeed())
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
snp, e := getSnapshot(ctx, re.client, snpKey)
Expect(e).Should(Succeed())
worker, e = re.getWorkerPod(ctx, namespace, snp.UID)
Expect(e).Should(Succeed())
})
JustBeforeEach(func() {
Expect(re.client.Status().Update(ctx, worker)).Should(Succeed())
})
Context("when worker is running", func() {
BeforeEach(func() {
worker.Status.Phase = corev1.PodRunning
})
It("should update snapshot's workerState to running", func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
Expect(getWorkerState(ctx, re.client, snpKey)).Should(Equal(atomv1alpha1.WorkerRunning))
})
})
Context("when worker succeeds", func() {
BeforeEach(func() {
worker.Status.Phase = corev1.PodSucceeded
})
It("should update snapshot's workerState to complete", func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
Expect(getWorkerState(ctx, re.client, snpKey)).Should(Equal(atomv1alpha1.WorkerComplete))
})
})
Context("when worker fails", func() {
BeforeEach(func() {
worker.Status.Phase = corev1.PodFailed
worker.Status.ContainerStatuses = []corev1.ContainerStatus{{
LastTerminationState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: constants.ExitCodeDockerCommit,
Reason: "Error",
Message: "docker commit failed: blah, blah...",
FinishedAt: metav1.Time{Time: now.Add(1 * time.Minute)},
},
},
}}
})
It("should update snapshot's workerState to failed, and collect condition", func() {
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
snp, e := getSnapshot(ctx, re.client, snpKey)
Expect(e).Should(Succeed())
Expect(snp.Status.Conditions).Should(HaveLen(1))
Expect(snp.Status.Conditions[0].Type).Should(Equal(atomv1alpha1.DockerCommitFailed))
})
})
})
Context("deleting snapshot", func() {
var uid types.UID
BeforeEach(func() {
Expect(re.client.Create(ctx, sourcePod)).Should(Succeed())
Expect(re.client.Create(ctx, simpleSnapshot)).Should(Succeed())
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
snp, e := getSnapshot(ctx, re.client, snpKey)
Expect(e).Should(Succeed())
uid = snp.UID
})
JustBeforeEach(func() {
Eventually(func() error { _, e := re.getWorkerPod(ctx, namespace, uid); return e }).Should(Succeed())
Expect(re.client.Delete(ctx, simpleSnapshot, client.PropagationPolicy(metav1.DeletePropagationForeground))).Should(Succeed())
Expect(re.Reconcile(reconcile.Request{NamespacedName: snpKey})).Should(Equal(reconcile.Result{}))
})
It("should succeed, and delete the worker pod", func() {
Eventually(func() error { _, e := getSnapshot(ctx, re.client, snpKey); return e }).Should(HaveOccurred())
})
// skip it, fake client knows nothing about delete propagation
PIt("should delete the worker pod", func() {
Eventually(func() error { _, e := re.getWorkerPod(ctx, namespace, uid); return e }).Should(HaveOccurred())
})
})
})
// getSnapshot fetches the ContainerSnapshot identified by key and returns a
// pointer to it, or the lookup error.
func getSnapshot(ctx context.Context, c client.Client, key types.NamespacedName) (*atomv1alpha1.ContainerSnapshot, error) {
	var snp atomv1alpha1.ContainerSnapshot
	if err := c.Get(ctx, key, &snp); err != nil {
		return nil, err
	}
	return &snp, nil
}
// getWorkerState reads the WorkerState from the snapshot's status, or returns
// the empty state together with the retrieval error.
func getWorkerState(ctx context.Context, c client.Client, key types.NamespacedName) (atomv1alpha1.WorkerState, error) {
	snapshot, err := getSnapshot(ctx, c, key)
	if err != nil {
		return "", err
	}
	return snapshot.Status.WorkerState, nil
}
// The fake client does not index or filter objects by owner references;
// indexFakeClient wraps it so List calls that use the
// "metadata.ownerReferences.uid" field selector behave as in a real cluster.
type indexFakeClient struct {
	client.Client
}
// List delegates to the embedded fake client and then, when a field selector
// is present, post-filters the returned items so only objects with an
// owner-reference UID matching "metadata.ownerReferences.uid" survive.
// Objects without a metadata accessor are silently dropped from the result.
func (c *indexFakeClient) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error {
	e := c.Client.List(ctx, list, opts...)
	if e != nil {
		return e
	}
	// Collect the effective list options so the field selector can be read.
	listOpts := client.ListOptions{}
	listOpts.ApplyOptions(opts)
	if listOpts.FieldSelector == nil || listOpts.FieldSelector.Empty() {
		// No selector: behave exactly like the underlying fake client.
		return nil
	}
	objs, e := apimeta.ExtractList(list)
	if e != nil {
		return e
	}
	out := make([]runtime.Object, 0)
	for _, obj := range objs {
		meta, e := apimeta.Accessor(obj)
		if e != nil {
			continue // not an API object; skip rather than fail the whole List
		}
		// Keep the object if any one of its owner references matches.
		for _, owner := range meta.GetOwnerReferences() {
			if listOpts.FieldSelector.Matches(fields.Set{
				"metadata.ownerReferences.uid": string(owner.UID),
			}) {
				out = append(out, obj)
				break
			}
		}
	}
	return apimeta.SetList(list, out)
}
|
package main
import (
"test"
"fmt"
)
// main prints a banner and invokes the demo package's Test entry point.
// NOTE(review): the file imports path "test" but the call site uses the
// identifier testapp — presumably the "test" directory declares
// `package testapp`; confirm, otherwise this file does not compile.
func main() {
	fmt.Printf("This is main!\n")
	testapp.Test()
}
package main
import (
//"fmt"
"funbird-dataReport/compress"
"funbird-dataReport/execl"
db "funbird-dataReport/models"
mail "funbird-dataReport/sendmail"
"log"
"os"
"github.com/WangJiemin/gocomm"
)
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
err := os.Mkdir(path, 0777)
if err != nil {
log.Fatalln("Directory creation failed\n")
}
}
return false, err
}
// main generates the daily data report: it loads config, initializes the DB,
// dumps query results into an xlsx workbook under /tmp/dataReport, zips the
// workbook, mails it, and removes the temporary artifacts.
func main() {
	config := gocomm.ReadConfig("./conf/app.cnf")
	db.Initialize(config)
	// Ensure the scratch directory exists (created on first run).
	PathExists("/tmp/dataReport")
	dict_86 := db.Inforall_dataReport("information_all_86a18cd9", "86a18cd9-6a06-4794-b7eb-6ce8d8c57bb0")
	execl.Execl_install("/tmp/dataReport/86表.xlsx", dict_86)
	//dict_chayi86 := db.InfoallChayi("information_all_86a18cd9", "86a18cd9-6a06-4794-b7eb-6ce8d8c57bb0")
	//execl.Execl_install("/tmp/dataReport/86差异表.xlsx", dict_chayi86)
	//compress.ComTarGz("/tmp/dataReport", "/tmp/dataReport/dataReport.tar.gz")
	/*
		f1, err := os.Open("/tmp/dataReport")
		if err != nil {
			log.Fatalln(err)
		}
		defer f1.Close()
		var files = []*os.File{f1}
	*/
	f1, err := os.Open("/tmp/dataReport/86表.xlsx")
	if err != nil {
		log.Fatalln(err)
	}
	defer f1.Close()
	var files = []*os.File{f1}
	dest := "/tmp/dataReport.zip"
	compress.ComZIP(files, dest)
	// Mail subject is "数据报表" ("data report"); attachment is the zip.
	mail.SendGoMail(config, "数据报表", "/tmp/dataReport.zip")
	os.RemoveAll("/tmp/dataReport")
	os.Remove("/tmp/dataReport.zip")
}
|
package arriba
import (
"encoding/xml"
"fmt"
)
// Node holds the function name taken from the data-lift attribute, and
// NodeSeq is the raw inner HTML that will be passed to the function named by
// that attribute.
type Node struct {
	FunctionName string `xml:"data-lift,attr"`
	NodeSeq      string `xml:",innerxml"`
}
// push2Stack pushes every node of the given slice onto the stack, in order.
func push2Stack(array []Node, stack *Stack) {
	for i := range array {
		stack.Push(array[i])
	}
}
// Result holds the XML element's name plus a slice of Node values, one per
// child element encountered during unmarshalling.
type Result struct {
	XMLName   xml.Name
	Functions []Node `xml:",any"`
}
// GetFunctions takes the complete HTML of a page and returns a map of
// function names => html that should be passed to those functions.
// A parse failure is logged and yields nil.
func GetFunctions(html string) map[string]string {
	err, parsed := marshalNode(html)
	if err != nil {
		fmt.Printf("Error 1: %v\n\n", err)
		return nil
	}
	return loop(parsed)
}
// marshalNode unmarshals an HTML fragment into a Result. The fragment is
// wrapped in a synthetic <p> element so the root's own inner HTML is kept —
// otherwise only child nodes would be seen and data would be missed.
func marshalNode(html string) (error, Result) {
	var parsed Result
	wrapped := "<p>" + html + "</p>"
	if err := xml.Unmarshal([]byte(wrapped), &parsed); err != nil {
		return err, parsed
	}
	return nil, parsed
}
// loop walks the parsed tree depth-first with an explicit stack and collects
// every node carrying a data-lift function name into a map of
// function name => inner HTML.
func loop(v Result) map[string]string {
	functionMap := make(map[string]string)
	pending := new(Stack)
	// Seed the stack with the top-level nodes.
	push2Stack(v.Functions, pending)
	for pending.Size() > 0 {
		current := pending.Pop().(Node)
		if current.FunctionName != "" {
			functionMap[current.FunctionName] = current.NodeSeq
		}
		err, child := marshalNode(current.NodeSeq)
		if err != nil {
			fmt.Printf("Error 2: %v ==>> %v\n\n", current.NodeSeq, err)
		}
		// Any nested nodes get the same treatment.
		push2Stack(child.Functions, pending)
	}
	return functionMap
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"otherside/api/seatgeekLayer"
"otherside/api/spotifyLayer"
"reflect"
"strings"
"time"
"otherside/api/redisLayer"
"github.com/gorilla/mux"
"github.com/zmb3/spotify"
)
// TopTrackResponse is the result of one GetArtistTopTrack lookup, delivered
// over a channel: the track, whether the artist ID was non-empty, and any
// lookup error.
type TopTrackResponse struct {
	Track        spotify.FullTrack
	ArtistExists bool
	err          error
}
// ArtistIDResponse is the result of one GetArtistID lookup, delivered over a
// channel: the Spotify artist's ID, display name, image URL, and any search
// error.
type ArtistIDResponse struct {
	ID       spotify.ID
	Name     string
	ImageURL string
	err      error
}
// applicationPort and clientOrigin come from the environment so the same
// binary can run across deployments.
var applicationPort = os.Getenv("PORT")
var clientOrigin = os.Getenv("CLIENT_APPLICATION_URL")

// timeToday caches the current day's window (refreshed in Authenticate);
// cityPostcodeMap and availableGenres are fixed lookup data built in main.
var timeToday seatgeekLayer.TimeToday
var cityPostcodeMap map[string]string
var availableGenres []string
// main builds the static lookup data, registers every HTTP route, and blocks
// serving on $PORT.
func main() {
	cityPostcodeMap = generateCityPostcodeMap()
	availableGenres = generateGenres()
	router := mux.NewRouter().StrictSlash(true)
	router.HandleFunc("/cities", Cities)
	router.HandleFunc("/genres", Genres)
	router.HandleFunc("/authenticate", Authenticate)
	router.HandleFunc("/callback", Callback)
	router.HandleFunc("/localevents", LocalEvents)
	router.HandleFunc("/user", User)
	router.HandleFunc("/toptracks", TopTracks).Methods("POST", "OPTIONS")
	router.HandleFunc("/artistids", ArtistIDs).Methods("POST", "OPTIONS")
	router.HandleFunc("/buildplaylist", BuildPlaylist).Methods("POST", "OPTIONS")
	fmt.Printf("Starting server on port %s\n", applicationPort)
	log.Fatal(http.ListenAndServe(":"+applicationPort, router))
}
// generateCityPostcodeMap returns the fixed mapping from supported
// "City ST" names to a representative postcode for each city.
func generateCityPostcodeMap() map[string]string {
	return map[string]string{
		"Austin TX":        "78759",
		"Atlanta GA":       "30301",
		"Washington DC":    "20001",
		"Nashville TN":     "37011",
		"Las Vegas NV":     "88901",
		"New Haven CT":     "06501",
		"Buffalo NY":       "14201",
		"Troy NY":          "12180",
		"Kansas City MO":   "64030",
		"Tulsa OK":         "74008",
		"Denver CO":        "80014",
		"Omaha NE":         "68007",
		"San Diego CA":     "91945",
		"Boston MA":        "02101",
		"Indianapolis IN":  "46077",
		"Pittsburgh PA":    "15106",
		"St Louis MO":      "63101",
		"New Orleans LA":   "70032",
		"Detroit MI":       "48127",
		"Louisville KY":    "40018",
		"San Francisco CA": "94016",
		"Norfolk VA":       "23324",
		"Cincinatti OH":    "45203",
		"Birmingham AL":    "35005",
		"Charlotte NC":     "28105",
		"Des Moines IA":    "50047",
		"Philadelphia PA":  "19093",
		"Chicago IL":       "60007",
		"Houston TX":       "77001",
		"Dallas TX":        "75043",
	}
}
// generateGenres returns the fixed list of genre slugs the API supports.
func generateGenres() []string {
	return []string{
		"rock",
		"hard-rock",
		"indie",
		"hip-hop",
		"jazz",
		"pop",
		"soul",
		"rnb",
		"alternative",
		"classic-rock",
		"country",
		"folk",
		"punk",
		"electronic",
		"blues",
		"techno",
		"rap",
		"latin",
		"classical",
	}
}
//GET
// Cities handles GET /cities: it returns the JSON-encoded list of supported
// city names (the keys of cityPostcodeMap). Map iteration order is random,
// so the list order is unspecified.
func Cities(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	var cities []string
	for key := range cityPostcodeMap {
		cities = append(cities, key)
	}
	citiesJSON, err := json.Marshal(cities)
	if err != nil {
		// Previously a marshal failure was only logged and the client got an
		// empty 200 response; surface the failure instead.
		fmt.Printf("Error Marshalling city keys: " + err.Error())
		http.Error(w, "error marshalling city list", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(citiesJSON)
}
//GET
// Genres handles GET /genres: it returns the fixed list of supported genre
// slugs as JSON.
func Genres(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	genresJSON, err := json.Marshal(availableGenres)
	if err != nil {
		// Fixed: the log message previously said "city keys" (copy-paste),
		// and the client silently received an empty 200; report the error.
		fmt.Printf("Error Marshalling genres: " + err.Error())
		http.Error(w, "error marshalling genre list", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(genresJSON)
}
//GET
// LocalEvents handles GET /localevents?cities=[...]&genres=[...]: it maps the
// requested cities to postcodes and returns the matching SeatGeek events as
// JSON.
func LocalEvents(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	cities, ok := r.URL.Query()["cities"]
	if !ok || len(cities) < 1 {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Cities array parameter missing from request."))
		return
	}
	genres, ok := r.URL.Query()["genres"]
	if !ok || len(genres) < 1 {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Genres array parameter missing from request."))
		return
	}
	genreArray := QueryStringToArray(genres[0])
	var postCodeArray []string
	for _, city := range QueryStringToArray(cities[0]) {
		postCodeArray = append(postCodeArray, cityPostcodeMap[city])
	}
	localSeatGeekEvents := seatgeekLayer.FindLocalEvents(postCodeArray, genreArray, timeToday)
	eventsJSON, err := json.Marshal(localSeatGeekEvents)
	if err != nil {
		fmt.Printf("Error Marshalling localseatgeekevents data: " + err.Error())
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(eventsJSON)
}
// QueryStringToArray converts a bracketed, comma-separated query value such
// as "[a,b,c]" into its element slice.
func QueryStringToArray(queryString string) []string {
	trimmed := strings.Trim(queryString, "[]")
	return strings.Split(trimmed, ",")
}
//GET
// User handles GET /user: it resolves the bearer token to the current
// Spotify user and returns that user as JSON.
func User(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	if r.Method == "OPTIONS" {
		w.WriteHeader(http.StatusOK)
		return
	}
	token := ExtractTokenFromHeader(r)
	currentUser, err := spotifyLayer.GetCurrentUser(token)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Error obtaining current user"))
		return
	}
	currentUserJSON, err := json.Marshal(currentUser)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(fmt.Sprintf("Error marshaling spotify user %s", currentUser)))
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(currentUserJSON)
}
//POST
// ArtistIDs handles POST /artistids: it decodes a list of SeatGeek events,
// resolves every performer to a Spotify artist concurrently, and returns the
// collected artist id/name/image triples as JSON. The first failed lookup
// aborts the whole request with a 500.
func ArtistIDs(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	if r.Method == "OPTIONS" {
		w.WriteHeader(http.StatusOK)
		return
	}
	// NOTE(review): fixed 200ms pause before hitting the Spotify API —
	// presumably crude rate limiting; confirm.
	time.Sleep(200 * time.Millisecond)
	token := ExtractTokenFromHeader(r)
	var localSeatGeekEvents []seatgeekLayer.SeatGeekEvent
	var artists []spotifyLayer.SpotifyArtistImage
	err := json.NewDecoder(r.Body).Decode(&localSeatGeekEvents)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Error decoding local seatgeek events: " + err.Error()))
		return
	}
	// One goroutine + one channel per performer; results are multiplexed
	// below in completion order.
	var artistChannels []chan ArtistIDResponse
	t4 := time.Now()
	for _, event := range localSeatGeekEvents {
		for _, performer := range event.Performers {
			artistChan := make(chan ArtistIDResponse)
			artistChannels = append(artistChannels, artistChan)
			go GetArtistID(token, performer, artistChan)
		}
	}
	// reflect.Select over a dynamic case set: receive from whichever lookup
	// finishes first, disabling each channel once it has been closed.
	cases := make([]reflect.SelectCase, len(artistChannels))
	for i, artistChan := range artistChannels {
		cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(artistChan)}
	}
	remainingCases := len(cases)
	for remainingCases > 0 {
		chosen, value, ok := reflect.Select(cases)
		if !ok {
			//Channel has been closed; zero out channel to disable the case
			cases[chosen].Chan = reflect.ValueOf(nil)
			remainingCases--
			continue
		}
		response := value.Interface().(ArtistIDResponse)
		if response.err != nil {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("Error getting artist ID for spotify artist"))
			return
		} else {
			var newArtist spotifyLayer.SpotifyArtistImage
			newArtist.Id = response.ID
			newArtist.Name = response.Name
			newArtist.ImageURL = response.ImageURL
			artists = append(artists, newArtist)
		}
	}
	fmt.Println("[Time benchmark] Artist IDs " + time.Since(t4).String())
	artistsJSON, err := json.Marshal(artists)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(fmt.Sprintf("Error marshaling spotify artist IDs")))
		return
	} else {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(artistsJSON)
		return
	}
}
//POST
// TopTracks handles POST /toptracks: it decodes a list of Spotify artist IDs,
// fetches each artist's top track concurrently, and returns the collected
// tracks as JSON. Empty artist IDs are skipped (ArtistExists == false); the
// first failed lookup aborts the whole request with a 500.
func TopTracks(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	if r.Method == "OPTIONS" {
		w.WriteHeader(http.StatusOK)
		return
	}
	// NOTE(review): fixed 200ms pause before hitting the Spotify API —
	// presumably crude rate limiting; confirm.
	time.Sleep(200 * time.Millisecond)
	var artistIDs []spotify.ID
	var topTracks []spotify.FullTrack
	err := json.NewDecoder(r.Body).Decode(&artistIDs)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Error decoding Spotify Artist IDs: " + err.Error() + "\n"))
		return
	}
	// One goroutine + one channel per artist ID; results multiplexed below.
	var topTrackChannels []chan TopTrackResponse
	token := ExtractTokenFromHeader(r)
	t4 := time.Now()
	for _, ID := range artistIDs {
		topTrackChan := make(chan TopTrackResponse)
		topTrackChannels = append(topTrackChannels, topTrackChan)
		go GetArtistTopTrack(token, ID, topTrackChan)
	}
	// reflect.Select over a dynamic case set: receive results in completion
	// order, disabling each channel once it has been closed.
	cases := make([]reflect.SelectCase, len(topTrackChannels))
	for i, topTrackChan := range topTrackChannels {
		cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(topTrackChan)}
	}
	remainingCases := len(cases)
	for remainingCases > 0 {
		chosen, value, ok := reflect.Select(cases)
		if !ok {
			//Channel has been closed; zero out channel to disable the case
			cases[chosen].Chan = reflect.ValueOf(nil)
			remainingCases--
			continue
		}
		response := value.Interface().(TopTrackResponse)
		if response.err != nil {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("Error getting top track for spotify artist"))
			return
		} else {
			if response.ArtistExists {
				topTracks = append(topTracks, response.Track)
			}
		}
	}
	fmt.Println("[Time benchmark] Top tracks " + time.Since(t4).String())
	topTracksJSON, err := json.Marshal(topTracks)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(fmt.Sprintf("Error marshaling spotify top tracks")))
		return
	} else {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(topTracksJSON)
		return
	}
}
// GetArtistID resolves a performer name to a Spotify artist and delivers the
// result (or the search error) on artistChan, closing the channel after the
// single send.
func GetArtistID(token string, performer string, artistChan chan<- ArtistIDResponse) {
	defer close(artistChan)
	var response ArtistIDResponse
	artist, err := spotifyLayer.SearchAndFindSpotifyArtistID(token, performer)
	response.err = err
	if err == nil {
		response.ID = artist.Id
		response.Name = artist.Name
		response.ImageURL = artist.ImageURL
	}
	artistChan <- response
}
// GetArtistTopTrack fetches the top track for artistID and delivers it on
// topTrackChan; an empty artistID yields ArtistExists == false with no
// lookup. The channel is closed after the single send.
func GetArtistTopTrack(token string, artistID spotify.ID, topTrackChan chan<- TopTrackResponse) {
	defer close(topTrackChan)
	var response TopTrackResponse
	if artistID == "" {
		response.ArtistExists = false
		response.err = nil
		topTrackChan <- response
		return
	}
	topArtistTrack, err := spotifyLayer.GetTopSpotifyArtistTrack(token, artistID)
	response.Track = topArtistTrack
	response.ArtistExists = true
	response.err = err
	topTrackChan <- response
}
//POST
// BuildPlaylist handles POST /buildplaylist?name=...&desc=...: it decodes the
// posted top tracks, creates a playlist with the given name/description for
// the token's user, and fills it with those tracks.
func BuildPlaylist(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	if r.Method == "OPTIONS" {
		w.WriteHeader(http.StatusOK)
		return
	}
	// All failure paths answer 500 with a JSON content type and a message.
	writeError := func(msg string) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(msg))
	}
	playlistName, ok := r.URL.Query()["name"]
	if !ok || len(playlistName) < 1 {
		writeError("name parameter missing from request.")
		return
	}
	playlistDesc, ok := r.URL.Query()["desc"]
	if !ok || len(playlistDesc) < 1 {
		writeError("desc parameter missing from request.")
		return
	}
	var topTracks []spotify.FullTrack
	if err := json.NewDecoder(r.Body).Decode(&topTracks); err != nil {
		writeError("Error decoding Spotify Top Tracks: " + err.Error())
		return
	}
	token := ExtractTokenFromHeader(r)
	playlistID, err := spotifyLayer.GeneratePlayList(token, playlistName[0], playlistDesc[0])
	if err != nil {
		writeError("Error generating playlist: " + err.Error())
		return
	}
	if err = spotifyLayer.AddTracksToPlaylist(token, playlistID, topTracks); err != nil {
		writeError("Error adding tracks to playlist: " + err.Error())
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("Playlist generated."))
}
//GET
// Authenticate handles GET /authenticate?state=...: it refreshes the cached
// day window (flushing Redis when the UTC day has rolled over) and writes the
// Spotify authentication URL for the given state.
func Authenticate(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	UTCTimeLocation, err := time.LoadLocation("UTC")
	if err != nil {
		fmt.Printf("Error creating time LoadLocation: " + err.Error())
	}
	if timeToday.EndOfDay.Sub(time.Now().In(UTCTimeLocation)) < 0 {
		redisLayer.FlushDb()
		timeToday = seatgeekLayer.GetTimeToday(UTCTimeLocation)
	}
	state, ok := r.URL.Query()["state"]
	if !ok || len(state) < 1 || len(state[0]) < 1 {
		// Previously this only logged and then indexed state[0], which
		// panics when the parameter is absent; reject the request instead.
		fmt.Printf("State parameter missing from authenticate request.")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("State parameter missing from request."))
		return
	}
	authenticationUrl := spotifyLayer.ObtainAuthenticationURL(state[0])
	fmt.Fprint(w, authenticationUrl)
}
//GET
//Callback is called from the Spotify authentication flow, and redirects to <Host>/#/callback
func Callback(w http.ResponseWriter, r *http.Request) {
state, ok := r.URL.Query()["state"]
if !ok || len(state) < 1 {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("State parameter missing from request.")))
return
}
accessToken, err := spotifyLayer.SetNewSpotifyClient(w, r, state[0])
if err != nil {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("Error setting new spotify client: " + err.Error())))
return
}
redirectURL := clientOrigin + "?state=" + state[0] + "&token=" + accessToken
http.Redirect(w, r, redirectURL, http.StatusSeeOther)
}
func ExtractTokenFromHeader(r *http.Request) string {
tokenHeader := r.Header.Get("Authorization")
return tokenHeader[7:]
}
func enableCors(w *http.ResponseWriter) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
(*w).Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
(*w).Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// ShardsSegment type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/indices/segments/types.ts#L46-L51
//
// Segment statistics for one shard: committed vs. searchable segment counts,
// the shard routing, and per-segment details keyed by segment name.
// Generated code — edits will be lost on regeneration.
type ShardsSegment struct {
	NumCommittedSegments int                 `json:"num_committed_segments"`
	NumSearchSegments    int                 `json:"num_search_segments"`
	Routing              ShardSegmentRouting `json:"routing"`
	Segments             map[string]Segment  `json:"segments"`
}
// UnmarshalJSON decodes a ShardsSegment token-by-token so the integer
// counters may arrive either as JSON numbers or as quoted strings (the
// Elasticsearch API emits both forms). Generated code — edits will be lost
// on regeneration.
func (s *ShardsSegment) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break // end of input: normal termination
			}
			return err
		}
		switch t {
		case "num_committed_segments":
			// Accept both "3" (string) and 3 (number).
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.NumCommittedSegments = value
			case float64:
				f := int(v)
				s.NumCommittedSegments = f
			}
		case "num_search_segments":
			// Same dual string/number handling as above.
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.NumSearchSegments = value
			case float64:
				f := int(v)
				s.NumSearchSegments = f
			}
		case "routing":
			if err := dec.Decode(&s.Routing); err != nil {
				return err
			}
		case "segments":
			// Lazily allocate the map so decoding into it cannot hit nil.
			if s.Segments == nil {
				s.Segments = make(map[string]Segment, 0)
			}
			if err := dec.Decode(&s.Segments); err != nil {
				return err
			}
		}
	}
	return nil
}
// NewShardsSegment returns a ShardsSegment with the Segments map
// pre-initialized so callers may insert entries immediately.
// Generated code — edits will be lost on regeneration.
func NewShardsSegment() *ShardsSegment {
	r := &ShardsSegment{
		Segments: make(map[string]Segment, 0),
	}
	return r
}
|
package todolist
import (
"crypto/rand"
"fmt"
"io"
"math"
"strings"
"time"
)
// AddIfNotThere appends to arr each element of items that arr does not
// already contain, preserving order of first appearance (duplicates within
// items are therefore also collapsed).
func AddIfNotThere(arr []string, items []string) []string {
	contains := func(s string) bool {
		for _, existing := range arr {
			if existing == s {
				return true
			}
		}
		return false
	}
	for _, candidate := range items {
		if !contains(candidate) {
			arr = append(arr, candidate)
		}
	}
	return arr
}
// AddTodoIfNotThere appends item unless a todo with the same Id is already
// present in arr.
func AddTodoIfNotThere(arr []*Todo, item *Todo) []*Todo {
	for _, existing := range arr {
		if existing.Id == item.Id {
			return arr
		}
	}
	return append(arr, item)
}
func bod(t time.Time) time.Time {
year, month, day := t.Date()
return time.Date(year, month, day, 0, 0, 0, 0, t.Location())
}
// bom returns the beginning of t's month: midnight on the 1st, found by
// walking back one day at a time.
func bom(t time.Time) time.Time {
	for t.Day() != 1 {
		t = t.AddDate(0, 0, -1)
	}
	return bod(t)
}
// bow returns the beginning of t's week: midnight of the most recent Sunday.
func bow(t time.Time) time.Time {
	for t.Weekday() != time.Sunday {
		t = t.AddDate(0, 0, -1)
	}
	return bod(t)
}
func mostRecentSunday(t time.Time) time.Time {
for {
if t.Weekday() != time.Sunday {
t = t.AddDate(0, 0, -1)
} else {
return t
}
}
}
func mostRecentMonday(t time.Time) time.Time {
for {
if t.Weekday() != time.Monday {
t = t.AddDate(0, 0, -1)
} else {
return t
}
}
}
// weekdayRelative returns the weekday at `offset` days after Monday within
// the week of `day`: the next occurrence when forward is true, otherwise the
// previous one (see thisOrNextWeek / thisOrLastWeek). It consolidates the
// seven previously duplicated per-weekday bodies.
func weekdayRelative(day time.Time, offset int, forward bool) time.Time {
	dow := mostRecentMonday(day).AddDate(0, 0, offset)
	if forward {
		return thisOrNextWeek(dow, day)
	}
	return thisOrLastWeek(dow, day)
}

// monday resolves Monday relative to day (forward or backward).
func monday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 0, forward)
}

// tuesday resolves Tuesday relative to day (forward or backward).
func tuesday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 1, forward)
}

// wednesday resolves Wednesday relative to day (forward or backward).
func wednesday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 2, forward)
}

// thursday resolves Thursday relative to day (forward or backward).
func thursday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 3, forward)
}

// friday resolves Friday relative to day (forward or backward).
func friday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 4, forward)
}

// saturday resolves Saturday relative to day (forward or backward).
func saturday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 5, forward)
}

// sunday resolves Sunday relative to day (forward or backward).
func sunday(day time.Time, forward bool) time.Time {
	return weekdayRelative(day, 6, forward)
}
// thisOrNextWeek returns the beginning of `day`, pushed one week forward
// when day falls before pivotDay.
func thisOrNextWeek(day time.Time, pivotDay time.Time) time.Time {
	if !day.Before(pivotDay) {
		return bod(day)
	}
	return bod(day.AddDate(0, 0, 7))
}
// thisOrLastWeek returns the beginning of `day`, pulled one week back when
// day falls after pivotDay.
func thisOrLastWeek(day time.Time, pivotDay time.Time) time.Time {
	if !day.After(pivotDay) {
		return bod(day)
	}
	return bod(day.AddDate(0, 0, -7))
}
// pluralize chooses between the singular and plural word form for count.
// Counts of 1 or less (including 0) yield the singular, matching the
// original behaviour.
func pluralize(count int, singular, plural string) string {
	if count <= 1 {
		return singular
	}
	return plural
}
// isToday reports whether t falls on the same calendar date as Now.
func isToday(t time.Time) bool {
	ty, tm, td := t.Date()
	ny, nm, nd := Now.Date()
	return ty == ny && tm == nm && td == nd
}
// isTomorrow reports whether t falls on the calendar date after Now.
func isTomorrow(t time.Time) bool {
	ty, tm, td := t.Date()
	ny, nm, nd := Now.AddDate(0, 0, 1).Date()
	return ty == ny && tm == nm && td == nd
}
// isPastDue reports whether the deadline t is strictly before the current
// reference time Now.
func isPastDue(t time.Time) bool {
	return t.Before(Now)
}
// translateToDates converts each textual date expression in vals into
// concrete times relative to t. The range keywords this_week / next_week /
// last_week each append a [begin, begin+7d) pair; a blank first value is an
// indefinite past date and a blank second value an indefinite future date,
// so open-ended due-before/due-after filters work; anything else is
// delegated to Parser.ParseDateTime.
func translateToDates(t time.Time, vals ...string) []time.Time {
	times := []time.Time{}
	p := Parser{}
	for i, val := range vals {
		//Interpret blank values to support filter for due after and due before
		if val == "" {
			if i == 0 {
				//Treat blank begin date as an indefinite past date (-100 years)
				times = append(times, bod(t).AddDate(-100, 0, 0))
				continue
			} else if i == 1 {
				//Treat blank end date as an indefinite future date (+100 years)
				times = append(times, bod(t).AddDate(100, 0, 0))
				continue
			}
		}
		// Go switch cases do not fall through; the explicit `break`
		// statements in the original were redundant and have been removed.
		switch {
		case strings.HasPrefix(val, "this_week"):
			begin := bow(t)
			times = append(times, begin, begin.AddDate(0, 0, 7))
		case strings.HasPrefix(val, "next_week"):
			begin := bow(t).AddDate(0, 0, 7)
			times = append(times, begin, begin.AddDate(0, 0, 7))
		case strings.HasPrefix(val, "last_week"):
			begin := bow(t).AddDate(0, 0, -7)
			times = append(times, begin, begin.AddDate(0, 0, 7))
		default:
			//If not blank or one of the range terms, parse for day of week or relative references
			times = append(times, p.ParseDateTime(val, t))
		}
	}
	return times
}
// inSliceOneNotSliceTwo returns the elements of s1 that do not occur in s2,
// preserving s1's order (duplicates in s1 are kept).
func inSliceOneNotSliceTwo(s1, s2 []string) []string {
	exclude := make(map[string]bool, len(s2))
	for _, v := range s2 {
		exclude[v] = true
	}
	res := []string{}
	for _, v := range s1 {
		if !exclude[v] {
			res = append(res, v)
		}
	}
	return res
}
// getModifiedTime returns the todo's modified time; when that value is
// unparsable it falls back to the created time, and when no modified date is
// set at all it returns Now.
func getModifiedTime(todo *Todo) time.Time {
	if len(todo.ModifiedDate) == 0 {
		return Now
	}
	modTime, err := time.Parse(time.RFC3339, todo.ModifiedDate)
	if err != nil {
		created, _ := time.Parse(time.RFC3339, todo.CreatedDate)
		return created
	}
	return modTime
}
func stringToTime(val string) time.Time {
if val != "" {
parsedTime, _ := time.Parse(time.RFC3339, val)
return parsedTime
} else {
parsedTime, _ := time.Parse(time.RFC3339, "1900-01-01T00:00:00+00:00")
return parsedTime
}
}
func timeToString(val time.Time) string {
formatted := val.Format(time.RFC3339)
return formatted
}
func timeToSimpleDateString(val time.Time) string {
return val.Format("2006-01-02")
}
// newUUID generates a random version-4 UUID per RFC 4122 and returns its
// canonical 8-4-4-4-12 hex form, or the error from the system RNG.
func newUUID() (string, error) {
	var uuid [16]byte
	if _, err := io.ReadFull(rand.Reader, uuid[:]); err != nil {
		return "", err
	}
	// variant bits; see RFC 4122 section 4.1.1
	uuid[8] = uuid[8]&^0xc0 | 0x80
	// version 4 (pseudo-random); see RFC 4122 section 4.1.3
	uuid[6] = uuid[6]&^0xf0 | 0x40
	return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil
}
// calcAllExecOrder assigns an execution order (normalized to 0..1) to every
// todo, derived from its rank in the package-level Priority map; unknown
// priorities sort last.
func calcAllExecOrder(todos []*Todo) {
	priorityMap := Priority
	numPs := len(priorityMap)
	// Enforce a minimum spacing of 10 between priority levels: with too few
	// levels the resulting values would never rise to the top.
	spacing := numPs
	if spacing < 10 {
		spacing = 10
	}
	for _, todo := range todos {
		rank, known := priorityMap[todo.Priority]
		if !known {
			rank = numPs // unknown priority values sort last
		}
		weight := 100 - rank*spacing // assumes fewer than 10 priorities
		calcExecOrder(todo, float64(weight)/100.0)
	}
}
// calcExecOrder computes t.ExecOrder from the priority weight p (0..1) and
// the ratio of estimated effort days to days between Now and the due date,
// clamped to at most .999. The result is memoized: a non-zero ExecOrder is
// left untouched.
func calcExecOrder(t *Todo, p float64) {
	//ExecOrder calculated only if needed. Needed in multiple places, so check
	//if already calculated.
	if t.ExecOrder == 0 {
		//Get days til due values
		d := 0
		if len(t.Due) > 0 {
			tmpTime, err := time.Parse(time.RFC3339, t.Due)
			if err == nil {
				dueTime := tmpTime.Unix()
				now := Now.Unix()
				diff := now - dueTime
				// Absolute distance in whole days, floored at 1 so overdue
				// and due-today items do not explode the ratio.
				d = int(math.Abs(float64(diff) / (60 * 60 * 24)))
				if d < 1 {
					d = 1
				}
			}
		}
		//Calc execution order: priority * (effort days / due (available) days)
		//(effort days / due (available) days) gives a percentage
		//priority will be a value between .01 and 1
		//Result will be a pct of a pct
		if t.EffortDays == 0 {
			t.EffortDays = 1
		}
		// NOTE(review): when t.Due is empty or unparsable, d stays 0 and the
		// division yields +Inf, which the clamp below caps at .999 — confirm
		// this is the intended "no due date sorts first" behaviour.
		t.ExecOrder = float64(p) * (t.EffortDays / float64(d))
		if t.ExecOrder > .999 {
			t.ExecOrder = .999
		}
	}
}
|
package data
import (
"github.com/bububa/oppo-omni/model"
)
// QQuickAppGameListResponse is the envelope for the quick-app game report
// list endpoint.
type QQuickAppGameListResponse struct {
	model.BaseResponse
	// Data carries the paged report rows. The tag previously misspelled the
	// option as "omitemtpy", which encoding/json silently ignores, so a nil
	// Data was emitted as "data":null; "omitempty" restores the intent.
	Data *QQuickAppGameListResult `json:"data,omitempty"`
}
// QQuickAppGameListResult is the paged payload: the page's item count, the
// total match count, and the rows themselves.
type QQuickAppGameListResult struct {
	ItemCount  int64                   `json:"itemCount,omitempty"`
	TotalCount int64                   `json:"totalCount,omitempty"`
	Items      []QQuickAppGameListItem `json:"items,omitempty"`
}
// QQuickAppGameListItem is a single row of the quick-app game report.
// (Field comments translated from the original Chinese.)
type QQuickAppGameListItem struct {
StatTime int64 `json:"statTime,omitempty"` // report time, unix timestamp
PlanID uint64 `json:"planId,omitempty"` // plan ID
PlanName string `json:"planName,omitempty"` // plan name
GroupID uint64 `json:"groupId,omitempty"` // ad group ID
GroupName string `json:"groupName,omitempty"` // ad group name
AdID uint64 `json:"adId,omitempty"` // ad ID
AdName string `json:"adName,omitempty"` // ad name
Keywords string `json:"keywords,omitempty"` // keywords
AppID int64 `json:"appId,omitempty"` // app ID
AppName string `json:"appName,omitempty"` // app name
AccCost int64 `json:"accCost,omitempty"` // spend (unit: fen, i.e. 1/100 yuan)
Ecpm string `json:"ecpm,omitempty"` // eCPM
AdPrice int64 `json:"adPrice,omitempty"` // actual amount charged for the ad
ExposeNums int64 `json:"exposeNums,omitempty"` // impression count
ClickNums int64 `json:"clickNums,omitempty"` // click count
ClickRate string `json:"clickRate,omitempty"` // click-through rate
ClickPrice string `json:"clickPrice,omitempty"` // average cost per click
ActiveNums int64 `json:"activeNums,omitempty"` // number of activated users
ActiveRate string `json:"activeRate,omitempty"` // activation rate
ActivePrice string `json:"activePrice,omitempty"` // cost per activation
NewUserBuyLtv1 float64 `json:"newUserBuyLtv1,omitempty"` // ad-monetization LTV, day 1
NewUserBuyLtv3 float64 `json:"newUserBuyLtv3,omitempty"` // ad-monetization LTV, day 3
NewUserBuyLtv7 float64 `json:"newUserBuyLtv7,omitempty"` // ad-monetization LTV, day 7
NewUserBuyLtv30 float64 `json:"newUserBuyLtv30,omitempty"` // ad-monetization LTV, day 30
NewUserBuyRoi1 string `json:"newUserBuyRoi1,omitempty"` // ad-monetization ROI, day 1
NewUserBuyRoi3 string `json:"newUserBuyRoi3,omitempty"` // ad-monetization ROI, day 3
NewUserBuyRoi7 string `json:"newUserBuyRoi7,omitempty"` // ad-monetization ROI, day 7
NewUserBuyRoi30 string `json:"newUserBuyRoi30,omitempty"` // ad-monetization ROI, day 30
BuyUserIncome0 float64 `json:"buyUserIncome0,omitempty"` // first-day ad revenue
NewPayUserNums int64 `json:"newPayUserNums,omitempty"` // number of new paying users
PayTransferRate string `json:"payTransferRate,omitempty"` // payment conversion rate
PayCost float64 `json:"payCost,omitempty"` // cost per paying user
}
|
package models
// OperateChan is the channel on which OperateModel operations are
// delivered to whatever consumer drains it. It is declared nil here;
// presumably initialized elsewhere before use — confirm with callers.
var OperateChan chan OperateModel
// OperateModel describes a single keyed operation: Operate names the
// action to perform, Key identifies the target, and Value carries the
// raw payload bytes.
type OperateModel struct {
Operate string // operation name — presumably a verb such as "set"; confirm with consumers of OperateChan
Key string // key the operation applies to
Value []byte // raw value payload
}
|
package inject
import (
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"testing"
)
// Test_datadogMutator_mutate verifies datadogMutator.mutate: it must be a
// no-op when the mutator is disabled or when the envoy tracing-config
// volume already exists on the pod, and otherwise inject the
// config-writing init container plus the emptyDir tracing-config volume.
func Test_datadogMutator_mutate(t *testing.T) {
// Resource quantities expected on the injected init container.
cpuLimits, _ := resource.ParseQuantity("100m")
cpuRequests, _ := resource.ParseQuantity("10m")
memoryLimits, _ := resource.ParseQuantity("64Mi")
memoryRequests, _ := resource.ParseQuantity("32Mi")
type fields struct {
mutatorConfig datadogMutatorConfig
enabled bool
}
type args struct {
pod *corev1.Pod
}
// Table-driven cases: each entry supplies the mutator config, an input
// pod, and the full pod expected after mutation.
tests := []struct {
name string
fields fields
args args
wantPod *corev1.Pod
wantErr error
}{
{
// Disabled mutator must leave the pod untouched.
name: "no-op when disabled",
fields: fields{
mutatorConfig: datadogMutatorConfig{
datadogAddress: "127.0.0.1",
datadogPort: "8080",
},
enabled: false,
},
args: args{
pod: &corev1.Pod{
Spec: corev1.PodSpec{},
},
},
wantPod: &corev1.Pod{
Spec: corev1.PodSpec{},
},
},
{
// Idempotency: a pod that already carries the tracing-config
// volume must not be mutated again.
name: "no-op when already contain envoy tracing config volume",
fields: fields{
mutatorConfig: datadogMutatorConfig{
datadogAddress: "127.0.0.1",
datadogPort: "8080",
},
enabled: true,
},
args: args{
pod: &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: envoyTracingConfigVolumeName,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
},
},
},
wantPod: &corev1.Pod{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: envoyTracingConfigVolumeName,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
},
},
},
{
// Happy path: an empty pod gains the busybox init container that
// writes the datadog tracing YAML, plus the emptyDir volume it
// mounts at /tmp/envoy. The heredoc below is matched exactly by
// the assertion — do not reformat it.
name: "inject sidecar and volume",
fields: fields{
mutatorConfig: datadogMutatorConfig{
datadogAddress: "127.0.0.1",
datadogPort: "8080",
},
enabled: true,
},
args: args{
pod: &corev1.Pod{
Spec: corev1.PodSpec{},
},
},
wantPod: &corev1.Pod{
Spec: corev1.PodSpec{
InitContainers: []corev1.Container{
{
Name: "inject-datadog-config",
Image: "busybox",
ImagePullPolicy: "IfNotPresent",
Command: []string{
"sh",
"-c",
`cat <<EOF >> /tmp/envoy/envoyconf.yaml
tracing:
http:
name: envoy.tracers.datadog
config:
collector_cluster: datadog_agent
service_name: envoy
static_resources:
clusters:
- name: datadog_agent
connect_timeout: 1s
type: strict_dns
lb_policy: round_robin
load_assignment:
cluster_name: datadog_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 8080
EOF
cat /tmp/envoy/envoyconf.yaml
`,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: envoyTracingConfigVolumeName,
MountPath: "/tmp/envoy",
},
},
Resources: corev1.ResourceRequirements{
Limits: corev1.ResourceList{
"cpu": cpuLimits,
"memory": memoryLimits,
},
Requests: corev1.ResourceList{
"cpu": cpuRequests,
"memory": memoryRequests,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: envoyTracingConfigVolumeName,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &datadogMutator{
mutatorConfig: tt.fields.mutatorConfig,
enabled: tt.fields.enabled,
}
// Mutate a deep copy so the shared table entry is never modified.
pod := tt.args.pod.DeepCopy()
err := m.mutate(pod)
if tt.wantErr != nil {
assert.EqualError(t, err, tt.wantErr.Error())
} else {
assert.NoError(t, err)
assert.True(t, cmp.Equal(tt.wantPod, pod), "diff", cmp.Diff(tt.wantPod, pod))
}
})
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.