text
stringlengths 11
4.05M
|
|---|
package main
import (
"fmt"
"os"
)
// first prints the marker for the first step of the defer demo.
func first() {
	fmt.Print("1st\n")
}
// second prints the marker for the deferred second step of the demo.
func second() {
	fmt.Print("2nd\n")
}
// main demonstrates defer ordering: the deferred call runs only after
// the surrounding function body has finished.
func main() {
	/* defer schedules a function to be called when the function that
	contains the defer statement finishes running. */
	// Output is therefore "1st" followed by "2nd".
	defer second()
	first()
	// run function
	testDefer()
}
// defer is commonly used to release system resources once they are no
// longer needed, as shown here with a file handle.
func testDefer() {
	// Open the file.
	// NOTE(review): `filename` is not defined anywhere in this snippet and
	// the open error is discarded — confirm intent before reusing this code.
	f, _ := os.Open(filename)
	// Close the file when this function returns.
	defer f.Close()
}
|
package dbsrv
import (
"github.com/empirefox/esecend/front"
"github.com/empirefox/reform"
"github.com/mcuadros/go-defaults"
"gopkg.in/doug-martin/goqu.v3"
)
// SaveProfile upserts the singleton profile row (its primary key is
// forced to 1) and refreshes the in-memory cached copy on success.
func (s *DbService) SaveProfile(p *front.Profile) error {
	// The profile is a singleton; pin its primary key.
	p.ID = 1
	if err := s.GetDB().Update(p); err != nil {
		// Any error other than "row not found" is fatal.
		if err != reform.ErrNoRows {
			return err
		}
		// The row does not exist yet — insert it instead.
		if err = s.insertProfile(p); err != nil {
			return err
		}
	}
	s.SetProfile(p)
	return nil
}
// LoadProfile reads the singleton profile (primary key 1) from the
// database into memory, creating it with struct defaults if it does not
// exist yet. It must be executed after the DbService has been created.
func (s *DbService) LoadProfile() error {
	p := new(front.Profile)
	if err := s.GetDB().FindByPrimaryKeyTo(p, 1); err != nil {
		// Any error other than "row not found" is fatal.
		if err != reform.ErrNoRows {
			return err
		}
		// Missing row: populate default field values, then insert it.
		defaults.SetDefaults(p)
		if err = s.insertProfile(p); err != nil {
			return err
		}
	}
	s.SetProfile(p)
	return nil
}
// insertProfile inserts p and, when the database assigned an
// auto-increment ID other than 1, rewrites the stored row's primary key
// back to 1 so the profile stays a singleton.
func (s *DbService) insertProfile(p *front.Profile) error {
	db := s.GetDB()
	if err := db.Insert(p); err != nil {
		return err
	}
	if p.ID != 1 {
		// The insert received a generated ID; force the stored PK back to 1.
		// NOTE(review): p.ID still holds the generated value in memory after
		// this UPDATE — confirm callers do not cache the stale ID.
		sql, args, err := s.DS.From(front.ProfileTable.Name()).
			Where(goqu.I(front.ProfileTable.PK()).Eq(p.ID)).
			ToUpdateSql(map[string]interface{}{front.ProfileTable.PK(): 1})
		if err != nil {
			return err
		}
		_, err = db.DsExec(front.ProfileTable, sql, args...)
		if err != nil {
			return err
		}
	}
	return nil
}
// SetProfile replaces the cached profile with a copy of *p under the
// write lock.
func (s *DbService) SetProfile(p *front.Profile) {
	s.muProfile.Lock()
	defer s.muProfile.Unlock()
	s.profile = *p
}
// Profile returns a snapshot copy of the cached profile taken under the
// read lock.
func (s *DbService) Profile() front.Profile {
	s.muProfile.RLock()
	snapshot := s.profile
	s.muProfile.RUnlock()
	return snapshot
}
|
package main
import "log"
import zmq "github.com/pebbe/zmq3"
import "fmt"
import "time"
import "sync"
// rrecv binds the ROUTER socket and endlessly logs every two-frame
// message (sender identity header + body) it receives.
func rrecv(socket *zmq.Socket) {
	log.Println("hooking up router...")
	socket.SetIdentity("router")
	socket.Bind("tcp://127.0.0.1:9999")
	for {
		// ROUTER sockets prepend the sender's identity as the first frame.
		header, _ := socket.Recv(0)
		body, _ := socket.Recv(0)
		log.Println("router received from ", header, " body: ", body)
	}
}
// drecv polls the dealer socket and logs messages received from the
// router. The router in this demo never sends anything, so this loop
// will not produce output.
func drecv(socket *zmq.Socket, m *sync.Mutex) {
	poller := zmq.NewPoller()
	poller.Add(socket, zmq.POLLIN)
	for {
		// Block indefinitely until the socket becomes readable.
		sockets, _ := poller.Poll(-1)
		for _, socket := range sockets {
			// The mutex serializes socket access with hub's sends on
			// another goroutine.
			m.Lock()
			msg, _ := socket.Socket.Recv(0)
			log.Println("drecv ", msg)
			m.Unlock()
		}
	}
}
// dealer endlessly pumps identical messages tagged with id into ch; the
// hub goroutine forwards them to the router.
func dealer(id string, ch chan string) {
	// The payload never varies for a given id, so build it once up front.
	msg := fmt.Sprintf("%s:*************************", id)
	for {
		ch <- msg
	}
}
// hub owns the DEALER socket: it connects to the router, starts drecv
// for replies, and forwards every message arriving on ch to the router.
func hub(ch chan string, m *sync.Mutex) {
	dealer, _ := zmq.NewSocket(zmq.DEALER)
	dealer.SetIdentity("dealer")
	dealer.Connect("tcp://127.0.0.1:9999")
	go drecv(dealer, m)
	for {
		msg := <- ch
		log.Println("hub: ", msg)
		// Guard the socket: drecv may be reading it on another goroutine.
		m.Lock()
		dealer.SendMessageDontwait(msg)
		m.Unlock()
	}
}
// main wires the demo together: one ROUTER socket, one hub-owned DEALER
// socket, and three producer goroutines, then sleeps so they can run.
func main() {
	m := &sync.Mutex{}
	//create router
	router, _ := zmq.NewSocket(zmq.ROUTER)
	//deal, _ := zmq.NewSocket(zmq.DEALER)
	ch := make(chan string)
	go hub(ch, m)
	go rrecv(router)
	go dealer("dealerA", ch)
	go dealer("dealerB", ch)
	go dealer("dealerC", ch)
	// Keep the process alive; all work happens in the goroutines above.
	time.Sleep(3000 * time.Second)
}
|
package msaevents
import (
	"encoding/json"
	"fmt"
	"reflect"
)
// EventType discriminates the kind of event carried in an Event envelope.
type EventType string

// Known event type values.
const (
	EventTypeCreatedUser             EventType = "CREATED_USER"
	EventTypeUpdatedUser             EventType = "UPDATED_USER"
	EventTypeCreatedPasswordLost     EventType = "CREATED_PASSWORD_LOST"
	EventTypeCreatedWall             EventType = "CREATED_WALL"
	EventTypeCreatedPrivateMessage   EventType = "CREATED_PRIVATE_MESSAGE"
	EventTypeCreatedComment          EventType = "CREATED_COMMENT"
	EventTypeCreatedLike             EventType = "CREATED_LIKE"
	EventTypeCreatedFriendRequest    EventType = "CREATED_FRIEND_REQUEST"
	EventTypeCreatedFriend           EventType = "CREATED_FRIEND"
	EventTypeCreatedPhoto            EventType = "CREATED_PHOTO"
	EventTypeContentAbuseReport      EventType = "CONTENT_ABUSE_REPORT"
	EventTypeUserRegistrationGranted EventType = "USER_REGISTRATION_GRANTED"
	EventTypeUserAccountEnabled      EventType = "USER_ACCOUNT_ENABLED"
	EventTypeUserAccountDisabled     EventType = "USER_ACCOUNT_DISABLED"
	EventTypeTrackingLinkClick       EventType = "TRACKING_LINK_CLICK"

	// Deprecated: misspelled alias kept for backward compatibility.
	// Use EventTypeUserAccountDisabled instead.
	EventTypeserAccountDisabled = EventTypeUserAccountDisabled
)
// Events maps each EventType to a zero value of the concrete payload
// struct used to decode it. Only CREATED_USER is registered here.
var (
	Events = map[EventType]interface{}{
		EventTypeCreatedUser: EventCreatedUser{},
	}
)
// Event is the envelope common to every message: the originating config
// and the payload type discriminator.
type Event struct {
	ConfigId  string    `json:"config_id"`
	EventType EventType `json:"event_type"`
}

// Unmarshal decodes data into a fresh instance of the concrete payload
// type registered in Events for e.EventType, returning an error when the
// event type is unknown.
func (e *Event) Unmarshal(data []byte) (interface{}, error) {
	if tmpl, ok := Events[e.EventType]; ok {
		// Allocate a new value of the registered concrete type. The previous
		// code unmarshaled into a pointer to an interface{} copy, which makes
		// encoding/json produce a map[string]interface{} instead of the
		// registered struct type.
		v := reflect.New(reflect.TypeOf(tmpl))
		err := json.Unmarshal(data, v.Interface())
		return v.Elem().Interface(), err
	}
	return nil, fmt.Errorf("no payload type registered for event type '%s'", e.EventType)
}
|
package db
import (
"strconv"
"time"
"cloud.google.com/go/datastore"
"github.com/steam-authority/steam-authority/helpers"
)
// Change is a datastore entity describing one changelist record: when it
// was created and which apps/packages it touched.
type Change struct {
	CreatedAt time.Time    `datastore:"created_at,noindex"`
	ChangeID  int          `datastore:"change_id"`
	Apps      []ChangeItem `datastore:"apps,noindex"`
	Packages  []ChangeItem `datastore:"packages,noindex"`
}

// ChangeItem is a single app or package referenced by a Change.
type ChangeItem struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}
// GetKey returns the datastore key for this change: kind KindChange,
// keyed by the decimal change ID.
func (change Change) GetKey() (key *datastore.Key) {
	return datastore.NameKey(KindChange, strconv.Itoa(change.ChangeID), nil)
}

// GetName returns a human-readable label for the change.
func (change Change) GetName() (name string) {
	return "Change " + strconv.Itoa(change.ChangeID)
}

// GetTimestamp returns the creation time as a Unix timestamp.
func (change Change) GetTimestamp() (int64) {
	return change.CreatedAt.Unix()
}

// GetNiceDate returns the creation time formatted for display.
func (change Change) GetNiceDate() (string) {
	return change.CreatedAt.Format(helpers.DateYearTime)
}

// GetPath returns the site-relative URL for this change.
func (change Change) GetPath() string {
	return "/changes/" + strconv.Itoa(change.ChangeID)
}
// GetAppIDs collects the IDs of every app touched by the change.
func (change Change) GetAppIDs() (ids []int) {
	for i := range change.Apps {
		ids = append(ids, change.Apps[i].ID)
	}
	return ids
}

// GetPackageIDs collects the IDs of every package touched by the change.
func (change Change) GetPackageIDs() (ids []int) {
	for i := range change.Packages {
		ids = append(ids, change.Packages[i].ID)
	}
	return ids
}
// OutputForJSON flattens the change into the positional array shape
// consumed by the site's JSON endpoints: ID, Unix time, display date,
// apps, packages.
func (change Change) OutputForJSON() (output []interface{}) {
	return []interface{}{
		change.ChangeID,
		change.CreatedAt.Unix(),
		change.CreatedAt.Format(helpers.DateYearTime),
		change.Apps,
		change.Packages,
	}
}
// GetChange loads a single change by its string ID from datastore.
// A field-mismatch error caused solely by the removed legacy column
// "updated_at" is tolerated; any other error is returned.
func GetChange(id string) (change Change, err error) {
	client, context, err := GetDSClient()
	if err != nil {
		return change, err
	}
	key := datastore.NameKey(KindChange, id, nil)
	change = Change{}
	err = client.Get(context, key, &change)
	if err != nil {
		if err2, ok := err.(*datastore.ErrFieldMismatch); ok {
			// Columns that used to exist on this kind; mismatches on them
			// are expected for entities written by older code.
			old := []string{
				"updated_at",
			}
			if !helpers.SliceHasString(old, err2.FieldName) {
				return change, err2
			}
		} else {
			return change, err
		}
	}
	return change, nil
}
// checkForMissingChangeFields swallows datastore field-mismatch errors
// for columns that were deliberately removed from the Change kind,
// passing every other error through unchanged.
func checkForMissingChangeFields(err error) error {
	if err == nil {
		return nil
	}
	if err2, ok := err.(*datastore.ErrFieldMismatch); ok {
		// Columns that once existed on this kind and have been dropped.
		removedColumns := []string{
			"updated_at",
			"apps",
			"packages",
		}
		if helpers.SliceHasString(removedColumns, err2.FieldName) {
			return nil
		}
	}
	return err
}
|
package main
import (
"os"
)
// dirExists reports whether path exists and is a directory.
// A stat failure (not found, permission denied, ...) counts as absent.
func dirExists(path string) bool {
	stat, err := os.Stat(path)
	return err == nil && stat.IsDir()
}
// fileExists reports whether path exists and is not a directory.
// A stat failure (not found, permission denied, ...) counts as absent.
func fileExists(path string) bool {
	stat, err := os.Stat(path)
	return err == nil && !stat.IsDir()
}
|
package main
import (
"context"
"google.golang.org/protobuf/encoding/protojson"
"log"
"net"
"net/http"
"strconv"
tdlpb "github.com/FunnyDevP/example-grpc-gateway/api/proto/todolist"
td "github.com/FunnyDevP/example-grpc-gateway/internal/todolist"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
)
// main starts the gRPC todolist server on :8080 and an HTTP
// gRPC-Gateway reverse proxy in front of it on :8090.
func main() {
	// Create a listener on TCP port
	lis, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatalln("Failed to listen:", err)
	}
	// Create a gRPC server object
	s := grpc.NewServer()
	// Attach the todolist service to the server
	tdlpb.RegisterTodolistServiceServer(s, td.NewHandler())
	// Serve gRPC server
	log.Println("Serving gRPC on localhost:8080")
	go func() {
		log.Fatalln(s.Serve(lis))
	}()
	// Create a client connection to the gRPC server we just started
	// This is where the gRPC-Gateway proxies the requests
	conn, err := grpc.DialContext(
		context.Background(),
		"localhost:8080",
		grpc.WithBlock(),
		grpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalln("Failed to dial server:", err)
	}
	gwmux := runtime.NewServeMux(
		// Marshal JSON responses using the original proto field names.
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			MarshalOptions: protojson.MarshalOptions{
				UseProtoNames: true,
			},
		}),
		// Forward every incoming HTTP header to the gRPC backend unchanged.
		runtime.WithIncomingHeaderMatcher(func(s string) (string, bool) {
			return s, true
		}),
		// Let handlers override the HTTP status code via metadata.
		runtime.WithForwardResponseOption(httpResponseCodeModifier),
	)
	// register http handler
	if err := tdlpb.RegisterTodolistServiceHandler(context.Background(), gwmux, conn); err != nil {
		log.Fatalln("Failed to register gateway:", err)
	}
	gwServer := &http.Server{
		Addr:    ":8090",
		Handler: gwmux,
	}
	log.Println("Serving gRPC-Gateway on http://0.0.0.0:8090")
	log.Fatalln(gwServer.ListenAndServe())
}
// httpResponseCodeModifier lets gRPC handlers override the HTTP status
// code by setting the "x-http-code" metadata header; the metadata is
// stripped from the outgoing response before the status is written.
func httpResponseCodeModifier(ctx context.Context, w http.ResponseWriter, p proto.Message) error {
	md, ok := runtime.ServerMetadataFromContext(ctx)
	if !ok {
		// No server metadata attached: nothing to do.
		return nil
	}
	key := "x-http-code"
	// set http status code
	if vals := md.HeaderMD.Get(key); len(vals) > 0 {
		code, err := strconv.Atoi(vals[0])
		if err != nil {
			return err
		}
		// delete the headers to not expose any grpc-metadata in http response
		delete(md.HeaderMD, key)
		delete(w.Header(), "Grpc-Metadata-X-Http-Code")
		w.WriteHeader(code)
	}
	return nil
}
|
package database
import (
"context"
"fmt"
"github.com/jackc/pgconn"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/log/logrusadapter"
"github.com/jackc/pgx/v4/pgxpool"
_ "github.com/lib/pq"
"github.com/shysa/TP_proxy/config"
"github.com/sirupsen/logrus"
"os"
)
// DB wraps a pgx connection pool together with the configuration used
// to open it.
type DB struct {
	dbPool *pgxpool.Pool
	config *config.ConfDB
}

// NewDB builds an unopened DB holder; call Open before using it.
func NewDB(config *config.ConfDB) *DB {
	return &DB{
		config: config,
	}
}
// Open builds the DSN from the stored configuration, attaches a JSON
// logrus logger at error level, and connects the pgx pool.
func (db *DB) Open() error {
	dsn := fmt.Sprintf(
		"user=%s password=%s host=%s dbname=%s sslmode=%s pool_max_conns=%s",
		db.config.Username,
		db.config.Password,
		db.config.Host,
		db.config.DbName,
		db.config.SslMode,
		db.config.MaxConn,
	)
	conf, err := pgxpool.ParseConfig(dsn)
	if err != nil {
		return err
	}
	// Log pgx activity as JSON to stderr, errors only.
	logger := &logrus.Logger{
		Out:          os.Stderr,
		Formatter:    new(logrus.JSONFormatter),
		Hooks:        make(logrus.LevelHooks),
		Level:        logrus.ErrorLevel,
		ExitFunc:     os.Exit,
		ReportCaller: false,
	}
	conf.ConnConfig.Logger = logrusadapter.NewLogger(logger)
	db.dbPool, err = pgxpool.ConnectConfig(context.Background(), conf)
	if err != nil {
		return err
	}
	return nil
}
// Close shuts down the underlying connection pool.
func (db *DB) Close() {
	db.dbPool.Close()
}

// Begin starts a new transaction on the pool.
func (db *DB) Begin(ctx context.Context) (pgx.Tx, error) {
	return db.dbPool.Begin(ctx)
}

// Exec runs a statement that returns no rows.
func (db *DB) Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error) {
	return db.dbPool.Exec(ctx, sql, arguments...)
}

// Query runs a statement returning multiple rows.
func (db *DB) Query(ctx context.Context, sql string, optionsAndArgs ...interface{}) (pgx.Rows, error) {
	return db.dbPool.Query(ctx, sql, optionsAndArgs...)
}

// QueryRow runs a statement expected to return at most one row.
func (db *DB) QueryRow(ctx context.Context, sql string, optionsAndArgs ...interface{}) pgx.Row {
	return db.dbPool.QueryRow(ctx, sql, optionsAndArgs...)
}

// CopyFrom bulk-loads rows into table using the COPY protocol.
func (db *DB) CopyFrom(ctx context.Context, table pgx.Identifier, cols []string, src pgx.CopyFromSource) (int64, error) {
	return db.dbPool.CopyFrom(ctx, table, cols, src)
}
|
package models
import (
"strings"
"time"
)
// IntellectualObjectForPharos is the IntellectualObject in the format
// that Pharos accepts for POST/create.
type IntellectualObjectForPharos struct {
	Identifier             string `json:"identifier"`
	BagName                string `json:"bag_name"`
	BagGroupIdentifier     string `json:"bag_group_identifier"`
	InstitutionId          int    `json:"institution_id"`
	Title                  string `json:"title"`
	Description            string `json:"description"`
	AltIdentifier          string `json:"alt_identifier"`
	Access                 string `json:"access"`
	DPNUUID                string `json:"dpn_uuid"`
	ETag                   string `json:"etag"`
	State                  string `json:"state"`
	StorageOption          string `json:"storage_option"`
	SourceOrganization     string `json:"source_organization"`
	BagItProfileIdentifier string `json:"bagit_profile_identifier"`
}
// NewIntellectualObjectForPharos copies the fields of obj into the
// POST/create shape Pharos expects, lowercasing the Access value.
func NewIntellectualObjectForPharos(obj *IntellectualObject) *IntellectualObjectForPharos {
	return &IntellectualObjectForPharos{
		Identifier:             obj.Identifier,
		BagName:                obj.BagName,
		BagGroupIdentifier:     obj.BagGroupIdentifier,
		InstitutionId:          obj.InstitutionId,
		Title:                  obj.Title,
		Description:            obj.Description,
		AltIdentifier:          obj.AltIdentifier,
		Access:                 strings.ToLower(obj.Access), // Note that Pharos wants lowercase
		SourceOrganization:     obj.SourceOrganization,
		BagItProfileIdentifier: obj.BagItProfileIdentifier,
		DPNUUID:                obj.DPNUUID,
		ETag:                   obj.ETag,
		State:                  obj.State,
		StorageOption:          obj.StorageOption,
	}
}
// GenericFileForPharos is a special subset of GenericFile, with special
// JSON serialization rules that conform to Rails 4 nested strong
// parameters naming conventions. When we create GenericFiles in batches,
// we need to send them in this format.
type GenericFileForPharos struct {
	Identifier           string `json:"identifier"`
	IntellectualObjectId int    `json:"intellectual_object_id"`
	FileFormat           string `json:"file_format"`
	URI                  string `json:"uri"`
	Size                 int64  `json:"size"`
	StorageOption        string `json:"storage_option"`
	// TODO: Next two items are not part of Pharos model, but they should be.
	// We need to add these to the Rails schema.
	// FileCreated  time.Time `json:"file_created"`
	// FileModified time.Time `json:"file_modified"`
	Checksums    []*ChecksumForPharos    `json:"checksums_attributes"`
	PremisEvents []*PremisEventForPharos `json:"premis_events_attributes"`
}
// NewGenericFileForPharos converts a GenericFile into the nested
// strong-parameters shape Pharos expects, translating each checksum and
// each PREMIS event into its Pharos counterpart.
func NewGenericFileForPharos(gf *GenericFile) *GenericFileForPharos {
	checksums := make([]*ChecksumForPharos, len(gf.Checksums))
	for i := range gf.Checksums {
		checksums[i] = NewChecksumForPharos(gf.Checksums[i])
	}
	events := make([]*PremisEventForPharos, len(gf.PremisEvents))
	for i := range gf.PremisEvents {
		events[i] = NewPremisEventForPharos(gf.PremisEvents[i])
	}
	return &GenericFileForPharos{
		Identifier:           gf.Identifier,
		IntellectualObjectId: gf.IntellectualObjectId,
		FileFormat:           gf.FileFormat,
		URI:                  gf.URI,
		Size:                 gf.Size,
		StorageOption:        gf.StorageOption,
		// TODO: See note above. Add these to Rails!
		// FileCreated:  gf.FileCreated,
		// FileModified: gf.FileModified,
		Checksums:    checksums,
		PremisEvents: events,
	}
}
// PremisEventForPharos is the same as PremisEvent, but omits CreatedAt
// and UpdatedAt.
type PremisEventForPharos struct {
	Id                           int       `json:"id,omitempty"`
	Identifier                   string    `json:"identifier"`
	EventType                    string    `json:"event_type"`
	DateTime                     time.Time `json:"date_time"`
	Detail                       string    `json:"detail"`
	Outcome                      string    `json:"outcome"`
	OutcomeDetail                string    `json:"outcome_detail"`
	Object                       string    `json:"object"`
	Agent                        string    `json:"agent"`
	OutcomeInformation           string    `json:"outcome_information"`
	IntellectualObjectId         int       `json:"intellectual_object_id"`
	IntellectualObjectIdentifier string    `json:"intellectual_object_identifier"`
	GenericFileId                int       `json:"generic_file_id"`
	GenericFileIdentifier        string    `json:"generic_file_identifier"`
}
// NewPremisEventForPharos copies event's fields into the Pharos-facing
// struct, dropping the timestamps Pharos does not accept.
func NewPremisEventForPharos(event *PremisEvent) *PremisEventForPharos {
	return &PremisEventForPharos{
		Id:                           event.Id,
		Identifier:                   event.Identifier,
		EventType:                    event.EventType,
		DateTime:                     event.DateTime,
		Detail:                       event.Detail,
		Outcome:                      event.Outcome,
		OutcomeDetail:                event.OutcomeDetail,
		Object:                       event.Object,
		Agent:                        event.Agent,
		OutcomeInformation:           event.OutcomeInformation,
		IntellectualObjectId:         event.IntellectualObjectId,
		IntellectualObjectIdentifier: event.IntellectualObjectIdentifier,
		GenericFileId:                event.GenericFileId,
		GenericFileIdentifier:        event.GenericFileIdentifier,
	}
}
// ChecksumForPharos is the same as Checksum, but without CreatedAt and
// UpdatedAt.
type ChecksumForPharos struct {
	Id            int       `json:"id,omitempty"` // Do not serialize zero to JSON!
	GenericFileId int       `json:"generic_file_id"`
	Algorithm     string    `json:"algorithm"`
	DateTime      time.Time `json:"datetime"`
	Digest        string    `json:"digest"`
}

// NewChecksumForPharos copies cs's fields into the Pharos-facing struct,
// dropping the timestamps Pharos does not accept.
func NewChecksumForPharos(cs *Checksum) *ChecksumForPharos {
	return &ChecksumForPharos{
		Id:            cs.Id,
		GenericFileId: cs.GenericFileId,
		Algorithm:     cs.Algorithm,
		DateTime:      cs.DateTime,
		Digest:        cs.Digest,
	}
}
// WorkItemStateForPharos is the subset of WorkItemState that Pharos
// accepts.
type WorkItemStateForPharos struct {
	Id         int    `json:"id"`
	WorkItemId int    `json:"work_item_id"`
	Action     string `json:"action"`
	State      string `json:"state"`
}

// NewWorkItemStateForPharos copies workItemState's fields into the
// Pharos-facing struct.
func NewWorkItemStateForPharos(workItemState *WorkItemState) *WorkItemStateForPharos {
	return &WorkItemStateForPharos{
		Id:         workItemState.Id,
		WorkItemId: workItemState.WorkItemId,
		Action:     workItemState.Action,
		State:      workItemState.State,
	}
}
|
package routes
import (
"fmt"
// "reflect"
dg "github.com/bwmarrin/discordgo"
"joebot/tools"
"strings"
)
/*
cID = current channel ID
cmdResList = map of commands and the corresponding responses
*/
var (
	// cmdResList maps quick-command names to canned response strings;
	// populated by BotResInit.
	cmdResList map[string]string
	// BotID is the bot's own user ID, used to ignore its own messages.
	BotID string
	// NOTE(review): a shared package-level err is a data race if handlers
	// run concurrently — confirm discordgo's dispatch model before relying
	// on this.
	err error
)
// SendMessage sends message m to channel cID, logging any delivery
// failure to both the error log file and stdout.
func SendMessage(s *dg.Session, cID string, m string) {
	// fmt.Printf("@@@: %s, %s", cID, m)
	// Use a locally-scoped error rather than the shared package-level
	// `err`: message handlers may run concurrently and writing a package
	// variable from each of them is a data race.
	if _, sendErr := s.ChannelMessageSend(cID, m); sendErr != nil {
		tools.WriteErr(sendErr)
		fmt.Println(sendErr)
	}
}
// BotResInit populates the quick-response table: commands in this map
// are answered directly with a canned string, bypassing the routing
// switch in MessageRoutes.
func BotResInit() {
	cmdResList = make(map[string]string)
	// Fill it up
	cmdResList["ourteams"] = "https://docs.google.com/spreadsheets/d/1ykMKW64o71OSfOEtx-iIa25jSZCFVRcZQ73ErXEoFpc/edit#gid=0"
	cmdResList["apoc"] = "http://soccerspirits.freeforums.net/thread/69/guide-apocalypse-player-tier-list"
	cmdResList["redditss"] = "http://reddit.com/r/soccerspirits"
	cmdResList["redditdwu"] = "http://reddit.com/r/dwunleashed"
	cmdResList["teamwork"] = "https://docs.google.com/spreadsheets/d/1x0Q4vUk_V3wUwzM5XR_66xytSbapoSFm_cHR9PYIERs/htmlview?sle=true#"
	cmdResList["chains"] = "https://ssherder.com/characters/#"
	cmdResList["help"] = "Shoutout to ssherder.com, api.lootbox.eu/documentation#/ and gkgirls.info.gf/\n\n" +
		"*General Commands:*\n**Write my own Note:** '~mynote <Text>'\n" +
		"**Read others Note:** '~note <Discord Name>'\n" +
		"**Set Alarm in this channel:** '~setalarm <Name>'\n" +
		"**Remove Alarm in this channel:** '~removealarm <Name>'\n\n" +
		"*Overwatch Commands:(Lootbox seems to be down for now)*\n" +
		"**Lookup PC Profile:** '~PCprofile <Bnet Tag>'\n" +
		"**Lookup PC Stats:** '~PCstats <Bnet Tag>'\n" +
		"**Lookup PS:** Same thing, except '~PSprofile, ~PSstats'\n" +
		"**Lookup Xbox:** Same thing, except '~Xprofile, ~Xstats'\n\n" +
		"*Soccer Spirits Commands:*\n**Lookup player info:** '~sstory, ~sstone, ~sslots, ~ssherder or ~sskills <Name>'\n" +
		"**Quick links:** '~ourteams', '~apoc', '~reddit'\n\n" +
		"*Dynasty Warriors Unleashed Commands:(Deprecated)*\n" +
		"**Lookup Officer Legendary Passives:** '~dwup <Name>'\n" +
		"**Lookup Officer Stats:** '~dwus <Name>'\n\n" +
		"*Goddess Kiss Commands:*\n**Lookup Pilot Skills:** '~gskills <Name>'\n\n" +
		"*Everything is case *insensitive!*(Except Bnet Tags)"
}
// MessageRoutes will be called (due to AddHandler) every time a new
// message is created on any channel that the authenticated bot has
// access to. It dispatches "~"-prefixed commands either to the
// quick-response map or to the matching feature route, then sends the
// resulting text back to the originating channel.
func MessageRoutes(s *dg.Session, m *dg.MessageCreate) {
	// Contents
	c := m.Content // full message sent by user
	// Meta
	cID := m.ChannelID
	sender := m.Author.Username
	// Ignore all messages created by the bot itself and anything short of "~"
	if m.Author.ID == BotID {
		return
	} else if len(c) < 2 || c[0:1] != "~" {
		return
	}
	// split message by command and arguments
	cSplit := strings.Split(c[1:], " ") // ["command", ..., ...]
	cc := cSplit[0]                     // "command"
	// Offset of the first argument character: "~" + command + one space.
	cl := len(cc) + 2
	cmdArgs := ""
	if len(cmdResList[cc]) != 0 { // if quick command
		SendMessage(s, cID, cmdResList[cc])
		return
	} else if len(cSplit) >= 2 {
		cmdArgs = c[cl:]
	}
	/*
		ROUTES
	*/
	res := ""
	switch ccLow := strings.ToLower(cc); ccLow {
	/* General */
	case "mynote":
		// With no argument, "GET" reads the sender's own note back.
		if len(cmdArgs) > 0 {
			res = myNotes(sender, cmdArgs)
		} else {
			res = myNotes(sender, "GET")
		}
	case "note":
		res = getNotes(cmdArgs)
	case "setalarm":
		res = setAlarm(cID, cmdArgs)
	case "removealarm":
		res = removeAlarm(cID, cmdArgs)
	/* Soccer Spirits */
	case "sstory":
		res = storyRouteSS(cmdArgs)
	case "sslots":
		res = slotesRouteSS(cmdArgs)
	case "ssherder":
		res = ssherderRouteSS(cmdArgs)
	case "sskills":
		res = skillsRouteSS(cmdArgs)
	case "sstone":
		res = stoneRouteSS(cmdArgs)
	/* Overwatch */
	case "pcprofile":
		res = profileRouteOW(cmdArgs, "pc")
	case "pcstats":
		res = statsRouteOW(cmdArgs, "pc")
	case "psprofile":
		res = profileRouteOW(cmdArgs, "psn")
	case "psstats":
		res = statsRouteOW(cmdArgs, "psn")
	case "xprofile":
		res = profileRouteOW(cmdArgs, "xbl")
	case "xstats":
		res = statsRouteOW(cmdArgs, "xbl")
	/* Dynasty Warriors Unleashed */
	case "dwup":
		res = passiveRouteDWU(cmdArgs)
	case "dwus":
		res = officerRouteDWU(cmdArgs)
	/* Goddess Kiss */
	case "gskills":
		res = skillsRouteGK(cmdArgs)
	default:
		res = "Enter a valid command"
	}
	SendMessage(s, cID, res)
	return
}
|
package main
import "fmt"
// main demonstrates three classic algorithms: bubble sort, linear
// search, and binary search.
func main() {
	// 1. Sort 76,58,67,18,0,9 with bubble sort.
	var array = [6]int{76, 58, 67, 18, 0, 9}
	bubbleSort(&array)
	fmt.Println("array", array)
	// 2. Linear search.
	// NOTE(review): the array is declared with length 4 but only 3
	// elements are supplied, so name[3] is the empty string — an empty
	// input line would "match" it. Confirm this is intended.
	name := [4]string{"迪迦奥特曼", "赛罗奥特曼", "古加奥特曼"}
	var yourinput string
	fmt.Println("请输入要查找的光之子:")
	fmt.Scanln(&yourinput)
	// First approach (kept for reference, not recommended):
	// for i := 0; i < len(name); i++ {
	// 	if yourinput == name[i] {
	// 		fmt.Println("找到了,请相信光吧!")
	// 		break
	// 	} else if i == len(name)-1 {
	// 		// detecting "not found" on the last index — not recommended
	// 		fmt.Println("没有找到光之子")
	// 	}
	// }
	// Second (recommended) approach: scan with a found flag.
	var isContains = false
	for i := 0; i < len(name); i++ {
		if yourinput == name[i] {
			isContains = true
		}
	}
	if isContains {
		fmt.Println("找到了,请相信光吧!")
	} else {
		fmt.Println("没有找到光之子")
	}
	// 3. Binary search (input must be sorted).
	var array2 = [5]int{12, 23, 45, 67, 89}
	// (f(0) + f(n-1))/2 == ?
	twoF(array2, 0, 4, 23)
}
// bubbleSort sorts the fixed-size array in ascending order, in place,
// using bubble sort.
func bubbleSort(array *[6]int) {
	n := len(array)
	// After pass j, the j largest elements occupy the tail of the array.
	for j := 1; j < n; j++ {
		for i := 0; i < n-j; i++ {
			// Move the larger of each adjacent pair one slot to the right.
			if array[i] > array[i+1] {
				array[i], array[i+1] = array[i+1], array[i]
			}
		}
	}
}
//二分法查找算法,也可以用for循环来实现
// func twoF(array [5]int, start int, end int, findint int) {
// //如果左边游标大于或者等于右边游标时,就说明查找不到
// if start > end { //此处必须为 大于>,存在找到最后 start == end 【12,23】
// fmt.Println("查找不到哟")
// return
// }
// var mid = (end + start) / 2
// if findint > array[mid] {
// twoF(array, mid+1, end, findint)
// } else if findint < array[mid] {
// twoF(array, start, mid-1, findint)
// } else {
// //
// fmt.Println("找到了哟哈哈")
// }
// }
// twoF is an iterative binary search over the sorted array: it prints a
// message when findint is located and terminates silently otherwise.
// (Any recursion like the commented-out version above can be converted
// to a loop this way; recursion trades space for time, loops the reverse.)
func twoF(array [5]int, start int, end int, findint int) {
	for start <= end {
		mid := (start + end) / 2
		if array[mid] > findint {
			// Target must be strictly left of mid, so exclude mid entirely.
			// (The original shrank the range by only one element per step
			// — `end = end - 1` — which degrades the search to linear time.)
			end = mid - 1
		} else if array[mid] < findint {
			// Target must be strictly right of mid.
			start = mid + 1
		} else {
			fmt.Println("找到了")
			return
		}
	}
}
//排序和查找
// 排序是将一组数据,依指定的顺序进行排列的过程
// 排序的分类
// 1)内部排序:
// 指将需要处理的所有数据都加载到内部存储器中进行排序
// 包括(交换式排序、选择式排序法和插入式排序法)
// 2)外部排序法:
// 数据量过大,无法全部加载到内存中,需要借助外部存储进行排序。包括(合并排序法和直接合并排序法)
// 交换式排序属于内部排序法,是运用数据值比较后,依判断规则对数据位置进行交换,以达到排序的目的
// 2种方式:
// 1.冒泡排序法(Bubble sort)
// 2.快速排序法(Quick sort)
// 查找的分类
// 1)顺序查找:遍历判断相等即可
// 2)二分查找(Binaryfind):必须是有顺序的
|
package handlers
import (
	"encoding/json"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"

	"github.com/Hoovs/OpenLibraryClient/server/db"
	"github.com/gorilla/mux"
	"go.uber.org/zap"
)
// WishListHandler serves the wish-list HTTP endpoints, backed by Db and
// logging through Logger.
type WishListHandler struct {
	Logger *zap.Logger
	Db *db.DB
}
// PostWishListHandler decodes a WishListRow from the request body and
// inserts it into the database. All failures respond 400; on success
// nothing is written, so the client receives an implicit 200.
func (wh *WishListHandler) PostWishListHandler(w http.ResponseWriter, r *http.Request) {
	wh.Logger.Info("WishListHandler for post called")
	bodyRow := &db.WishListRow{}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		write(w, http.StatusBadRequest, []byte(err.Error()), wh.Logger)
		return
	}
	// Always close the request body; a close failure is only logged.
	defer func() {
		if err := r.Body.Close(); err != nil {
			wh.Logger.Error(err.Error())
		}
	}()
	err = json.Unmarshal(body, bodyRow)
	if err != nil {
		write(w, http.StatusBadRequest, []byte(err.Error()), wh.Logger)
		return
	}
	err = wh.Db.InsertRow(*bodyRow)
	if err != nil {
		// NOTE(review): database failures also map to 400 here — consider
		// whether 500 is more appropriate.
		write(w, http.StatusBadRequest, []byte(err.Error()), wh.Logger)
		return
	}
}
// GetWishListHandler looks up a wish-list row by the {wishListId} path
// variable, proxies a title search against the local search service, and
// writes the search payload followed by the row itself as JSON.
func (wh *WishListHandler) GetWishListHandler(w http.ResponseWriter, r *http.Request) {
	wh.Logger.Info("WishListHandler for get called")
	vars := mux.Vars(r)
	idStr := vars["wishListId"]
	id, err := strconv.Atoi(idStr)
	if err != nil {
		write(w, http.StatusBadRequest, []byte("Unable to parse wish list id from request"), wh.Logger)
		return
	}
	row, err := wh.Db.GetWishList(id)
	if err != nil {
		write(w, http.StatusBadRequest, []byte("Unable to fetch row"), wh.Logger)
		return
	}
	// Escape the title so spaces and special characters form a valid query
	// string (previously the raw title was concatenated unescaped).
	searchURL := "http://localhost:8080/search?q=" + url.QueryEscape(row.BookTitle)
	req, err := http.NewRequest("GET", searchURL, nil)
	if err != nil {
		// Previously ignored: a malformed URL would only surface later as a
		// confusing client.Do failure on a nil request.
		write(w, http.StatusInternalServerError, []byte(err.Error()), wh.Logger)
		return
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		write(w, http.StatusBadRequest, []byte(err.Error()), wh.Logger)
		return
	}
	// Always close the upstream body; a close failure is only logged.
	defer func() {
		if err := resp.Body.Close(); err != nil {
			wh.Logger.Error(err.Error())
		}
	}()
	v, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		write(w, http.StatusInternalServerError, []byte("unable to read body"), wh.Logger)
		return
	}
	write(w, http.StatusOK, v, wh.Logger)
	b, err := json.Marshal(row)
	if err != nil {
		// Previously ignored: a marshal failure would have written "nil".
		wh.Logger.Error(err.Error())
		return
	}
	// NOTE(review): this appends the row JSON after the proxied search
	// payload in the same response body — confirm clients expect both.
	w.Write(b)
	return
}
// DeleteWishListHandler removes the wish-list row identified by the
// {wishListId} path variable, responding 204 on success and 400 on any
// failure.
func (wh *WishListHandler) DeleteWishListHandler(w http.ResponseWriter, r *http.Request) {
	wh.Logger.Info("WishListHandler for delete called")
	vars := mux.Vars(r)
	idStr := vars["wishListId"]
	id, err := strconv.Atoi(idStr)
	if err != nil {
		write(w, http.StatusBadRequest, []byte("Unable to parse wish list id from request"), wh.Logger)
		return
	}
	if err := wh.Db.DeleteWishList(id); err != nil {
		wh.Logger.Error(err.Error())
		write(w, http.StatusBadRequest, []byte("Unable to delete from wish list"), wh.Logger)
		return
	}
	write(w, http.StatusNoContent, nil, wh.Logger)
	return
}
|
package handlers
import (
"github.com/labstack/echo"
"github.com/stretchr/testify/assert"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
)
// TestHandler_ReportObjectDist walks the object-distance report endpoint
// through its validation errors (missing client param, missing start
// date, missing end date) and finally a successful request, asserting
// the exact JSON payload produced from the mock store.
func TestHandler_ReportObjectDist(t *testing.T) {
	q := make(url.Values)
	endpoint := "/report/object-dist"
	h := &Handler{DB: &mockStore{}}
	e := echo.New()
	e.Logger.SetOutput(ioutil.Discard)
	rec := httptest.NewRecorder()
	// No client path parameter at all -> client validation error.
	req := createReq(http.MethodGet, endpoint, q.Encode(), nil)
	c := e.NewContext(req, rec)
	assert.EqualError(t, h.ReportObjectDist(c), "code=400, message=Incorrect client number")
	// Valid client, but the start_date query parameter is missing.
	req = createReq(http.MethodGet, endpoint, q.Encode(), nil)
	c = e.NewContext(req, rec)
	c.SetParamNames("client")
	c.SetParamValues("156")
	assert.EqualError(t, h.ReportObjectDist(c), "code=400, message=Incorrect start date format")
	// start_date present, end_date still missing.
	q.Set("start_date", "2019-11-19T15:00:01Z")
	req = createReq(http.MethodGet, endpoint, q.Encode(), nil)
	c = e.NewContext(req, rec)
	c.SetParamNames("client")
	c.SetParamValues("156")
	assert.EqualError(t, h.ReportObjectDist(c), "code=400, message=Incorrect end date format")
	// Fully-specified request succeeds with the mock store's data.
	q.Set("end_date", "2019-11-20T15:00:01Z")
	req = createReq(http.MethodGet, endpoint, q.Encode(), nil)
	c = e.NewContext(req, rec)
	c.SetParamNames("client")
	c.SetParamValues("156")
	if assert.NoError(t, h.ReportObjectDist(c)) {
		assert.Equal(t, http.StatusOK, rec.Code)
		assert.Equal(t, `{"report":[{"id":1,"name":"object 1","first_point_timestamp":"2019-11-19T13:35:01Z","last_point_timestamp":"2019-11-19T14:35:01Z","mileage":30.5}]}`,
			strings.Trim(rec.Body.String(), "\n"))
	}
}
|
package main
import (
"crypto/tls"
"encoding/json"
//"fmt"
"github.com/emicklei/forest"
"net/http"
"testing"
)
// shw is the shared forest test client pointed at the local API server.
var shw *forest.APITesting
// IDs and state captured by the create tests and reused by later tests.
var testPassID string
var testCompletePassID string
var testMutateList []interface{}
// init builds an HTTPS client that skips certificate verification so the
// suite can talk to the self-signed local.pass.ninja endpoint.
func init() {
	cfg := &tls.Config{
		InsecureSkipVerify: true,
	}
	tr := &http.Transport{TLSClientConfig: cfg}
	client := &http.Client{Transport: tr}
	shw = forest.NewClient("https://local.pass.ninja", client)
}
// testJwToken is a canned JWT used to authenticate requests.
// will need to be refreshed during new tests. Will expire
var testJwToken = `eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE0Mzg0MzMyMjIsImlhdCI6MTQzODE3NDAyMiwic3ViIjoiMTA4NDUwODU0MjAwNjU5OTcxMTI2In0.zqiRRomTfQspHMHHvVyt0EkRzDlaW2crAddPPrc6Ovg`
//////////////////////////////////////////////////////////////////////////
//
//
// Get All Pass
//
//////////////////////////////////////////////////////////////////////////

// TestGetAllPass expects the pass listing endpoint to accept a valid
// bearer token and respond 200.
func TestGetAllPass(t *testing.T) {
	bearer := "Bearer " + testJwToken
	cfg := forest.NewConfig("/api/v1/passes").
		Header("Accept", "application/json").
		Header("Authorization", bearer)
	r := shw.GET(t, cfg)
	forest.ExpectStatus(t, r, 200)
}
// TestGetAllPassBadAuth expects the pass listing endpoint to reject an
// invalid bearer token with 401.
func TestGetAllPassBadAuth(t *testing.T) {
	bearer := "Bearer " + "k39dk.jidiww.399f"
	cfg := forest.NewConfig("/api/v1/passes").
		Header("Accept", "application/json").
		Header("Authorization", bearer)
	r := shw.GET(t, cfg)
	forest.ExpectStatus(t, r, 401)
}
//////////////////////////////////////////////////////////////////////////
//
// Create Pass
//
//
//////////////////////////////////////////////////////////////////////////

// TestCreatePass expects pass creation to respond 201 and captures the
// new pass ID for the later tests.
func TestCreatePass(t *testing.T) {
	bearer := "Bearer " + testJwToken
	cfg := forest.NewConfig("/api/v1/passes").
		Header("Accept", "application/json").
		Header("Authorization", bearer).
		Body(`{"name":"testpass","status":"2","passtype":"coupon","keyDoc":{"description":"A pass for testing","organizationName":"tester"}}`)
	r := shw.POST(t, cfg)
	forest.ExpectStatus(t, r, 201)
	forest.ExpectJSONHash(t, r, func(hash map[string]interface{}) {
		testPassID = hash["id"].(string)
	})
}
// TestCreateConflictPass re-posts the same pass as TestCreatePass and
// expects a 409 conflict.
func TestCreateConflictPass(t *testing.T) {
	bearer := "Bearer " + testJwToken
	cfg := forest.NewConfig("/api/v1/passes").
		Header("Accept", "application/json").
		Header("Authorization", bearer).
		Body(`{"name":"testpass","status":"2","passtype":"coupon","keyDoc":{"description":"A pass for testing","organizationName":"tester"}}`)
	r := shw.POST(t, cfg)
	forest.ExpectStatus(t, r, 409)
}
// TestCreateMalformPass posts a payload with a misspelled field and an
// invalid passtype and expects a 422 validation failure.
func TestCreateMalformPass(t *testing.T) {
	bearer := "Bearer " + testJwToken
	cfg := forest.NewConfig("/api/v1/passes").
		Header("Accept", "application/json").
		Header("Authorization", bearer).
		Body(`{"bame":"testpass","passtype":"wiw","keyDoc":{"description":"A pass for testing","organizationName":"tester"}}`)
	r := shw.POST(t, cfg)
	forest.ExpectStatus(t, r, 422)
}
func TestCreateCompletePass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes").
Header("Accept", "application/json").
Header("Authorization", bearer).
Body(`{
"name": "deepdiscount",
"passtype": "coupon",
"keyDoc": {
"description": "A super sale from Deep Discounts Co.",
"organizationName": "Deep Discounts Co.",
"locations": [{
"latitude": 37.55346785983141,
"longitude": 126.91327761858702
}],
"coupon": {
"auxiliaryFields": [{
"key": "valid",
"label": "VALID THRU",
"value": "2014-07-31T10:00-05:00",
"dateStyle": "PKDateStyleShort",
"isRelative": true
}, {
"key": "limit",
"label": "LIMIT",
"value": "1 Per Customer"
}, {
"key": "coupon",
"label": "COUPON #",
"value": 131
}],
"headerFields": [{
"key": "headeroffer",
"label": "OFFER",
"value": "In-store"
}],
"primaryFields": [{
"key": "primaryoffer",
"label": "All Summer Sandals",
"value": "40% off"
}]
},
"barcode": {
"format": "PKBarcodeFormatPDF417",
"message": "1234566",
"messageEncoding": "iso-8859-1"
},
"backgroundColor": "rgb(255, 28, 177)",
"foregroundColor": "rgb(255, 255, 255)",
"labelColor": "rgb(96, 57, 19)"
},
"images": [{
"image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAIfUlEQVR4nO2dW6gVVRjHf1aOZveyXioa7EqRREhUFAxWahbHAokMkRKfKiJMeu4xQqJCfKqMQw9hYSZhaWUTlITdqEzBLkxREiWlkR2bk9LDmi37zNn77Jk96zYz3++t2e1vLfz9mT1nzTezpiFoIYmCk4AFwDzgGPAxsCOM02NOJzaAaa4n0ASSKLgC2AhcnfvoM+CeME5/sD+rYkgAKpJEwTXAduDcPv/LL8D8ME732ZtVcSQAFUiiYC7wLv3ld/A2BBKAISkhv4OXIZAADMEQ8jt4FwIJQEkqyO/gVQgkACXQIL+DNyGQABREo/wOXoRAAlAAA/I7OA/BCa4GrgsG5QOcD1xpoG5hJABTYFg+wIYwTjcbql0ICUAfLMj/E1htqHZhJAA9sCAfYH0YpwcN1i+EBCCHJfnjwHMG6xdGAtBFdkt3HWblA3wcxulvhscohASgizBO/wMWAdsMD7XdcP3CSAByhHH6D3AX8I7BYXYZrF2K1gcgiYIwfyyM0yPAfcAhQ8P+bKhuaVodgKyZY1cSBUvzn4VxegDYbWjovw3VLU1rA5Dr5Hk5iYKR3OenAlcYGj4wVLc0J7megAt6tHHNADYmUfA4MAqcjfoz7RxDU5gNfGeodiladwaYoodvBvAsaoXue+AOg9O4zGDtUrQqAAUaOKfiMPA1MKZhKjdqqKGF1gSgovyPgDlhnM4F5gDvV5zOoiQKvPi392ISpqkoH+CRzspdGKe/AndSbZ3gImB+he9ro/EB0CAfYH/3f3QtFu2tUHNNhe9qo9EB0CQf4N78gSwEz1eouTCJggUVvq+FxgZAo3yAJ5MouLPH8VkV665PouD0ijUq0cgAaJYP6k/E17pXDJMomAM8XLHuxcCoywvCxjWFGpCf5yPgDyACTtNUcwOwysWTxI0KgAX5JnkDWBHG6V9VCyVRcBewp0i3cWN+AjyQvwG4Abgd+GSI7y8BPq9yYZhEwZlJFLwIvA7sSKJg4IpjI84AHsh/C1gcxmlnPrOAzcBtQ9bbBqyl4Asmkig4D3gEeBA4q+ujgc8d1D4AHsgHeCyM06e7DyRRMBM1r5sr1P0ReBvYCewDDgApcCpwAXAd6q0k1wPT+9SYMgS1DoAn8kH196/MH8zm94WD+eTpG4LaXgN4JB9gRZ91gsT2RPpwPn2uCWoZAM/kA5yIWidYnDs+qdPIIT1DULufAA/ld/MvqpHkQ9Tv82rgZKczmsyEn4NaBcBz+XXiFyAK4/S72gSgAfLHgU3Ab8AI6pawS74Grq1FABogH2BJGKdbALIbQJuAW9xOiWXeXwQ2RP7ejnyAbLl3BHjP3ZQAuMnrADREPvTovs76Ce4Bfrc/neP40ZfWiwbJB7g0iYJeTSV/4PYxsd1eXgM0TH6HMWBpGKdbOweyPoA9wOUO5nMIuMS7ADRUfocxVBPJS8BM4CngIQfzGEe9xHqzVwFouPxuDqMeD+t3A8ck46i+g1fAo6XgFskHOIX+8n8Hjhoad4J88CQALZPfj2+Bq8M4PQ8IgQ80158kHzxYChb5x1nSvVaQNZVsQc9iUU/54PgMIPInMOFp4WydYAT4pmLdvvLBYQBE/iQm9RNkIXimQs0p5YOjAIj8njyRRMEijfUGygcH1wCZ/Hcx9/KFOjOG+vv8TTh+02gncFXJOoXkg+UAiPxCHAW2oh5IXQxcWPL7heWDxQCIfCuUkg+WrgFEvjbeA+4GlgFf5j4rLR8snAFEvja+BOZlbzPNrxMMJR8MnwFEvla2duTDhHWCDxhSPhgMgMjXTjjpgArBrcPKB0M/ASLfCEeBke5+Ah1oPwOIfGN0Hj7RuVik9wwg8q1wGLgyjNOfdBTTdgYQ+VYYB1bqkg+azgAi3wrjwPIwTjfqLFo5ACLfCkbkQ8UAiHwrGJMPFQIg
8q1gVD4MGQCRbwXj8mGIAIh8K1iRDyUDIPKtYE0+lAiAyLeCVflQMAAi3wrW5UOBAIh8KziRDwMCIPKt4Ew+TBEAkW8Fp/KhTwBEvhWcy4ceARD5VvBCPuQCIPKt4I186AqAyLeCV/IhC0ASBXOBHYh8k3gnH2Ba9l773agNjAQzeCkfVEvYckS+SbyVDyoAN7meRIPxWj6oAASuJ9FQvJcPKgBfuZ5EA6mFfFABGEX1mgt6qI18gBPCON0PrMTcu+naRK3kQ/ZgSDZhCUE1aicfJi8FrwBeRD2HJhSnlvKh980gCUE5aisf+t8OlhAUo9byYeqGkEEhGEdtdvA28Clqk8R/UOsKs4HLgBuBRbjfIMkEtZcPg1vCeoXgELAOeCaM0wODBsg2RZgPrAEWDj9Vr2iEfCjWFNodgheANWGcHhxmsGxr9PXU+95DY+RD8bbw5cCRME5fqzpg9vbLUWBJ1VoOaJR8cPS6+Oxn4XngARfjD0nj5IPD/QKyEGyiHmeCRsoHxxtGZD8Hn+P3NUFj5YPjDSOyHTQfdDmHATRaPniwZ1AYp9uBba7n0YPGywcPApCx1vUEcrRCPvgTgB3Aj64nkdEa+eBJAMI4PYZaUnZNq+SDJwHI2Ol4/NbJB78CsM/h2K2UD34FYOCNJUO0Vj74FYDUwZitlg9+BWCW5fFaLx/8CkBocSyRn+FTAOZZGkfkd+FTALTuhNEHkZ/D+fbxAEkUzEbtlDnd4DAivwe+nAEeReQ7wfkZIImCM1EdxWcYGkLkT4EPZ4C1iHxnuO4IWgq8aqi8yC+A6zPALMw8kCryC+K6JWwUWIXeEIj8Eji/CARIouB+VJt41WcRRX5JvAgAaAmByB8CbwIAlUIg8ofEqwDAUCEQ+RXwLgBQKgQivyJeBgAKhUDka8D1OkBfwjh9CVgBjPX4+DBwr8ivjrdngA5JFMxBPT52HXAM9VaSdTq3UG8z/wMNEHKSm+DHFQAAAABJRU5ErkJggg==",
"name": "strip"
}, {
"image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAIfUlEQVR4nO2dW6gVVRjHf1aOZveyXioa7EqRREhUFAxWahbHAokMkRKfKiJMeu4xQqJCfKqMQw9hYSZhaWUTlITdqEzBLkxREiWlkR2bk9LDmi37zNn77Jk96zYz3++t2e1vLfz9mT1nzTezpiFoIYmCk4AFwDzgGPAxsCOM02NOJzaAaa4n0ASSKLgC2AhcnfvoM+CeME5/sD+rYkgAKpJEwTXAduDcPv/LL8D8ME732ZtVcSQAFUiiYC7wLv3ld/A2BBKAISkhv4OXIZAADMEQ8jt4FwIJQEkqyO/gVQgkACXQIL+DNyGQABREo/wOXoRAAlAAA/I7OA/BCa4GrgsG5QOcD1xpoG5hJABTYFg+wIYwTjcbql0ICUAfLMj/E1htqHZhJAA9sCAfYH0YpwcN1i+EBCCHJfnjwHMG6xdGAtBFdkt3HWblA3wcxulvhscohASgizBO/wMWAdsMD7XdcP3CSAByhHH6D3AX8I7BYXYZrF2K1gcgiYIwfyyM0yPAfcAhQ8P+bKhuaVodgKyZY1cSBUvzn4VxegDYbWjovw3VLU1rA5Dr5Hk5iYKR3OenAlcYGj4wVLc0J7megAt6tHHNADYmUfA4MAqcjfoz7RxDU5gNfGeodiladwaYoodvBvAsaoXue+AOg9O4zGDtUrQqAAUaOKfiMPA1MKZhKjdqqKGF1gSgovyPgDlhnM4F5gDvV5zOoiQKvPi392ISpqkoH+CRzspdGKe/AndSbZ3gImB+he9ro/EB0CAfYH/3f3QtFu2tUHNNhe9qo9EB0CQf4N78gSwEz1eouTCJggUVvq+FxgZAo3yAJ5MouLPH8VkV665PouD0ijUq0cgAaJYP6k/E17pXDJMomAM8XLHuxcCoywvCxjWFGpCf5yPgDyACTtNUcwOwysWTxI0KgAX5JnkDWBHG6V9VCyVRcBewp0i3cWN+AjyQvwG4Abgd+GSI7y8BPq9yYZhEwZlJFLwIvA7sSKJg4IpjI84AHsh/C1gcxmlnPrOAzcBtQ9bbBqyl4Asmkig4D3gEeBA4q+ujgc8d1D4AHsgHeCyM06e7DyRRMBM1r5sr1P0ReBvYCewDDgApcCpwAXAd6q0k1wPT+9SYMgS1DoAn8kH196/MH8zm94WD+eTpG4LaXgN4JB9gRZ91gsT2RPpwPn2uCWoZAM/kA5yIWidYnDs+qdPIIT1DULufAA/ld/MvqpHkQ9Tv82rgZKczmsyEn4NaBcBz+XXiFyAK4/S72gSgAfLHgU3Ab8AI6pawS74Grq1FABogH2BJGKdbALIbQJuAW9xOiWXeXwQ2RP7ejnyAbLl3BHjP3ZQAuMnrADREPvTovs76Ce4Bfrc/neP40ZfWiwbJB7g0iYJeTSV/4PYxsd1eXgM0TH6HMWBpGKdbOweyPoA9wOUO5nMIuMS7ADRUfocxVBPJS8BM4CngIQfzGEe9xHqzVwFouPxuDqMeD+t3A8ck46i+g1fAo6XgFskHOIX+8n8Hjhoad4J88CQALZPfj2+Bq8M4PQ8IgQ80158kHzxYChb5x1nSvVaQNZVsQc9iUU/54PgMIPInMOFp4WydYAT4pmLdvvLBYQBE/iQm9RNkIXimQs0p5YOjAIj8njyRRMEijfUGygcH1wCZ/Hcx9/KFOjOG+vv8TTh+02gncFXJOoXkg+UAiPxCHAW2oh5IXQxcWPL7heWDxQCIfCuUkg+WrgFEvjbeA+4GlgFf5j4rLR8snAFEvja+BOZlbzPNrxMMJR8MnwFEvla2duTDhHWCDxhSPhgMgMjXTjjpgArBrcPKB0M/ASLfCEeBke5+Ah1oPwOIfGN0Hj7RuVik9wwg8q1wGLgyjNOfdBTTdgYQ+VYYB1bqkg+azgAi3wrjwPIwTjfqLFo5ACLfCkbkQ8UAiHwrGJMPFQIg
8q1gVD4MGQCRbwXj8mGIAIh8K1iRDyUDIPKtYE0+lAiAyLeCVflQMAAi3wrW5UOBAIh8KziRDwMCIPKt4Ew+TBEAkW8Fp/KhTwBEvhWcy4ceARD5VvBCPuQCIPKt4I186AqAyLeCV/IhC0ASBXOBHYh8k3gnH2Ba9l773agNjAQzeCkfVEvYckS+SbyVDyoAN7meRIPxWj6oAASuJ9FQvJcPKgBfuZ5EA6mFfFABGEX1mgt6qI18gBPCON0PrMTcu+naRK3kQ/ZgSDZhCUE1aicfJi8FrwBeRD2HJhSnlvKh980gCUE5aisf+t8OlhAUo9byYeqGkEEhGEdtdvA28Clqk8R/UOsKs4HLgBuBRbjfIMkEtZcPg1vCeoXgELAOeCaM0wODBsg2RZgPrAEWDj9Vr2iEfCjWFNodgheANWGcHhxmsGxr9PXU+95DY+RD8bbw5cCRME5fqzpg9vbLUWBJ1VoOaJR8cPS6+Oxn4XngARfjD0nj5IPD/QKyEGyiHmeCRsoHxxtGZD8Hn+P3NUFj5YPjDSOyHTQfdDmHATRaPniwZ1AYp9uBba7n0YPGywcPApCx1vUEcrRCPvgTgB3Aj64nkdEa+eBJAMI4PYZaUnZNq+SDJwHI2Ol4/NbJB78CsM/h2K2UD34FYOCNJUO0Vj74FYDUwZitlg9+BWCW5fFaLx/8CkBocSyRn+FTAOZZGkfkd+FTALTuhNEHkZ/D+fbxAEkUzEbtlDnd4DAivwe+nAEeReQ7wfkZIImCM1EdxWcYGkLkT4EPZ4C1iHxnuO4IWgq8aqi8yC+A6zPALMw8kCryC+K6JWwUWIXeEIj8Eji/CARIouB+VJt41WcRRX5JvAgAaAmByB8CbwIAlUIg8ofEqwDAUCEQ+RXwLgBQKgQivyJeBgAKhUDka8D1OkBfwjh9CVgBjPX4+DBwr8ivjrdngA5JFMxBPT52HXAM9VaSdTq3UG8z/wMNEHKSm+DHFQAAAABJRU5ErkJggg==",
"name": "logo"
}],
"status": "ready"
}`)
r := shw.POST(t, cfg)
forest.ExpectStatus(t, r, 201)
forest.ExpectJSONHash(t, r, func(hash map[string]interface{}) {
testCompletePassID = hash["id"].(string)
})
}
//////////////////////////////////////////////////////////////////////////
//
// Get Pass
//
//
//////////////////////////////////////////////////////////////////////////
func TestGetPass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testPassID).
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.GET(t, cfg)
forest.ExpectStatus(t, r, 200)
}
func TestGetBadIDPass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", "wnOoZP6On9nvRPOSGFq9M6nhUO3ncbH5nnzLlWaMYso=").
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.GET(t, cfg)
forest.ExpectStatus(t, r, 404)
}
//////////////////////////////////////////////////////////////////////////
//
// Get Link
//
//
//////////////////////////////////////////////////////////////////////////
func TestGetCompletePassLink(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}/link", testCompletePassID).
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.GET(t, cfg)
forest.ExpectStatus(t, r, 200)
}
func TestGetIncompletePassLink(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}/link", testPassID).
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.GET(t, cfg)
forest.ExpectStatus(t, r, 403)
}
func TestGetBadIDPassLink(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}/link", "wnOoZP6On9nvRPOSGFq9M6nhUO3ncbH5nnzLlWaMYso=").
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.GET(t, cfg)
forest.ExpectStatus(t, r, 404)
}
//////////////////////////////////////////////////////////////////////////
//
// Update Pass
//
//
//////////////////////////////////////////////////////////////////////////
func TestUpdatePass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testPassID).
Header("Accept", "application/json").
Header("Authorization", bearer).
Body(`{"name":"testpass","status":"2", "keyDoc": {"labelColor": "rgb(255,255,255)","foregroundColor": "rgb(240,34,19)","backgroundColor": "rgb(119,20,234)"}}`)
r := shw.PATCH(t, cfg)
forest.ExpectStatus(t, r, 200)
}
func TestUpdateBadPass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testPassID).
Header("Accept", "application/json").
Header("Authorization", bearer).
Body(`{"status":"2", "keyDoc": {"labelColor": "rgb(255,255,255)","foregroundColor": "rgb(240,34,19)","backgroundColor": "rgb(119,20,234)"}}`)
r := shw.PATCH(t, cfg)
forest.ExpectStatus(t, r, 400)
}
func TestUpdateInvalidPass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testPassID).
Header("Accept", "application/json").
Header("Authorization", bearer).
Body(`{"name":"testpass","status":"2", "keyDoc": {"labelColor": "white","foregroundColor": "#fff","backgroundColor": "rgb(119,20,234)"}}`)
r := shw.PATCH(t, cfg)
forest.ExpectStatus(t, r, 422)
}
//////////////////////////////////////////////////////////////////////////
//
// Mutate Pass
//
//
//////////////////////////////////////////////////////////////////////////
func TestUpdateForMutatePass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testCompletePassID).
Header("Accept", "application/json").
Header("Authorization", bearer).
Body(`{"name": "deepdiscount","status":"api", "mutatelist": ["limit","coupon"]}`)
r := shw.PATCH(t, cfg)
forest.ExpectStatus(t, r, 200)
}
func TestGetMutateList(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}/mutate", testCompletePassID).
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.GET(t, cfg)
forest.ExpectStatus(t, r, 200)
forest.ExpectJSONHash(t, r, func(hash map[string]interface{}) {
testMutateList = hash["mutatelist"].([]interface{})
//fmt.Printf("%v", testMutateList)
})
}
// TestMutatePass PATCHes a value for every field reported by
// TestGetMutateList and expects the mutation to succeed with HTTP 200.
func TestMutatePass(t *testing.T) {
	// Build a {field: "foo"} payload covering every mutable field.
	mutateObject := make(map[string]string, len(testMutateList))
	for _, item := range testMutateList {
		mutateObject[item.(string)] = "foo"
	}
	// Marshalling a map[string]string cannot fail, so the error is ignored.
	mutateJson, _ := json.Marshal(mutateObject)
	cfg := forest.NewConfig("/api/v1/passes/{ID}/mutate", testCompletePassID).
		Header("Accept", "application/json").
		Header("Authorization", "Bearer "+testJwToken).
		Body(string(mutateJson))
	r := shw.PATCH(t, cfg)
	forest.ExpectStatus(t, r, 200)
}
//////////////////////////////////////////////////////////////////////////
//
// Delete Pass
//
//
//////////////////////////////////////////////////////////////////////////
func TestDeletePass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testPassID).
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.DELETE(t, cfg)
forest.ExpectStatus(t, r, 204)
}
func TestDeleteCompletePass(t *testing.T) {
bearer := "Bearer " + testJwToken
cfg := forest.NewConfig("/api/v1/passes/{ID}", testCompletePassID).
Header("Accept", "application/json").
Header("Authorization", bearer)
r := shw.DELETE(t, cfg)
forest.ExpectStatus(t, r, 204)
}
|
package reporting
// Report is a serializable report with a human-readable description.
type Report interface {
	// Encode renders the report into a transmittable byte form.
	Encode() ([]byte, error)
	// Description returns a short human-readable summary of the report.
	Description() string
}
|
package status
import (
"bytes"
"fmt"
"github.com/gookit/color"
)
// ChangeType is the status label printed before a path in the
// git-status-style output rendered by Status.String.
type ChangeType string

const (
	UnModified ChangeType = "unmodified"
	// The three printable labels below are padded to equal byte width so
	// paths line up in the rendered output — note the deliberate trailing
	// space in Deleted.
	Modified ChangeType = "modified:"
	Created  ChangeType = "new file:"
	Deleted  ChangeType = "deleted: "
)
// Changes pairs the change type detected against HEAD with the one detected
// against the worktree for a single path.
type Changes struct {
	Head     ChangeType
	Worktree ChangeType
}

// Change is a single path together with how it changed.
type Change struct {
	path       string      // file path relative to the repository root — TODO confirm
	changeType *ChangeType // label printed next to the path
}

// Status groups changes into the three buckets that String renders:
// staged, unstaged, and untracked.
type Status struct {
	staged    []Change
	unstaged  []Change
	untracked []Change
}
// String renders the status in a git-like layout: staged entries in green,
// unstaged and untracked entries in red, each section with its usage hint.
func (s *Status) String() string {
	var out bytes.Buffer
	if len(s.staged) > 0 {
		fmt.Fprintln(&out, "Changes to be committed:")
		fmt.Fprintln(&out, " (use \"git restore --staged <file>...\" to unstage)")
		for _, c := range s.staged {
			fmt.Fprint(&out, color.Green.Sprintf(" %s %s\n", *c.changeType, c.path))
		}
		fmt.Fprintln(&out)
	}
	if len(s.unstaged) > 0 {
		fmt.Fprintln(&out, "Changes not staged for commit:")
		fmt.Fprintln(&out, " (use \"git add <file>...\" to update what will be committed)")
		fmt.Fprintln(&out, " (use \"git restore <file>...\" to discard changes in working directory)")
		for _, c := range s.unstaged {
			fmt.Fprint(&out, color.Red.Sprintf(" %s %s\n", *c.changeType, c.path))
		}
		fmt.Fprintln(&out)
	}
	if len(s.untracked) > 0 {
		fmt.Fprintln(&out, "Untracked files:")
		fmt.Fprintln(&out, " (use \"git add <file>...\" to include in what will be committed)")
		for _, c := range s.untracked {
			fmt.Fprint(&out, color.Red.Sprintf(" %s\n", c.path))
		}
	}
	return out.String()
}
|
package resource
import (
"fmt"
"strings"
)
// Type is a type of resource.
type Type int

// Resource types.
const (
	TypeDrive Type = 0
	TypeFile  Type = 1
)

// String returns the Drive API "kind" string for t, or a diagnostic string
// for unknown values.
func (t Type) String() string {
	switch t {
	case TypeDrive:
		return "drive#teamDrive"
	case TypeFile:
		return "drive#file"
	default:
		return fmt.Sprintf("resource type %d", t)
	}
}

// ParseType parses v as a resource type, case-insensitively.
func ParseType(v string) (Type, error) {
	// BUG FIX: v is lower-cased before comparison, so the original case
	// label "drive#teamDrive" (mixed case) could never match and TypeDrive
	// was unparseable. Compare against the lower-cased form instead.
	switch strings.ToLower(v) {
	case "drive#teamdrive":
		return TypeDrive, nil
	case "drive#file":
		return TypeFile, nil
	default:
		return Type(0), fmt.Errorf("unknown resource type %q", v)
	}
}
|
package web_test
import (
"encoding/json"
"net/http"
"strings"
"testing"
"github.com/gofiber/session"
"github.com/stretchr/testify/assert"
"github.com/hi019/fiber-boilerplate/ent"
"github.com/hi019/fiber-boilerplate/ent/enttest"
"github.com/go-playground/validator"
"github.com/gofiber/fiber"
"github.com/rs/zerolog"
_ "github.com/mattn/go-sqlite3"
us "github.com/hi019/fiber-boilerplate/pkg/api/user"
"github.com/hi019/fiber-boilerplate/pkg/api/user/web"
)
// user mirrors the subset of the signup response body that the tests
// decode: the account's email and numeric ID.
type user struct {
	Email string
	ID    int
}
// createAPI wires an in-memory SQLite-backed ent client into a fresh fiber
// app with the user HTTP routes mounted, returning both for a test to use.
func createAPI(t *testing.T) (*fiber.App, *ent.Client) {
	client := enttest.Open(t, "sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
	app := fiber.New()

	svc := us.Initialize(client, &zerolog.Logger{})
	web.NewHTTP(svc, app, validator.New(), &session.Session{})
	return app, client
}
// TestSignup drives the /signup endpoint through a table of requests:
// a valid signup, an invalid email, and a duplicate account.
func TestSignup(t *testing.T) {
	app, db := createAPI(t)
	defer db.Close()

	cases := []struct {
		name       string
		req        string
		wantStatus int
		wantResp   *user
	}{
		{
			name:       "Create a user",
			req:        `{"email": "test@email.com", "password": "password"}`,
			wantStatus: fiber.StatusOK,
		},
		{
			name:       "Fail creating a user with invalid email",
			req:        `{"email": "test", "password": "password"}`,
			wantStatus: fiber.StatusBadRequest,
		},
		{
			name:       "Fail creating a user that already exists",
			req:        `{"email": "test@email.com", "password": "password"}`,
			wantStatus: fiber.StatusBadRequest,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, _ := http.NewRequest("POST", "/signup", strings.NewReader(tc.req))
			req.Header.Add("Content-Type", "application/json")

			resp, err := app.Test(req)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()

			if tc.wantResp != nil {
				got := new(user)
				if err := json.NewDecoder(resp.Body).Decode(got); err != nil {
					t.Fatal(err)
				}
				assert.Equal(t, tc.wantResp, got)
			}
			assert.Equal(t, tc.wantStatus, resp.StatusCode)
		})
	}
}
|
package autocert
import (
"context"
"os"
"runtime"
"testing"
"time"
"github.com/caddyserver/certmagic"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/pomerium/pomerium/internal/testutil"
)
// TestGCSStorage runs the certmagic storage conformance checks against a
// fake GCS bucket. Currently skipped: the fake server lacks multipart
// upload support.
func TestGCSStorage(t *testing.T) {
	t.Skip("fakeserver doesn't support multipart uploads")

	ctx, clearTimeout := context.WithTimeout(context.Background(), time.Second*30)
	t.Cleanup(clearTimeout)

	require.NoError(t, testutil.WithTestGCS(t, "bucket", func() error {
		storage, err := GetCertMagicStorage(ctx, "gs://bucket/some/prefix")
		if !assert.NoError(t, err) {
			return nil
		}
		runStorageTests(t, storage)
		return nil
	}))
}
// TestS3Storage runs the certmagic storage conformance checks against a
// MinIO-backed S3 bucket (skipped on macOS GitHub runners, which cannot
// run docker).
func TestS3Storage(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("GITHUB_ACTION") != "" {
		t.Skip("Github action can not run docker on MacOS")
	}

	ctx, clearTimeout := context.WithTimeout(context.Background(), time.Second*30)
	t.Cleanup(clearTimeout)

	require.NoError(t, testutil.WithTestMinIO(t, "bucket", func(endpoint string) error {
		storage, err := GetCertMagicStorage(ctx, "s3://"+endpoint+"/bucket/some/prefix")
		if !assert.NoError(t, err) {
			return nil
		}
		runStorageTests(t, storage)
		return nil
	}))
}
// runStorageTests exercises a certmagic.Storage implementation end to end:
// store/exists/load/stat on several keys, recursive and shallow listing,
// delete, and lock acquisition with an asynchronous unlock.
func runStorageTests(t *testing.T, s certmagic.Storage) {
	t.Helper()
	ctx, clearTimeout := context.WithTimeout(context.Background(), time.Second*30)
	t.Cleanup(clearTimeout)
	// Basic round-trip for flat and nested keys.
	for _, key := range []string{"1", "a/1", "b/c/2"} {
		assert.NoError(t, s.Store(ctx, key, []byte{1, 2, 3}), "should store")
		assert.True(t, s.Exists(ctx, key), "should exist after storing")
		data, err := s.Load(ctx, key)
		if assert.NoError(t, err, "should load") {
			assert.Equal(t, []byte{1, 2, 3}, data)
		}
		ki, err := s.Stat(ctx, key)
		if assert.NoError(t, err) {
			assert.Equal(t, true, ki.IsTerminal)
		}
	}
	keys, err := s.List(ctx, "", true)
	assert.NoError(t, err, "should list recursively")
	assert.Equal(t, []string{"1", "a/1", "b/c/2"}, keys)
	// Shallow listing surfaces only the next path segment under the prefix.
	keys, err = s.List(ctx, "b/", false)
	assert.NoError(t, err, "should list non-recursively")
	assert.Equal(t, []string{"b/c/"}, keys)
	// NOTE(review): "a/b/c" was never stored, so this asserts Delete is a
	// no-op for missing keys — confirm that is the intended coverage.
	assert.NoError(t, s.Delete(ctx, "a/b/c"), "should delete")
	_, err = s.Load(ctx, "a/b/c")
	assert.Error(t, err)
	// Locking: the second Lock blocks until the timer goroutine unlocks.
	assert.NoError(t, s.Lock(ctx, "a"), "should lock")
	time.AfterFunc(time.Second*2, func() {
		s.Unlock(ctx, "a")
	})
	assert.NoError(t, s.Lock(ctx, "a"), "should re-lock")
}
|
package main
import (
"encoding/binary"
"fmt"
"net"
"time"
)
// main streams an incrementing uint64 counter over UDP to a fixed server
// address, one datagram per second.
func main() {
	var number uint64
	// NOTE(review): the buffer is 16 bytes but only the first 8 carry the
	// counter; the receiver presumably reads the leading 8 bytes — confirm
	// before shrinking the datagram.
	buf := make([]byte, 16)

	// ----------------------- SET UP UDP CONNECTION -----------------------
	serverAddr, err := net.ResolveUDPAddr("udp", "10.100.23.233:10001")
	if err != nil {
		fmt.Println("Error: ", err)
		return
	}
	localAddr, err := net.ResolveUDPAddr("udp", "10.100.23.233:0")
	if err != nil {
		fmt.Println("Error: ", err)
		return
	}
	conn, err := net.DialUDP("udp", localAddr, serverAddr)
	if err != nil {
		// BUG FIX: the original commented out this error check and fell
		// through, which nil-dereferenced conn.Write below whenever the
		// dial failed. Report and bail out instead.
		fmt.Println("Error: ", err)
		return
	}
	defer conn.Close()

	for {
		binary.BigEndian.PutUint64(buf, number)
		if _, err := conn.Write(buf); err != nil {
			fmt.Println("Kunne ikke skrive") // Norwegian: "could not write"
		} else {
			fmt.Println("\t Number: ", number, "\t")
			number++
		}
		time.Sleep(1 * time.Second)
	}
}
|
package yadisk
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
)
// client sends requests to the Yandex.Disk API.
type client struct {
	httpClient *http.Client // underlying HTTP transport
	token      *Token       // OAuth token attached to every request
	baseURL    *url.URL     // API root including the /vN version segment
	// ctx is bound to every request issued by this client.
	// NOTE(review): storing a context in a struct is discouraged by Go
	// convention; kept as-is for compatibility.
	ctx context.Context
}
// newClient builds a client around httpClient (http.DefaultClient when
// nil), rooted at baseURL with the API version appended as a "/vN" path
// segment.
func newClient(ctx context.Context, token *Token, baseURL string, version int, httpClient *http.Client) (*client, error) {
	hc := httpClient
	if hc == nil {
		hc = http.DefaultClient
	}
	base, err := url.Parse(fmt.Sprintf("%s/v%d", baseURL, version))
	if err != nil {
		return nil, err
	}
	return &client{
		httpClient: hc,
		token:      token,
		baseURL:    base,
		ctx:        ctx,
	}, nil
}
// setRequestHeaders stamps the standard JSON and OAuth headers onto req.
func (c *client) setRequestHeaders(req *http.Request) {
	for name, value := range map[string]string{
		"Accept":        "application/json",
		"Content-Type":  "application/json",
		"Authorization": "OAuth " + c.token.AccessToken,
	} {
		req.Header.Add(name, value)
	}
}
// request builds an authenticated API request for method at pathURL,
// resolved relative to the client's base URL.
func (c *client) request(method string, pathURL string, body io.Reader) (*http.Request, error) {
	rel, err := url.Parse(c.baseURL.Path + pathURL)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(method, c.baseURL.ResolveReference(rel).String(), body)
	if err != nil {
		return nil, err
	}
	c.setRequestHeaders(req)
	return req, nil
}
// do executes req bound to the client's context. When the transport fails
// and the context has already been cancelled or timed out, the context's
// error is reported instead of the transport error.
func (c *client) do(req *http.Request) (*http.Response, error) {
	resp, err := c.httpClient.Do(req.WithContext(c.ctx))
	if err == nil {
		return resp, nil
	}
	// Prefer the context's own error (cancellation/deadline) when it fired.
	if ctxErr := c.ctx.Err(); ctxErr != nil {
		return nil, ctxErr
	}
	return nil, err
}
// getResponse executes req and decodes the JSON body into obj.
// It returns the HTTP status metadata alongside any transport, decode, or
// API-level error: a body that decodes into a non-zero Error value is
// returned as that error instead of being decoded into obj.
func (c *client) getResponse(req *http.Request, obj interface{}) (*responseInfo, error) {
	resp, e := c.do(req)
	if e != nil {
		return nil, e
	}
	defer bodyClose(resp.Body)

	info := new(responseInfo)
	info.setResponseInfo(resp.Status, resp.StatusCode)
	// NOTE: the original re-checked a nil error here; that dead branch has
	// been removed.

	body, e := ioutil.ReadAll(resp.Body)
	if e != nil {
		return info, e
	}
	if len(body) == 0 {
		return info, nil
	}

	// The API reports failures as an Error document in the body; probe for
	// one before decoding into the caller's target.
	apiErr := new(Error)
	if e = json.Unmarshal(body, apiErr); e != nil {
		return info, e
	}
	if (Error{}) != *apiErr {
		return info, apiErr
	}
	if e = json.Unmarshal(body, &obj); e != nil {
		return info, e
	}
	return info, nil
}
func bodyClose(closer io.Closer) {
e := closer.Close()
if e != nil {
panic(e.Error())
}
}
// getRange formats an HTTP Content-Range header value of the form
// "bytes start-end/total" (end is inclusive).
func getRange(start, end, total int64) string {
	const layout = "bytes %d-%d/%d"
	return fmt.Sprintf(layout, start, end, total)
}
// requestWithRange splits data into `portions` sequential upload requests
// of roughly partSize bytes each, attaching a Content-Range header to
// every one. The final portion absorbs the remainder up to contentLength.
func requestWithRange(ur *ResourceUploadLink, data []byte, partSize, contentLength int64, portions int) ([]*http.Request, error) {
	portionSize := partSize // exclusive end offset of the current window
	startSize := int64(0)   // inclusive start offset of the current window
	reqs := make([]*http.Request, portions)
	for i := 0; i < portions; i++ {
		var dataSize []byte
		if i == portions-1 {
			// Last chunk: extend to the full content length to pick up any
			// remainder that does not divide evenly by partSize.
			portionSize = contentLength
			dataSize = data[startSize:contentLength]
		} else {
			dataSize = data[startSize:portionSize]
		}
		req, e := http.NewRequest(ur.Method, ur.Href, bytes.NewReader(dataSize))
		if e != nil {
			return nil, e
		}
		// Content-Range end offsets are inclusive, hence portionSize-1.
		req.Header.Set("Content-Range", getRange(startSize, portionSize-1, contentLength))
		reqs[i] = req
		// Slide the window forward by one part.
		startSize = portionSize
		portionSize += partSize
	}
	return reqs, nil
}
|
package list
import (
"crypto/tls"
"encoding/json"
"fmt"
"log"
"time"
"github.com/mickep76/auth/jwt"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/mickep76/grpc-exec-example/conf"
pb_info "github.com/mickep76/grpc-exec-example/info"
"github.com/mickep76/grpc-exec-example/tlscfg"
)
// list dials the info service at addr over TLS with per-RPC credentials,
// fetches the system list, and prints it either as indented JSON or as
// colorized text depending on c.AsJson.
func list(c *Config, addr string, cfg *tls.Config, creds credentials.PerRPCCredentials) {
	conn, err := grpc.Dial(addr,
		grpc.WithTransportCredentials(credentials.NewTLS(cfg)),
		grpc.WithPerRPCCredentials(creds))
	if err != nil {
		log.Printf("connect: %v", err)
		return
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	systems, err := pb_info.NewInfoClient(conn).ListSystems(ctx, &pb_info.ListRequest{})
	if err != nil {
		log.Printf("list: %v", err)
		return
	}

	if !c.AsJson {
		for _, s := range systems.Systems {
			fmt.Print(s.FmtStringColor(addr))
		}
		return
	}
	b, _ := json.MarshalIndent(systems, "", " ")
	fmt.Println(string(b))
}
// Cmd loads the client configuration (files first, then flags), prepares
// TLS and a signed JWT credential, and lists systems from the configured
// catalog address.
func Cmd(args []string) {
	c := newConfig()
	if err := conf.Load([]string{"/etc/client.toml", "~/.client.toml"}, c); err != nil {
		log.Fatalf("config: %v", err)
	}
	conf.ParseFlags(c.setFlags(), args, c)

	tlsCfg, err := tlscfg.NewConfig(c.Ca, "", "", "", false)
	if err != nil {
		log.Fatal(err)
	}

	token, err := jwt.LoadSignedToken(c.Token)
	if err != nil {
		log.Fatal(err)
	}

	list(c, c.Catalog, tlsCfg, token)
}
|
package main
import (
"regexp"
"strconv"
"strings"
)
// zhunbeishuchu ("prepare for output") converts a raw Class row into a
// NewClass with human-readable Chinese day/lesson strings and an expanded
// list of the weeks the class meets.
func zhunbeishuchu(class Class) NewClass {
	day, _ := strconv.Atoi(class.Day)
	// Each Chinese numeral is 3 bytes in UTF-8, hence the 3-byte slicing.
	theday := "一二三四五六七"[(day-1)*3 : day*3]
	lesson := strings.Split(class.Lesson, "-")
	lesson1, _ := strconv.Atoi(lesson[0])
	lesson2, _ := strconv.Atoi(lesson[1])
	var thelesson string
	for i := lesson1; i <= lesson2; i++ {
		if i > 10 {
			// 11 and 12 are written 十一 / 十二.
			thelesson = thelesson + "十" + "一二"[(i-11)*3:(i-10)*3]
		} else {
			thelesson += "一二三四五六七八九十"[(i-1)*3 : i*3]
		}
	}
	// Trim a single leading space; the length guard prevents a slice panic
	// on an empty Rewweek (the original would panic).
	if len(class.Rewweek) > 0 && class.Rewweek[:1] == " " {
		class.Rewweek = class.Rewweek[1:]
	}
	rawweek := strings.Split(class.Rewweek, ",")
	var week []int
	for _, u := range rawweek {
		if !strings.Contains(u, "-") {
			// Single week, e.g. "3周".
			imgReg2 := regexp.MustCompile(`(.*?)周`)
			imgInfo2 := imgReg2.FindStringSubmatch(u)
			i, _ := strconv.Atoi(imgInfo2[1])
			week = append(week, i)
		} else {
			// Week range, e.g. "1-16周".
			imgReg := regexp.MustCompile(`(.*?)-(.*?)周`)
			imgInfo := imgReg.FindStringSubmatch(u)
			rawweek1, _ := strconv.Atoi(imgInfo[1])
			rawweek2, _ := strconv.Atoi(imgInfo[2])
			for i := rawweek1; i <= rawweek2; i++ {
				week = append(week, i)
			}
		}
	}
	var weekModel string
	if strings.Contains(class.Rewweek, "双周") {
		weekModel = "double"
	} else if strings.Contains(class.Rewweek, "单周") {
		// BUG FIX: the original tested "双周" (even weeks) in both branches,
		// making this odd-weeks ("单周") branch unreachable.
		// NOTE(review): the "singel" spelling is kept as-is because
		// downstream consumers may match this exact value.
		weekModel = "singel"
	} else {
		weekModel = "all"
	}
	newclass := NewClass{
		Hash_day:     day - 1,
		Hash_lesson:  0,
		Begin_lesson: lesson1,
		Class:        class,
		WeekModel:    weekModel,
		Weekbegin:    week[0],
		Weekend:      week[len(week)-1],
		Period:       "2",
		Week:         week,
	}
	newclass.Class.Day = "星期" + theday
	newclass.Class.Lesson = thelesson + "节"
	return newclass
}
// fenlitongjieke ("split co-scheduled classes") expands a Class whose
// fields contain multiple " # "-separated entries into one NewClass per
// entry, appending to and returning newclass.
func fenlitongjieke(classes Class, newclass []NewClass) []NewClass {
	arr2 := strings.Split(classes.Day, " # ")
	arr3 := strings.Split(classes.Lesson, " # ")
	arr4 := strings.Split(classes.Teacher, " # ")
	arr5 := strings.Split(classes.Classroom, " # ")
	arr6 := strings.Split(classes.Rewweek, " # ")
	// NOTE(review): all fields are assumed to split into the same number of
	// parts; a mismatch would panic on index — confirm upstream invariant.
	for i2 := range arr2 { // idiom fix: was `for i2, _ := range`
		class := classes
		class.Day = arr2[i2]
		class.Lesson = arr3[i2]
		class.Teacher = arr4[i2]
		class.Classroom = arr5[i2]
		class.Rewweek = arr6[i2]
		newclass = append(newclass, zhunbeishuchu(class))
	}
	return newclass
}
|
package main
import (
"net/http"
"github.com/dxvgef/tsing"
)
// main starts a minimal tsing HTTP server that answers "hello world" on /.
func main() {
	engine := tsing.New(&tsing.Config{})
	engine.GET("/", func(context *tsing.Context) error {
		// BUG FIX: surface the write error through the handler's error
		// return instead of discarding it.
		_, err := context.ResponseWriter.Write([]byte("hello world"))
		return err
	})
	// BUG FIX: ListenAndServe's error was silently discarded, so a failed
	// bind previously exited with status 0 and no diagnostics.
	if err := http.ListenAndServe(":5656", engine); err != nil {
		panic(err)
	}
}
|
package main
import (
"log"
"github.com/BurntSushi/toml"
)
// AppConfig is the top-level TOML configuration for the application,
// aggregating the [magento], [hpfeeds], and [fetch_public_ip] sections.
type AppConfig struct {
	Magento  *MagentoConfig
	Hpfeeds  *HpfeedsConfig
	PublicIP *PublicIPConfig `toml:"fetch_public_ip"`
}
// MagentoConfig provides configuration for how to host the Magento web app
// portion of the honeypot. Field names map to snake_case TOML keys via the
// struct tags below.
// [magento]
type MagentoConfig struct {
	Port                  int // TCP port the web app listens on
	MagentoVersionText    string `toml:"magento_version_text"`
	SiteName              string `toml:"site_name"`
	NameRandomizer        bool   `toml:"name_randomizer"`
	HeaderServer          string `toml:"header_server"`
	HeaderContentLanguage string `toml:"header_content_language"`
}
// HpfeedsConfig provides configuration for connecting to an hpfeeds broker
// server and credentials for publishing data.
// [hpfeeds]
type HpfeedsConfig struct {
	Enabled bool
	Host    string // broker address
	Port    int    // broker port
	Ident   string // publishing identity
	Auth    string // publishing secret
	Channel string // channel data is published to
	Meta    string
}
// PublicIPConfig controls discovery of this host's public IP address.
// [fetch_public_ip]
type PublicIPConfig struct {
	Enabled bool
	// URLs of services queried to learn the public IP — presumably tried
	// in order; confirm against the fetch logic.
	URLs []string
}
// loadConfig reads and parses the TOML configuration at filename, exiting
// the process if the file cannot be decoded.
func loadConfig(filename string) *AppConfig {
	c := &AppConfig{}
	if _, err := toml.DecodeFile(filename, c); err != nil {
		log.Fatalf("Unable to parse config file: %s\n", err.Error())
	}
	return c
}
|
package outline
// StyleType names a CSS outline-style keyword value.
type StyleType string

// The CSS outline-style keyword values, grouped into a single const block
// (the original declared eight separate const lines).
const (
	Dotted StyleType = "dotted"
	Dashed StyleType = "dashed"
	Solid  StyleType = "solid"
	Double StyleType = "double"
	Groove StyleType = "groove"
	Ridge  StyleType = "ridge"
	Inset  StyleType = "inset"
	Outset StyleType = "outset"
)
|
package mws
import (
"fmt"
"github.com/databrickslabs/databricks-terraform/common"
)
// NewMWSCustomerManagedKeysAPI creates MWSCustomerManagedKeysAPI instance from provider meta
func NewMWSCustomerManagedKeysAPI(m interface{}) MWSCustomerManagedKeysAPI {
	// m is always a *common.DatabricksClient here; the type assertion
	// panics if the provider is wired incorrectly.
	return MWSCustomerManagedKeysAPI{client: m.(*common.DatabricksClient)}
}
// MWSCustomerManagedKeysAPI exposes the mws customerManagedKeys API
type MWSCustomerManagedKeysAPI struct {
	client *common.DatabricksClient // authenticated account-level API client
}
// Create registers a customer-managed key (BYOVPC) for the given MWS
// account and returns the created key record.
func (a MWSCustomerManagedKeysAPI) Create(mwsAcctID, keyArn, keyAlias, keyRegion string) (k CustomerManagedKey, err error) {
	payload := CustomerManagedKey{
		AwsKeyInfo: &AwsKeyInfo{
			KeyArn:    keyArn,
			KeyAlias:  keyAlias,
			KeyRegion: keyRegion,
		},
	}
	path := fmt.Sprintf("/accounts/%s/customer-managed-keys", mwsAcctID)
	err = a.client.Post(path, payload, &k)
	return
}
// Read fetches a single customer-managed key together with its metadata.
func (a MWSCustomerManagedKeysAPI) Read(
	mwsAcctID, customerManagedKeysID string) (k CustomerManagedKey, err error) {
	path := fmt.Sprintf("/accounts/%s/customer-managed-keys/%s", mwsAcctID, customerManagedKeysID)
	err = a.client.Get(path, nil, &k)
	return
}
// Delete deletes the customer managed key object given a network id
func (a MWSCustomerManagedKeysAPI) Delete(customerManagedKeysID string) error {
	// The MWS API offers no delete endpoint for customer-managed keys, so
	// this always fails with a descriptive error.
	return fmt.Errorf("delete is not yet supported")
}
// List returns every customer-managed key registered in the MWS account.
func (a MWSCustomerManagedKeysAPI) List(mwsAcctID string) (kl []CustomerManagedKey, err error) {
	path := fmt.Sprintf("/accounts/%s/customer-managed-keys", mwsAcctID)
	err = a.client.Get(path, nil, &kl)
	return
}
|
package api
import (
"fmt"
"sharemusic/models/util"
)
// SongUrl fetches playable-URL info for a song via the NetEase
// player-url endpoint. query must contain "id" (string); "br" (bitrate)
// defaults to 999000 when absent.
func SongUrl(query map[string]interface{}) map[string]interface{} {
	fmt.Println(query["id"])

	br := query["br"]
	if br == nil {
		br = 999000
	}
	data := map[string]interface{}{
		"ids": "[" + query["id"].(string) + "]",
		"br":  br,
	}
	options := map[string]interface{}{
		"crypto": "linuxapi",
	}
	return util.CreateRequest("https://music.163.com/api/song/enhance/player/url", data, options)
}
|
package tbf
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"testing"
"time"
"github.com/golang/protobuf/proto"
"go.mercari.io/datastore"
"go.mercari.io/datastore/testsuite"
netcontext "golang.org/x/net/context"
"google.golang.org/appengine"
)
// TestSuite contains all the test cases that this package provides.
// It is merged into the shared suite registry by this package's init.
var TestSuite = map[string]testsuite.Test{
	"RealWorld_TBF": tbf,
}
// init registers this package's cases with the shared testsuite registry so
// suite runners pick them up on import.
func init() {
	testsuite.MergeTestSuite(TestSuite)
}
// contextClient and contextBatch are unexported context-key types used to
// stash the datastore client and batch in a context without key collisions.
type contextClient struct{}
type contextBatch struct{}
func timeNow() time.Time {
l, err := time.LoadLocation("Asia/Tokyo")
if err != nil {
panic(err)
}
return time.Date(2017, 11, 8, 10, 11, 12, 13, l)
}
// tbf exercises a realistic fan-out pattern: 10 Circles each owning 4
// Images, written and read back through the batch API, then round-tripped
// through JSON and finally inspected as raw property lists.
func tbf(ctx context.Context, t *testing.T, client datastore.Client) {
	defer func() {
		err := client.Close()
		if err != nil {
			t.Fatal(err)
		}
	}()
	// Give each Circle 4 images and fetch 10 Circles; ideally that costs
	// only 2 RPCs in total (1 Query + 1 BatchGet).
	// Reuse one client: creating several clients and hammering them ends up
	// blocked somewhere and never returns.
	ctx = context.WithValue(ctx, contextClient{}, client)
	// A batch can be reused across Exec calls.
	batch := client.Batch()
	ctx = context.WithValue(ctx, contextBatch{}, batch)
	rpcCount := 0
	inMemcacheTestSuite := false
	if testsuite.IsAEDatastoreClient(ctx) {
		// checking rpc count when testing in ae.
		ctx = appengine.WithAPICallFunc(ctx, func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
			t.Log(service, method)
			if service == "datastore_v3" {
				rpcCount++
			}
			if service == "memcache" {
				// if memcache service called, this test in the TestAEDatastoreWithAEMemcacheTestSuite.
				inMemcacheTestSuite = true
			}
			return appengine.APICall(ctx, service, method, in, out)
		})
	}
	const circleLimit = 10
	const imageLimit = 4
	// Prepare entities
	for i := 0; i < circleLimit; i++ {
		// NOTE Don't use client.AllocateIDs for JSON format consistency
		circleID := CircleID(1000000 + 10000*i)
		circleKey := circleID.ToKey(client)
		circle := &Circle{
			ID:   circleID,
			Name: fmt.Sprintf("サークル #%d", i+1),
		}
		for j := 0; j < imageLimit; j++ {
			// NOTE Don't use client.AllocateIDs for JSON format consistency
			imageID := imageID(circleKey.ID() + 1000 + int64(10*j))
			imageKey := imageID.ToKey(client)
			image := &Image{
				ID:            imageID,
				OwnerCircleID: circleID,
				GCSPath:       fmt.Sprintf("%d/%d.jpg", circleKey.ID(), imageKey.ID()),
			}
			batch.Put(imageKey, image, nil)
			circle.ImageIDs = append(circle.ImageIDs, image.ID)
			circle.Images = append(circle.Images, image)
		}
		batch.Put(circleKey, circle, nil)
	}
	err := batch.Exec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// All puts should have been folded into a single RPC.
	if testsuite.IsAEDatastoreClient(ctx) && !inMemcacheTestSuite {
		if rpcCount != 1 {
			t.Errorf("unexpected: %v", rpcCount)
		}
	}
	// fetch entities
	rpcCount = 0
	q := client.NewQuery(kindCircle)
	var circleList []*Circle
	_, err = client.GetAll(ctx, q, &circleList)
	if err != nil {
		t.Fatal(err)
	}
	err = batch.Exec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Expect exactly 1 Query + 1 BatchGet for the images.
	if testsuite.IsAEDatastoreClient(ctx) && !inMemcacheTestSuite {
		if rpcCount != 2 {
			t.Errorf("unexpected: %v", rpcCount)
		}
	}
	if v := len(circleList); v != circleLimit {
		t.Errorf("unexpected: %v", v)
	}
	for _, circle := range circleList {
		if v := len(circle.Images); v != imageLimit {
			t.Errorf("unexpected: %v", v)
		}
		for _, image := range circle.Images {
			// Every image's GCS path is prefixed with its owner circle's ID.
			if v := image.GCSPath; !strings.HasPrefix(v, fmt.Sprintf("%d/", circle.ID)) {
				t.Errorf("unexpected: %v", v)
			}
		}
	}
	{ // obj <-> JSON
		b, err := json.MarshalIndent(circleList, "", " ")
		if err != nil {
			t.Fatal(err)
		}
		const filePath = "../testsuite/realworld/tbf/realworld-tbf.json"
		expected, err := ioutil.ReadFile(filePath)
		if err != nil {
			// Golden file missing: write the fresh output and use it as the
			// expectation for this run.
			err = ioutil.WriteFile(filePath, b, 0644)
			if err != nil {
				t.Fatal(err)
			}
			expected = b
		}
		if !bytes.Equal(b, expected) {
			t.Fatalf("unexpected json format. hint: rm %s", filePath)
		}
		var newCircleList []*Circle
		err = json.Unmarshal(b, &newCircleList)
		if err != nil {
			t.Fatal(err)
		}
		if v := len(newCircleList); v != circleLimit {
			t.Errorf("unexpected: %v", v)
		}
		for _, circle := range newCircleList {
			if v := len(circle.Images); v != imageLimit {
				t.Errorf("unexpected: %v", v)
			}
			for _, image := range circle.Images {
				if v := image.GCSPath; !strings.HasPrefix(v, fmt.Sprintf("%d/", circle.ID)) {
					t.Errorf("unexpected: %v", v)
				}
			}
		}
	}
	{ // Naked Get
		q := client.NewQuery(kindImage)
		var pss []datastore.PropertyList
		_, err = client.GetAll(ctx, q, &pss)
		if err != nil {
			t.Fatal(err)
		}
		if v := len(pss); v != circleLimit*imageLimit {
			t.Errorf("unexpected: %v", v)
		}
		for _, ps := range pss {
			for _, p := range ps {
				// Only the *ID properties are expected to be stored as Keys.
				if !strings.HasSuffix(p.Name, "ID") {
					continue
				}
				_, ok := p.Value.(datastore.Key)
				if !ok {
					t.Errorf("unexpected: %T", p.Value)
				}
			}
		}
	}
}
|
/*
Copyright 2019 BlackRock, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"io/ioutil"
"os"
"testing"
"github.com/ocibuilder/ocibuilder/pkg/apis/ocibuilder/v1alpha1"
"github.com/ghodss/yaml"
"github.com/stretchr/testify/assert"
)
func TestParseDockerCommands(t *testing.T) {
path := "../testing/dummy/commands_basic_parser_test.txt"
dockerfile, err := ParseDockerCommands(&v1alpha1.DockerStep{
Path: path,
})
expectedDockerfile := "RUN pip install kubernetes\nCOPY app/ /bin/app\n"
assert.Equal(t, nil, err)
assert.Equal(t, expectedDockerfile, string(dockerfile))
}
// TestGenerateDockerfile checks Dockerfile generation from a path-based
// docker step against the expectedDockerfile fixture.
func TestGenerateDockerfile(t *testing.T) {
	raw, err := ioutil.ReadFile("../testing/dummy/build.yaml")
	assert.Equal(t, nil, err)

	templates := []v1alpha1.BuildTemplate{{
		Name: "template-1",
		Cmd: []v1alpha1.BuildTemplateStep{{
			Docker: &v1alpha1.DockerStep{
				Path: "../testing/dummy/commands_basic_parser_test.txt",
			},
		}},
	}}

	var spec v1alpha1.BuildSpec
	if err := yaml.Unmarshal(raw, &spec); err != nil {
		assert.Fail(t, "fail unmarshalling build spec")
	}

	path, err := GenerateDockerfile(spec.Steps[0], templates, "")
	assert.Equal(t, nil, err)
	defer os.Remove(path)

	generated, err := ioutil.ReadFile(path)
	assert.Equal(t, nil, err)
	assert.Equal(t, expectedDockerfile, string(generated))
}
// TestGenerateDockerfileInline checks Dockerfile generation from inline docker
// commands, both into the default location and into an explicit output dir.
func TestGenerateDockerfileInline(t *testing.T) {
	raw, err := ioutil.ReadFile("../testing/dummy/build.yaml")
	assert.Equal(t, nil, err)

	inlineTemplates := []v1alpha1.BuildTemplate{{
		Name: "template-1",
		Cmd: []v1alpha1.BuildTemplateStep{{
			Docker: &v1alpha1.DockerStep{
				Inline: []string{
					"ADD ./ /test-path",
					"WORKDIR /test-dir",
					"ENV PORT=3001",
					"CMD [\"go\", \"run\", \"main.go\"]",
				},
			},
		}},
	}}

	var spec v1alpha1.BuildSpec
	if err := yaml.Unmarshal(raw, &spec); err != nil {
		assert.Fail(t, "fail unmarshalling build spec")
	}

	// Default output location.
	path, err := GenerateDockerfile(spec.Steps[0], inlineTemplates, "")
	assert.Equal(t, nil, err)
	defer os.Remove(path)
	generated, err := ioutil.ReadFile(path)
	assert.Equal(t, nil, err)
	assert.Equal(t, expectedInlineDockerfile, string(generated))

	// Explicit output directory.
	path, err = GenerateDockerfile(spec.Steps[0], inlineTemplates, "../testing/dummy")
	assert.Equal(t, nil, err)
	generated, err = ioutil.ReadFile("../testing/dummy/" + path)
	assert.Equal(t, nil, err)
	defer os.Remove("../testing/dummy/" + path)
	assert.Equal(t, expectedInlineDockerfile, string(generated))
}
// TestParseAnsibleCommands verifies the Dockerfile fragment generated for a
// local-playbook ansible step.
func TestParseAnsibleCommands(t *testing.T) {
	step := &v1alpha1.AnsibleStep{
		Local: &v1alpha1.AnsibleLocal{
			Playbook: "/playbook.yaml",
		},
	}
	generated, err := ParseAnsibleCommands(step)
	assert.Equal(t, nil, err)
	assert.Equal(t, expectedAnsibleLocalCommands, string(generated), "The generated ansible local commands must match expected")
}
// TestParseAnsibleGalaxyCommands verifies the Dockerfile fragment generated
// for an ansible-galaxy step with a name and a requirements file.
func TestParseAnsibleGalaxyCommands(t *testing.T) {
	ansibleStepGalaxy := &v1alpha1.AnsibleStep{
		Galaxy: &v1alpha1.AnsibleGalaxy{
			Name:         "TestGalaxy",
			Requirements: "/requirements.yaml",
		},
	}
	dockerfile, err := ParseAnsibleCommands(ansibleStepGalaxy)
	assert.Equal(t, nil, err)
	// Fixed typo in the failure message ("comnmands" -> "commands").
	assert.Equal(t, expectedAnsibleGalaxyCommands, string(dockerfile), "The generated ansible galaxy commands must match expected")
}
// expectedAnsibleLocalCommands is the Dockerfile fragment expected from an
// AnsibleStep with a Local playbook (see TestParseAnsibleCommands).
const expectedAnsibleLocalCommands = `ENV PLAYBOOK_DIR /etc/ansible/
RUN mkdir -p $PLAYBOOK_DIR
WORKDIR $PLAYBOOK_DIR
COPY templates templates
COPY files files
COPY vars vars
COPY tasks tasks
ADD *.yaml ./
RUN ansible-playbook /playbook.yaml`

// expectedAnsibleGalaxyCommands is the Dockerfile fragment expected from an
// AnsibleStep with a Galaxy role (see TestParseAnsibleGalaxyCommands).
// NOTE(review): "annsible-galaxy" below looks like a typo mirrored from the
// generator under test — confirm against ParseAnsibleCommands before fixing
// either side, or the test will break.
const expectedAnsibleGalaxyCommands = `ENV PLAYBOOK_DIR /etc/ansible/
RUN mkdir -p $PLAYBOOK_DIR
WORKDIR $PLAYBOOK_DIR
COPY templates templates
COPY files files
COPY vars vars
COPY tasks tasks
ADD *.yaml ./
RUN if [ -f /requirements.yaml ]; then annsible-galaxy install -r /requirements.yaml; fi
RUN ansible-galaxy install TestGalaxy`

// expectedInlineDockerfile is the full multi-stage Dockerfile expected when
// the docker step supplies its commands inline (TestGenerateDockerfileInline).
const expectedInlineDockerfile = "FROM go / java / nodejs / python:ubuntu_xenial:v1.0.0 AS first-stage\nADD ./ /test-path\nWORKDIR /test-dir\nENV PORT=3001\nCMD [\"go\", \"run\", \"main.go\"]\n\nFROM alpine:latest AS second-stage\nCMD [\"echo\", \"done\"]"

// expectedDockerfile is the full multi-stage Dockerfile expected when the
// docker step reads its commands from a file (TestGenerateDockerfile).
const expectedDockerfile = "FROM go / java / nodejs / python:ubuntu_xenial:v1.0.0 AS first-stage\nRUN pip install kubernetes\nCOPY app/ /bin/app\n\n\nFROM alpine:latest AS second-stage\nCMD [\"echo\", \"done\"]"
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document03000101 is the XML document wrapper for the ISO 20022
// seev.030.001.01 message (AgentCADeactivationStatusAdvice).
type Document03000101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:seev.030.001.01 Document"`
	Message *AgentCADeactivationStatusAdviceV01 `xml:"AgtCADeactvtnStsAdvc"`
}

// AddMessage allocates the wrapped message, stores it on the document and
// returns it so the caller can populate it.
func (d *Document03000101) AddMessage() *AgentCADeactivationStatusAdviceV01 {
	d.Message = new(AgentCADeactivationStatusAdviceV01)
	return d.Message
}
// Scope
// This message is sent by a CSD to an issuer (or its agent) to report the status, or a change in status, of a corporate action deactivation instruction or the status of a deactivation cancellation request.
// Usage
// This message is used to provide a status on the deactivation instruction, especially to confirm the deactivation of a Corporate Action event or option.
//
// Each Add* method below follows the builder pattern used throughout this
// package: it allocates the field, stores it on the message and returns the
// new value for the caller to fill in.
type AgentCADeactivationStatusAdviceV01 struct {
	// Identification assigned by the Sender to unambiguously identify the status advice.
	Identification *iso20022.DocumentIdentification8 `xml:"Id"`
	// Identification of the linked Agent CA Deactivation Instruction for which a status is given.
	AgentCADeactivationInstructionIdentification *iso20022.DocumentIdentification8 `xml:"AgtCADeactvtnInstrId"`
	// Identification of the linked Agent CA Deactivation Cancellation Request for which a status is given.
	AgentCADeactivationCancellationRequestIdentification *iso20022.DocumentIdentification8 `xml:"AgtCADeactvtnCxlReqId"`
	// General information about the corporate action event.
	CorporateActionGeneralInformation *iso20022.CorporateActionInformation1 `xml:"CorpActnGnlInf"`
	// Status of the deactivation instruction sent by the issuer (agent).
	DeactivationInstructionStatus []*iso20022.CorporateActionDeactivationInstructionStatus1 `xml:"DeactvtnInstrSts"`
	// Status of the deactivation cancellation request sent by the issuer (agent).
	DeactivationCancellationRequestStatus *iso20022.CorporateActionDeactivationCancellationStatus1Choice `xml:"DeactvtnCxlReqSts"`
}

// AddIdentification allocates and returns the advice identification.
func (a *AgentCADeactivationStatusAdviceV01) AddIdentification() *iso20022.DocumentIdentification8 {
	a.Identification = new(iso20022.DocumentIdentification8)
	return a.Identification
}

// AddAgentCADeactivationInstructionIdentification allocates and returns the
// identification of the linked deactivation instruction.
func (a *AgentCADeactivationStatusAdviceV01) AddAgentCADeactivationInstructionIdentification() *iso20022.DocumentIdentification8 {
	a.AgentCADeactivationInstructionIdentification = new(iso20022.DocumentIdentification8)
	return a.AgentCADeactivationInstructionIdentification
}

// AddAgentCADeactivationCancellationRequestIdentification allocates and
// returns the identification of the linked cancellation request.
func (a *AgentCADeactivationStatusAdviceV01) AddAgentCADeactivationCancellationRequestIdentification() *iso20022.DocumentIdentification8 {
	a.AgentCADeactivationCancellationRequestIdentification = new(iso20022.DocumentIdentification8)
	return a.AgentCADeactivationCancellationRequestIdentification
}

// AddCorporateActionGeneralInformation allocates and returns the corporate
// action general information block.
func (a *AgentCADeactivationStatusAdviceV01) AddCorporateActionGeneralInformation() *iso20022.CorporateActionInformation1 {
	a.CorporateActionGeneralInformation = new(iso20022.CorporateActionInformation1)
	return a.CorporateActionGeneralInformation
}

// AddDeactivationInstructionStatus appends a new instruction status entry to
// the repeating DeactvtnInstrSts element and returns it.
func (a *AgentCADeactivationStatusAdviceV01) AddDeactivationInstructionStatus() *iso20022.CorporateActionDeactivationInstructionStatus1 {
	newValue := new(iso20022.CorporateActionDeactivationInstructionStatus1)
	a.DeactivationInstructionStatus = append(a.DeactivationInstructionStatus, newValue)
	return newValue
}

// AddDeactivationCancellationRequestStatus allocates and returns the
// cancellation request status choice.
func (a *AgentCADeactivationStatusAdviceV01) AddDeactivationCancellationRequestStatus() *iso20022.CorporateActionDeactivationCancellationStatus1Choice {
	a.DeactivationCancellationRequestStatus = new(iso20022.CorporateActionDeactivationCancellationStatus1Choice)
	return a.DeactivationCancellationRequestStatus
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ingest
import (
"context"
"math"
"path/filepath"
"sync/atomic"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
"github.com/pingcap/tidb/br/pkg/lightning/checkpoints"
lightning "github.com/pingcap/tidb/br/pkg/lightning/config"
tidb "github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/size"
"go.uber.org/zap"
)
// ImporterRangeConcurrencyForTest is only used for test. When non-nil, its
// value overrides the lightning range concurrency in genConfig.
var ImporterRangeConcurrencyForTest *atomic.Int32

// Config is the configuration for the lightning local backend used in DDL.
type Config struct {
	// Lightning is the embedded lightning configuration (backend, paths,
	// memory limits, TLS, ...) assembled by genConfig.
	Lightning *lightning.Config
	// KeyspaceName is the TiDB keyspace the job runs in.
	KeyspaceName string
	// IsRaftKV2 records whether the cluster runs raft-kv2.
	IsRaftKV2 bool
}
// genConfig builds the lightning local-backend configuration for one DDL
// ingest job. It derives paths/ports/TLS settings from the global TiDB
// config, scales memory settings against memRoot, and enables duplicate
// detection when the index being built is unique.
func genConfig(ctx context.Context, memRoot MemRoot, jobID int64, unique bool, isRaftKV2 bool) (*Config, error) {
	tidbCfg := tidb.GetGlobalConfig()
	cfg := lightning.NewConfig()
	cfg.TikvImporter.Backend = lightning.BackendLocal
	// Each backend will build a single dir in lightning dir.
	cfg.TikvImporter.SortedKVDir = filepath.Join(LitSortPath, EncodeBackendTag(jobID))
	if ImporterRangeConcurrencyForTest != nil {
		cfg.TikvImporter.RangeConcurrency = int(ImporterRangeConcurrencyForTest.Load())
	}
	if err := cfg.AdjustForDDL(); err != nil {
		logutil.Logger(ctx).Warn(LitWarnConfigError, zap.Error(err))
		return nil, err
	}
	adjustImportMemory(ctx, memRoot, cfg)
	cfg.Checkpoint.Enable = true
	if unique {
		cfg.TikvImporter.DuplicateResolution = lightning.DupeResAlgErr
		// TODO(lance6716): will introduce fail-fast for DDL usage later
		cfg.Conflict.Threshold = math.MaxInt64
	} else {
		cfg.TikvImporter.DuplicateResolution = lightning.DupeResAlgNone
	}
	cfg.TiDB.PdAddr = tidbCfg.Path
	cfg.TiDB.Host = "127.0.0.1"
	cfg.TiDB.StatusPort = int(tidbCfg.Status.StatusPort)
	// Set TLS related information
	cfg.Security.CAPath = tidbCfg.Security.ClusterSSLCA
	cfg.Security.CertPath = tidbCfg.Security.ClusterSSLCert
	cfg.Security.KeyPath = tidbCfg.Security.ClusterSSLKey
	// in DDL scenario, we don't switch import mode
	cfg.Cron.SwitchMode = lightning.Duration{Duration: 0}
	// Fix: return an explicit nil error instead of the stale `err` variable
	// (it was provably nil here, but re-returning it obscured that).
	return &Config{
		Lightning:    cfg,
		KeyspaceName: tidb.GetGlobalKeyspaceName(),
		IsRaftKV2:    isRaftKV2,
	}, nil
}
// Default local-engine compaction settings consumed by
// generateLocalEngineConfig.
var (
	compactMemory      = 1 * size.GB
	compactConcurrency = 4
)
// generateLocalEngineConfig builds the engine configuration for a local
// backend engine bound to the given table, with compaction enabled and the
// sort directory kept after close.
func generateLocalEngineConfig(id int64, dbName, tbName string) *backend.EngineConfig {
	engineCfg := backend.EngineConfig{
		Local: backend.LocalEngineConfig{
			Compact:            true,
			CompactThreshold:   int64(compactMemory),
			CompactConcurrency: compactConcurrency,
		},
		TableInfo: &checkpoints.TidbTableInfo{
			ID:   id,
			DB:   dbName,
			Name: tbName,
		},
		KeepSortDir: true,
	}
	return &engineCfg
}
// adjustImportMemory adjusts the lightning memory parameters according to the
// memory root's max limitation. If aggressive usage fits, nothing is changed;
// otherwise the writer/engine cache sizes are divided by the overshoot factor.
func adjustImportMemory(ctx context.Context, memRoot MemRoot, cfg *lightning.Config) {
	// Try aggressive resource usage successful.
	if tryAggressiveMemory(ctx, memRoot, cfg) {
		return
	}
	defaultMemSize := int64(cfg.TikvImporter.LocalWriterMemCacheSize) * int64(cfg.TikvImporter.RangeConcurrency)
	defaultMemSize += 4 * int64(cfg.TikvImporter.EngineMemCacheSize)
	logutil.Logger(ctx).Info(LitInfoInitMemSetting,
		zap.Int64("local writer memory cache size", int64(cfg.TikvImporter.LocalWriterMemCacheSize)),
		zap.Int64("engine memory cache size", int64(cfg.TikvImporter.EngineMemCacheSize)),
		zap.Int("range concurrency", cfg.TikvImporter.RangeConcurrency))
	maxLimit := memRoot.MaxMemoryQuota()
	// Fix: guard against a zero (or negative) quota — the integer division
	// below would otherwise panic with a divide-by-zero.
	if maxLimit <= 0 {
		return
	}
	scale := defaultMemSize / maxLimit
	if scale == 1 || scale == 0 {
		return
	}
	cfg.TikvImporter.LocalWriterMemCacheSize /= lightning.ByteSize(scale)
	cfg.TikvImporter.EngineMemCacheSize /= lightning.ByteSize(scale)
	// TODO: adjust range concurrency number to control total concurrency in the future.
	logutil.Logger(ctx).Info(LitInfoChgMemSetting,
		zap.Int64("local writer memory cache size", int64(cfg.TikvImporter.LocalWriterMemCacheSize)),
		zap.Int64("engine memory cache size", int64(cfg.TikvImporter.EngineMemCacheSize)),
		zap.Int("range concurrency", cfg.TikvImporter.RangeConcurrency))
}
// tryAggressiveMemory reports whether the current lightning memory settings
// fit within the memory root's remaining quota without any scaling.
func tryAggressiveMemory(ctx context.Context, memRoot MemRoot, cfg *lightning.Config) bool {
	required := int64(int(cfg.TikvImporter.LocalWriterMemCacheSize) * cfg.TikvImporter.RangeConcurrency)
	required += int64(cfg.TikvImporter.EngineMemCacheSize)
	if required+memRoot.CurrentUsage() > memRoot.MaxMemoryQuota() {
		return false
	}
	logutil.Logger(ctx).Info(LitInfoChgMemSetting,
		zap.Int64("local writer memory cache size", int64(cfg.TikvImporter.LocalWriterMemCacheSize)),
		zap.Int64("engine memory cache size", int64(cfg.TikvImporter.EngineMemCacheSize)),
		zap.Int("range concurrency", cfg.TikvImporter.RangeConcurrency))
	return true
}
// defaultImportantVariables is used in obtainImportantVariables to retrieve the system
// variables from downstream which may affect KV encode result. The values record the default
// values if missing.
var defaultImportantVariables = map[string]string{
	"max_allowed_packet":      "67108864", // 64MB
	"div_precision_increment": "4",
	"time_zone":               "SYSTEM",
	"lc_time_names":           "en_US",
	"default_week_format":     "0",
	"block_encryption_mode":   "aes-128-ecb",
	"group_concat_max_len":    "1024",
	"tidb_row_format_version": "1",
}
|
package commands
import (
"fmt"
"os"
"regexp"
"github.com/qubitz/lawyer/laws"
"github.com/qubitz/lawyer/trial"
"github.com/TwinProduction/go-color"
)
// indictCommand checks a set of file paths against the expected pattern of a
// law file.
type indictCommand struct {
	paths   []string // files to put on trial
	lawPath string   // path of the law file holding the expected pattern
}
// Execute loads the law file and runs indict on every configured path.
// A path whose trial cannot be conducted is reported as dismissed (in
// yellow) but does not abort the remaining paths; only a failure to load
// the law itself is returned as an error.
func (indictment *indictCommand) Execute() error {
	law, err := laws.RetrieveFrom(indictment.lawPath)
	if err != nil {
		return fmt.Errorf("unable to retrieve law contents\n%w", err)
	}
	for _, target := range indictment.paths {
		if indictErr := indict(target, law.Expected); indictErr != nil {
			fmt.Print(color.Yellow)
			fmt.Printf("%v dismissed\n", target)
			fmt.Println(indictErr.Error())
			fmt.Print(color.Reset)
		}
	}
	fmt.Println("indictment complete")
	return nil
}
// indict opens the file at path, conducts a trial against the expected
// pattern and prints the verdict; evidence is printed only on a guilty
// (non-innocent) verdict. The returned error is non-nil only when the file
// cannot be opened.
func indict(path string, expected regexp.Regexp) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()
	innocent, evidence := trial.Conduct(expected, file)
	fmt.Printf("%v is %v\n", path, trial.ToVerdict(innocent))
	if innocent {
		return nil
	}
	fmt.Println("\n" + trial.FormatEvidence(evidence))
	return nil
}
|
package main
import (
//_ "image/gif"
//_ "image/jpeg"
_ "image/png"
"github.com/360EntSecGroup-Skylar/excelize"
)
// main builds a workbook, inserts the same PNG three times (plain, scaled,
// and offset with print options), then saves the file. Errors are reported
// but do not stop the remaining insertions.
func main() {
	workbook := excelize.NewFile()
	pictures := []struct {
		cell   string
		format string
	}{
		// Insert a picture.
		{"A2", ""},
		// Insert a picture to worksheet with scaling.
		{"D2", `{"x_scale": 0.5, "y_scale": 0.5}`},
		// Insert a picture offset in the cell with printing support.
		{"H2", `{"x_offset": 15, "y_offset": 10, "print_obj": true, "lock_aspect_ratio": false, "locked": false}`},
	}
	for _, p := range pictures {
		if err := workbook.AddPicture("Sheet1", p.cell, "mypng.png", p.format); err != nil {
			println(err.Error())
		}
	}
	// Save the xlsx file with the origin path.
	if err := workbook.SaveAs("Pictures.xlsx"); err != nil {
		println(err.Error())
	}
}
|
package update
import "core"
// CheckAndUpdate is a stub implementation that does nothing.
// All parameters are accepted and ignored; builds using this file compile
// the self-update feature out entirely.
func CheckAndUpdate(config *core.Configuration, updatesEnabled, updateCommand, forceUpdate, verify bool) {
}

// DownloadPyPy is also a stub that does nothing. It always returns false,
// i.e. it reports that no PyPy download took place.
func DownloadPyPy(config *core.Configuration) bool {
	return false
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
)
// part1 reads the puzzle input (one box ID per line) and prints the
// Advent of Code 2018 day-2 part-1 checksum: the number of IDs containing
// some letter exactly twice multiplied by the number of IDs containing some
// letter exactly three times.
func part1() {
	// Assumes current working directory is `day-02/`!
	fileContent, err := ioutil.ReadFile("puzzle-input.txt")
	if err != nil {
		// Best-effort like the original: report and continue with empty
		// content, which yields a checksum of 0.
		fmt.Println(err)
	}
	listOfIDs := strings.Split(string(fileContent), "\n")
	fmt.Printf("CHECKSUM: %v\n", boxIDChecksum(listOfIDs))
}

// boxIDChecksum returns (#IDs with a letter appearing exactly twice) *
// (#IDs with a letter appearing exactly three times). Each ID contributes at
// most once to each factor, regardless of how many letters qualify.
func boxIDChecksum(ids []string) int {
	twos, threes := 0, 0
	for _, id := range ids {
		charCount := make(map[rune]int)
		for _, r := range id {
			charCount[r]++ // map zero value makes the explicit "exists" check unnecessary
		}
		hasTwo, hasThree := false, false
		for _, n := range charCount {
			switch n {
			case 2:
				hasTwo = true
			case 3:
				hasThree = true
			}
		}
		if hasTwo {
			twos++
		}
		if hasThree {
			threes++
		}
	}
	return twos * threes
}
|
// +build linux
/*
Copyright (c) 2018 GigaSpaces Technologies Ltd. All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package container - functions for "cfy local" style operations.
*/
package container
import (
"fmt"
"os"
"path"
"strings"
"syscall"
"time"
)
// makedev packs a major/minor device pair into the legacy 16-bit dev_t
// layout: low 8 bits minor, next 12 bits major. This is sufficient for the
// small static device numbers created below (all majors <= 5, minors <= 9);
// it does NOT implement the full 64-bit glibc makedev(3) encoding needed for
// minors >= 256 — confirm before reusing for arbitrary devices.
func makedev(major int, minor int) int {
	return (minor & 0xff) | (major&0xfff)<<8
}
// mkdirWarn creates dir with the given permissions. Failure is reported but
// not fatal: the directory may already exist and be usable.
func mkdirWarn(dir string, mode os.FileMode) {
	if err := os.Mkdir(dir, mode); err != nil {
		fmt.Printf("Not critical: %s\n", err.Error())
	}
}

// mknodChar creates the character device <devDir><name> (name includes the
// leading slash) with the given permission bits and major/minor numbers.
// Failure is reported but not fatal; mknod generally requires root.
func mknodChar(devDir, name string, perm uint32, major, minor int) {
	if err := syscall.Mknod(path.Join(devDir, name),
		perm|syscall.S_IFCHR, makedev(major, minor)); err != nil {
		fmt.Printf("mknod /dev%s: %s\n", name, err.Error())
	}
}

// createDirInContainer populates the overlay root with the minimal
// filesystem skeleton a chrooted process expects: /sys and /proc mount
// points (read-only perms), a world-writable /tmp, and a /dev with the basic
// character devices (full, ptmx, random, urandom, zero, tty).
func createDirInContainer(combinedDir string) {
	// Reset umask before doing anything so the modes below apply exactly;
	// restore it on exit.
	oldUmask := syscall.Umask(0)
	defer syscall.Umask(oldUmask)

	// /sys and /proc: recreated from scratch as read-only (0555) mount points.
	sysDir := path.Join(combinedDir, "/sys")
	os.RemoveAll(sysDir)
	mkdirWarn(sysDir,
		syscall.S_IRUSR|syscall.S_IXUSR|
			syscall.S_IRGRP|syscall.S_IXGRP|
			syscall.S_IROTH|syscall.S_IXOTH)

	procDir := path.Join(combinedDir, "/proc")
	os.RemoveAll(procDir)
	mkdirWarn(procDir,
		syscall.S_IRUSR|syscall.S_IXUSR|
			syscall.S_IRGRP|syscall.S_IXGRP|
			syscall.S_IROTH|syscall.S_IXOTH)

	// /tmp: world-writable (0777), created only if missing — existing
	// contents are preserved.
	tmpDir := path.Join(combinedDir, "/tmp")
	if _, err := os.Stat(tmpDir); err != nil {
		mkdirWarn(tmpDir,
			syscall.S_IRUSR|syscall.S_IWUSR|syscall.S_IXUSR|
				syscall.S_IRGRP|syscall.S_IWGRP|syscall.S_IXGRP|
				syscall.S_IROTH|syscall.S_IWOTH|syscall.S_IXOTH)
	}

	// /dev: recreated from scratch (0755) and populated with the standard
	// character devices.
	devDir := path.Join(combinedDir, "/dev")
	os.RemoveAll(devDir)
	mkdirWarn(devDir,
		syscall.S_IRUSR|syscall.S_IWUSR|syscall.S_IXUSR|
			syscall.S_IRGRP|syscall.S_IXGRP|
			syscall.S_IROTH|syscall.S_IXOTH)

	const rwAll = syscall.S_IRUSR | syscall.S_IWUSR |
		syscall.S_IRGRP | syscall.S_IWGRP |
		syscall.S_IROTH | syscall.S_IWOTH // 0666
	const rwOwnerReadRest = syscall.S_IRUSR | syscall.S_IWUSR |
		syscall.S_IRGRP | syscall.S_IROTH // 0644

	mknodChar(devDir, "/full", rwAll, 1, 7)
	mknodChar(devDir, "/ptmx", rwAll, 5, 2)
	mknodChar(devDir, "/random", rwOwnerReadRest, 1, 8)
	mknodChar(devDir, "/urandom", rwOwnerReadRest, 1, 9)
	mknodChar(devDir, "/zero", rwAll, 1, 5)
	mknodChar(devDir, "/tty", rwAll, 5, 0)
}
// mountEverythingAndRun mounts /proc inside the prepared overlay root,
// chroots into it via ForkExec, and runs argv0 with argv attached to this
// process' stdin/stdout/stderr. It blocks until the child exits, then waits
// ten seconds before returning so the deferred lazy unmount can settle.
// Returns 0 on success, 1 on any mount or exec failure.
func mountEverythingAndRun(combinedDir string, argv0 string, argv []string) int {
	fmt.Printf("I am going to run: %+v\n", strings.Join(argv, " "))
	// proc is needed by most tools inside the chroot; mounted nodev/noexec/nosuid.
	procDir := path.Join(combinedDir, "/proc")
	if err := syscall.Mount("proc", procDir, "proc",
		syscall.MS_NODEV|syscall.MS_NOEXEC|syscall.MS_NOSUID, ""); err != nil {
		fmt.Printf("mount proc: %s\n", err.Error())
		return 1
	}
	// MNT_DETACH: lazy unmount, so a still-busy /proc does not block cleanup.
	defer syscall.Unmount(procDir, syscall.MNT_DETACH)
	var procInfo syscall.SysProcAttr
	procInfo.Chroot = combinedDir
	var env syscall.ProcAttr
	env.Env = []string{"PATH=/usr/sbin:/usr/bin:/sbin:/bin"}
	// TODO: hackish way, but ok for now
	env.Files = []uintptr{0, 1, 2} // pass through our stdin/stdout/stderr
	env.Sys = &procInfo
	env.Dir = "/"
	pid, err := syscall.ForkExec(argv0, argv, &env)
	if err != nil {
		fmt.Printf("Issues with run: %s\n", err.Error())
		return 1
	}
	// Block until the child exits; its exit status is deliberately ignored.
	syscall.Wait4(pid, nil, 0, nil)
	fmt.Printf("Wait 10 seconds before revert everything.\n")
	time.Sleep(10 * time.Second)
	return 0
}
// Run - execute command inside controller.
// It overlays baseDir (read-only lower layer) with dataDir (writable upper
// layer, where all changes persist) using tempDir for the overlayfs work and
// mount directories, prepares the container skeleton, and executes
// commandList[0] with commandList as argv inside the chroot. Returns the
// exit code of the setup/run (0 success, 1 failure). Requires root and a
// kernel with overlayfs.
func Run(baseDir, dataDir, tempDir string, commandList []string) int {
	fmt.Printf("As Operation System filesystem will be used: %s\n", baseDir)
	// Upper (writable) layer: created 0755 only if missing, contents kept.
	if _, err := os.Stat(dataDir); err != nil {
		if err := os.Mkdir(dataDir,
			syscall.S_IRUSR|syscall.S_IWUSR|syscall.S_IXUSR|
				syscall.S_IRGRP|syscall.S_IXGRP|
				syscall.S_IROTH|syscall.S_IXOTH); err != nil {
			fmt.Printf("Not critical: %s\n", err.Error())
		}
	}
	fmt.Printf("Data changes will be stored in: %s\n", dataDir)
	// overlayfs scratch directory; must be on the same filesystem as dataDir.
	workDir := path.Join(tempDir, "work")
	if err := os.Mkdir(workDir,
		syscall.S_IRUSR|syscall.S_IWUSR|syscall.S_IXUSR|
			syscall.S_IRGRP|syscall.S_IXGRP|
			syscall.S_IROTH|syscall.S_IXOTH); err != nil {
		fmt.Printf("Not critical: %s\n", err.Error())
	}
	// try to delete, on error
	defer os.RemoveAll(workDir)
	// Mount point for the merged view of base + data.
	combinedDir := path.Join(tempDir, "overlay")
	if err := os.Mkdir(combinedDir,
		syscall.S_IRUSR|syscall.S_IWUSR|syscall.S_IXUSR|
			syscall.S_IRGRP|syscall.S_IXGRP|
			syscall.S_IROTH|syscall.S_IXOTH); err != nil {
		fmt.Printf("Not critical: %s\n", err.Error())
	}
	// try to delete, on error
	defer os.RemoveAll(combinedDir)
	// https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
	mountOptions := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", baseDir, dataDir, workDir)
	// mount overlayfs
	if err := syscall.Mount("overlay", combinedDir, "overlay", 0, mountOptions); err != nil {
		fmt.Printf("Overlay fs: %s\n", err.Error())
		return 1
	}
	// Lazy unmount on exit (MNT_DETACH) so a busy mount does not wedge cleanup.
	defer syscall.Unmount(combinedDir, syscall.MNT_DETACH)
	// Populate /sys, /proc, /tmp and /dev inside the merged root.
	createDirInContainer(combinedDir)
	// real work
	return mountEverythingAndRun(combinedDir, commandList[0], commandList)
}
|
package sum
import "testing"
// TestSum checks that Sum adds two integers correctly.
func TestSum(t *testing.T) {
	got := Sum(1, 1)
	if got != 2 {
		t.Errorf("%d does not equal 2", got)
	}
}
|
/*
nightHawkAPI.main;
*/
package main
import (
"flag"
"fmt"
"log"
"net/http"
"nighthawk"
api "nighthawkapi/api/core"
routes "nighthawkapi/api/routes"
"os"
)
// RuntimeOptions holds the command-line configuration of the API server;
// populated from flags in main.
type RuntimeOptions struct {
	Debug, Help bool   // -d console-level debugging, -h usage
	Server      string // -s bind address
	Port        int    // -p bind port
	Version     bool   // -version: print version information and exit
}
// fUsage prints the program banner and flag defaults, then terminates the
// process with exit code 0. Installed as flag.Usage in main.
func fUsage() {
	fmt.Printf("\tnightHawkAPI v%s, by Team nightHawk (Daniel Eden & Roshan Maskey).\n", api.VERSION)
	fmt.Printf("Usage: %s [OPTIONS] argument ...\n", os.Args[0])
	flag.PrintDefaults()
	os.Exit(0)
}
// main parses command-line flags and, unless a help/version request exits
// first, starts the background manager and serves the HTTP API on the
// configured address.
func main() {
	flag.Usage = fUsage
	var runopt RuntimeOptions
	flag.BoolVar(&runopt.Help, "h", false, "Display use flags.")
	flag.BoolVar(&runopt.Debug, "d", false, "Turn on console level debugging.")
	flag.StringVar(&runopt.Server, "s", "localhost", "Bind server to address. Default: localhost")
	flag.IntVar(&runopt.Port, "p", 8080, "Bind server to port. Default: 8080")
	flag.BoolVar(&runopt.Version, "version", false, "Show version information")
	flag.Parse()
	if runopt.Help {
		fUsage() // prints usage and exits the process
	}
	if runopt.Version {
		nighthawk.ShowVersion("API Server")
		os.Exit(0)
	}
	if runopt.Debug {
		api.DEBUG = true
	}
	// NOTE(review): with the defaults above this condition is true unless -s
	// is explicitly set to "" while -p stays 8080 — confirm whether that
	// guard is intentional or the server should start unconditionally.
	if runopt.Server != "" || runopt.Port != 8080 {
		go api.Manager.Start()
		router := routes.NewRouter()
		api.LogDebug(api.DEBUG, fmt.Sprintf("[-] Serving on %s", fmt.Sprintf("%s:%d", runopt.Server, runopt.Port)))
		log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%d", runopt.Server, runopt.Port), router))
	}
}
|
// Copyright (c) 2016 Readium Foundation
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// 3. Neither the name of the organization nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package weblicense
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"log"
"strings"
"github.com/readium/readium-lcp-server/config"
)
// License status values stored in the status column of license_view.
const (
	StatusDraft      string = "draft"
	StatusEncrypting string = "encrypting"
	StatusError      string = "error"
	StatusOk         string = "ok"
)

// ErrNotFound error thrown when the license is not found.
var ErrNotFound = errors.New("License not found")

// WebLicense interface for license db interaction.
type WebLicense interface {
	// Get returns the license with the given view id.
	Get(id int64) (License, error)
	// GetFiltered returns licenses whose device count is >= filter.
	GetFiltered(filter string) ([]License, error)
	// Add inserts a license into license_view.
	Add(license License) error
	// AddFromJSON inserts licenses decoded from a JSON array.
	AddFromJSON(licensesJSON []byte) error
	// PurgeDataBase empties the license_view table.
	PurgeDataBase() error
	// Update rewrites the mutable fields of a license.
	Update(license License) error
	// Delete removes a license by its view id.
	Delete(id int64) error
}

// License struct defines a license.
type License struct {
	// NOTE(review): the empty json tag keeps the default field name "ID";
	// confirm whether a named tag (or `json:"-"`) was intended.
	ID               string `json:""`
	PublicationTitle string `json:"publication_title"`
	UserName         string `json:"user_name"`
	Type             string `json:"type"`
	UUID             string `json:"id"`
	DeviceCount      int    `json:"device_count"`
	Status           string `json:"status"`
	PurchaseID       int    `json:"purchase_id"`
	Message          string `json:"message"`
}

// Licenses struct defines a licenses array to be transferred
// (the subset of License fields accepted by AddFromJSON).
type Licenses []struct {
	ID          string `json:""`
	UUID        string `json:"id"`
	Status      string `json:"status"`
	DeviceCount int    `json:"device_count"`
	Message     string `json:"message"`
}

// LicenseManager helper; implements WebLicense on top of a *sql.DB.
type LicenseManager struct {
	config config.Configuration
	db     *sql.DB
}
// Get returns the license whose view id matches the given id, joined with
// its purchase, publication and user rows. Returns ErrNotFound when no row
// matches.
func (licManager LicenseManager) Get(id int64) (License, error) {
	dbGetByID, err := licManager.db.Prepare(`SELECT l.uuid, pu.title, u.name, p.type, l.device_count, l.status, p.id, l.message FROM license_view AS l
	INNER JOIN purchase as p ON l.uuid = p.license_uuid
	INNER JOIN publication as pu ON p.publication_id = pu.id
	INNER JOIN user as u ON p.user_id = u.id
	WHERE id = ?`)
	if err != nil {
		return License{}, err
	}
	defer dbGetByID.Close()
	records, err := dbGetByID.Query(id)
	// Fix: the Query error was previously unchecked; a failed query returned
	// nil rows and records.Next() would panic.
	if err != nil {
		return License{}, err
	}
	defer records.Close()
	if records.Next() {
		var lic License
		err = records.Scan(
			&lic.ID,
			&lic.PublicationTitle,
			&lic.UserName,
			&lic.Type,
			&lic.DeviceCount,
			&lic.Status,
			&lic.PurchaseID,
			&lic.Message)
		return lic, err
	}
	return License{}, ErrNotFound
}
// GetFiltered returns the licenses whose device count is greater than or
// equal to the given filter value, joined with their purchase, publication
// and user rows. Scan failures on individual rows are reported to stdout
// and the row is still appended (zero-valued), preserving prior behavior.
func (licManager LicenseManager) GetFiltered(filter string) ([]License, error) {
	dbGetByID, err := licManager.db.Prepare(`SELECT l.uuid, pu.title, u.name, p.type, l.device_count, l.status, p.id, l.message FROM license_view AS l
	INNER JOIN purchase as p ON l.uuid = p.license_uuid
	INNER JOIN publication as pu ON p.publication_id = pu.id
	INNER JOIN user as u ON p.user_id = u.id
	WHERE l.device_count >= ?`)
	if err != nil {
		return []License{}, err
	}
	defer dbGetByID.Close()
	records, err := dbGetByID.Query(filter)
	// Fix: the Query error was previously unchecked; a failed query returned
	// nil rows and records.Next() would panic.
	if err != nil {
		return []License{}, err
	}
	defer records.Close()
	licences := make([]License, 0, 20)
	for records.Next() {
		var lic License
		err = records.Scan(
			&lic.ID,
			&lic.PublicationTitle,
			&lic.UserName,
			&lic.Type,
			&lic.DeviceCount,
			&lic.Status,
			&lic.PurchaseID,
			&lic.Message)
		if err != nil {
			fmt.Println(err)
		}
		licences = append(licences, lic)
	}
	return licences, nil
}
// Add inserts a single license row (uuid, device count, status, message)
// into the license_view table.
func (licManager LicenseManager) Add(licenses License) error {
	stmt, err := licManager.db.Prepare("INSERT INTO license_view (uuid, device_count, status, message) VALUES (?, ?, ?, ?)")
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(licenses.UUID, licenses.DeviceCount, licenses.Status, licenses.Message)
	return err
}
// AddFromJSON decodes a JSON array of licenses and inserts each one into
// license_view. Insertion stops at the first failing row.
func (licManager LicenseManager) AddFromJSON(licensesJSON []byte) error {
	var licenses Licenses
	if err := json.Unmarshal(licensesJSON, &licenses); err != nil {
		return err
	}
	// Fix: prepare the statement once instead of once per license; the
	// original also deferred every Close inside the loop, holding all
	// statements open until the function returned.
	add, err := licManager.db.Prepare("INSERT INTO license_view (uuid, device_count, status, message) VALUES (?, ?, ?, ?)")
	if err != nil {
		return err
	}
	defer add.Close()
	for _, l := range licenses {
		if _, err := add.Exec(l.UUID, l.DeviceCount, l.Status, l.Message); err != nil {
			return err
		}
	}
	return nil
}
// PurgeDataBase erases all the content of the license_view table.
func (licManager LicenseManager) PurgeDataBase() error {
	stmt, err := licManager.db.Prepare("DELETE FROM license_view")
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec()
	return err
}
// Update rewrites the device count, uuid, status and message of the license
// row identified by its view id.
func (licManager LicenseManager) Update(lic License) error {
	dbUpdate, err := licManager.db.Prepare("UPDATE license_view SET device_count=?, uuid=?, status=? , message=? WHERE id = ?")
	if err != nil {
		return err
	}
	defer dbUpdate.Close()
	// Fix: bind parameters in the same order as the SQL placeholders:
	// device_count, uuid, status, message, id. The previous code passed
	// (DeviceCount, Status, UUID, ID, Message), which wrote status into the
	// uuid column, uuid into status, the id into message, and matched
	// WHERE id against the message.
	_, err = dbUpdate.Exec(
		lic.DeviceCount,
		lic.UUID,
		lic.Status,
		lic.Message,
		lic.ID)
	return err
}
// Delete removes the license with the given view id from license_view.
func (licManager LicenseManager) Delete(id int64) error {
	stmt, err := licManager.db.Prepare("DELETE FROM license_view WHERE id = ?")
	if err != nil {
		log.Println("Error deleting license_view table")
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(id)
	return err
}
// Init builds the license manager. When the frontend database is sqlite it
// first ensures the license_view table exists.
func Init(config config.Configuration, db *sql.DB) (WebLicense, error) {
	// if sqlite, create the content table in the frontend db if it does not exist
	if strings.HasPrefix(config.FrontendServer.Database, "sqlite") {
		if _, err := db.Exec(tableDef); err != nil {
			log.Println("Error creating license_view table")
			return nil, err
		}
	}
	return LicenseManager{config, db}, nil
}
// tableDef creates the license_view table used by this package; executed by
// Init when the frontend database is sqlite.
const tableDef = "CREATE TABLE IF NOT EXISTS license_view (" +
	"id integer NOT NULL PRIMARY KEY," +
	"uuid varchar(255) NOT NULL," +
	"device_count integer NOT NULL," +
	"status varchar(255) NOT NULL," +
	"message varchar(255) NOT NULL)"
|
package pathrename
import (
index "github.com/begopher/index/v2"
)
// Config supplies the rename boundaries used when renaming paths.
type Config interface {
	Flag() Flag // Flag controls which targets renaming applies to (file/dir) — TODO confirm exact Flag semantics; original comment was garbled
	DirIndexes(string) (start int, end int)  // start/end indexes of the renameable span of a directory name
	FileIndexes(string) (start int, end int) // start/end indexes of the renameable span of a file name
}
// immutableConfig is the value-type implementation of Config; every field is
// set at construction time and never mutated.
type immutableConfig struct {
	flag      Flag
	dirStart  index.Index // resolves the rename start index for directories
	dirEnd    index.Index // resolves the rename end index for directories
	fileStart index.Index // resolves the rename start index for files
	fileEnd   index.Index // resolves the rename end index for files
}

// Flag returns the renaming-target flag this config was built with.
func (c immutableConfig) Flag() Flag { return c.flag }

// DirIndexes resolves the start/end rename boundaries within a directory name.
func (c immutableConfig) DirIndexes(value string) (start int, end int) {
	start = c.dirStart.Get(value)
	end = c.dirEnd.Get(value)
	return start, end
}

// FileIndexes resolves the start/end rename boundaries within a file name.
func (c immutableConfig) FileIndexes(value string) (start int, end int) {
	start = c.fileStart.Get(value)
	end = c.fileEnd.Get(value)
	return start, end
}
// DefaultConfig builds a Config for the given flag: directories are renamed
// over their whole name; files are renamed up to a "."-based boundary when
// one exists (per index.Strict — verify its exact semantics), otherwise
// over the whole name.
func DefaultConfig(flag Flag) Config {
	zero := index.Zero()
	length := index.Length()
	// index.Strict cannot fail for this fixed pattern, so the error is dropped.
	dot, _ := index.Strict(".", 1, true, true)
	return immutableConfig{
		flag:      flag,
		dirStart:  zero,
		dirEnd:    length,
		fileStart: zero,
		fileEnd:   index.Either(dot, length),
	}
}
// NewConfig builds an immutable Config from explicitly supplied index
// bounds for file and directory names.
func NewConfig(flag Flag, fileStart, fileEnd, dirStart, dirEnd index.Index) Config {
	cfg := immutableConfig{flag: flag}
	cfg.fileStart = fileStart
	cfg.fileEnd = fileEnd
	cfg.dirStart = dirStart
	cfg.dirEnd = dirEnd
	return cfg
}
// undoConfig returns the Config used when undoing renames: every entry
// kind is eligible and the whole name (index zero to length) is in scope.
func undoConfig() Config {
	return NewConfig(UFile|HFile|UDirectory|HDirectory, index.Zero(), index.Length(), index.Zero(), index.Length())
}
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"sort"
"strings"
)
// PType is one Prometheus metric: its name, the HELP/TYPE header text, and
// every labeled sample that shares the name.
type PType struct {
	Name string
	Help string
	Type string
	Counters []PCounter
}
// PCounter is a single sample: a fully rendered label set (including the
// surrounding braces; may be empty) and its value.
type PCounter struct {
	Labels string
	Value uint64
}
// VCounter mirrors one counter object in `varnishstat -j` JSON output.
type VCounter struct {
	Description string `json:"description"`
	Flag string `json:"flag"`
	Format string `json:"format"`
	Value uint64 `json:"value"`
}
// check terminates the process with a formatted error message when e is
// non-nil; with a nil error it is a no-op.
func check(e error, format string, args ...interface{}) {
	if e == nil {
		return
	}
	fmt.Printf("error: "+format, args...)
	fmt.Printf(" (%s)\n", e)
	os.Exit(1)
}
// main reads varnishstat JSON — from the varnishstat binary, stdin, or a
// file depending on -input — decodes it, and prints the counters in
// Prometheus text exposition format.
func main() {
	var varnishstatPath, inputSource string
	flag.StringVar(&inputSource, "input", "", "Empty use varnishstat as source, \"-\" use stdin, anything else assumes it's a readable file")
	flag.StringVar(&varnishstatPath, "bin-path", "/usr/bin/varnishstat", "Supply the varnishstat path.")
	flag.Parse()

	var reader io.Reader
	switch inputSource {
	case "-":
		reader = os.Stdin
	case "":
		out, err := exec.Command(varnishstatPath, "-j").Output()
		check(err, "Could not run the supplied varnishstat file \"%v\".", varnishstatPath)
		reader = bytes.NewReader(out)
	default:
		content, err := ioutil.ReadFile(inputSource)
		check(err, "Could not read the input file \"%v\".", inputSource)
		reader = bytes.NewReader(content)
	}

	counters := make(map[string](json.RawMessage))
	err := json.NewDecoder(reader).Decode(&counters)
	check(err, "Could not decode json data.")
	fmt.Print(counter2prometheusWrapper(counters))
}
// counter2prometheusWrapper converts decoded varnishstat counters into
// Prometheus text exposition format. Counters that map to the same metric
// name are merged under a single HELP/TYPE header, and both metrics and
// their label sets are emitted in sorted, deterministic order.
//
// Fixes: the old named return `returnString` was never used (a second
// `returnStr` shadowed it); `for i, _ := range` is non-idiomatic; the
// string was built with quadratic `+=` instead of strings.Builder.
func counter2prometheusWrapper(counters map[string](json.RawMessage)) string {
	var pts []PType
	for k, o := range counters {
		// "timestamp" is metadata emitted by varnishstat, not a counter.
		if k == "timestamp" {
			continue
		}
		var c VCounter
		err := json.Unmarshal(o, &c)
		check(err, "Could not unmarshal json data.")
		pt := counter2prometheus(k, c)
		found := false
		for i := range pts {
			if pts[i].Name != pt.Name {
				continue
			}
			// Same metric name: merge the new labeled sample into it.
			pts[i].Counters = append(pts[i].Counters, pt.Counters[0])
			found = true
			break
		}
		if !found {
			pts = append(pts, pt)
		}
	}
	sort.SliceStable(pts, func(i, j int) bool { return pts[i].Name < pts[j].Name })
	var b strings.Builder
	for _, pt := range pts {
		sort.SliceStable(pt.Counters, func(i, j int) bool { return pt.Counters[i].Labels < pt.Counters[j].Labels })
		fmt.Fprintf(&b, "# HELP %v %v\n# TYPE %v %v\n", pt.Name, pt.Help, pt.Name, pt.Type)
		for _, pc := range pt.Counters {
			fmt.Fprintf(&b, "%v%v %d\n", pt.Name, pc.Labels, pc.Value)
		}
	}
	return b.String()
}
// flag2type maps a varnishstat semantics flag to a Prometheus metric type:
// "c" (counter) becomes counter, "g" (gauge) and "b" (bitmap/boolean)
// become gauge, and anything unrecognized falls back to untyped.
//
// Fix: the old default-case comment claimed it covered the "b" flag, but
// "b" is handled by the gauge case above — the stale comment is removed.
func flag2type(f string) string {
	switch f {
	case "c":
		return "counter"
	case "g", "b":
		return "gauge"
	default:
		return "untyped"
	}
}
// https://prometheus.io/docs/concepts/data_model defines the valid regex syntax as "[a-zA-Z_:][a-zA-Z0-9_:]*".
// Note colons, ':', are removed below from the regex as the input does not contain user defined recording rules.

// counter2prometheus converts one varnishstat counter (key k, decoded value
// c) into a PType carrying the Prometheus metric name, help text, metric
// type and a single labeled sample. The key's section prefix selects how
// structured key parts are split into labels.
//
// Fixes: the "main" section compared the split prefix against "sess_" —
// strings.SplitN on "_" yields "sess", so sess_* counters were never
// normalized to "session"; the "goto" regex left the dots around "goto"
// unescaped, matching any character where a literal "." was intended.
func counter2prometheus(k string, c VCounter) PType {
	cleanRe := regexp.MustCompile(`[^a-zA-Z0-9_]+`) // Replace non syntax allowed characters with underscores.
	splits := strings.SplitN(k, ".", 2)
	section := ""
	name := ""
	labels := ""
	if len(splits) == 1 {
		// No section prefix at all; file the counter under "unknown".
		section = "unknown"
		name = splits[0]
	} else {
		section = strings.ToLower(splits[0])
		name = splits[1]
	}
	switch section {
	case "kvstore":
		// Key shape: {space}.{vcl}.{counter-name}.
		sub := strings.SplitN(name, ".", 3)
		name = "counter"
		labels = fmt.Sprintf(`{vcl="%s",space="%s",name="%s"}`, sub[1], sub[0], sub[2])
	case "lck":
		section = "lock"
		sub := strings.SplitN(name, ".", 2)
		obj := sub[0]
		t := sub[1]
		// Normalize lock operation names to friendlier label values.
		switch t {
		case "creat":
			t = "created"
		case "destroy":
			t = "destroyed"
		case "locks":
			t = "operations"
		}
		name = obj
		labels = fmt.Sprintf("{target=\"%s\"}", t)
	case "main":
		switch name {
		case "s_sess":
			name = "sessions_total"
		case "s_fetch":
			name = "fetch_total"
		default:
			if strings.HasPrefix(name, "sess_") || strings.HasPrefix(name, "fetch_") {
				split := strings.SplitN(name, "_", 2)
				subsection := split[0]
				// BUGFIX: SplitN drops the separator, so the prefix to
				// expand is "sess" (the old "sess_" never matched).
				if subsection == "sess" {
					subsection = "session"
				}
				name = subsection
				labels = fmt.Sprintf("{type=\"%s\"}", split[1])
			}
		}
	case "mse", "mse_book", "mse_store", "sma", "smf":
		// Storage engines: fold the mse_* variants into a single "mse"
		// metric distinguished by a type label.
		extra := ""
		switch section {
		case "mse":
			extra = `,type="env"`
		case "mse_book":
			section = "mse"
			extra = `,type="book"`
		case "mse_store":
			section = "mse"
			extra = `,type="store"`
		}
		split := strings.SplitN(name, ".", 2)
		name = split[1]
		labels = fmt.Sprintf("{%s=\"%s\"%s}", "id", split[0], extra)
	case "vbe":
		section = "backend"
		// looking for "{vcl}.goto.{id}.({ip}).({url}).(ttl:{ttl}).{domain}"
		// e.g. default.goto.00000000.(8.8.8.8).(http://example.com:80).(ttl:10.000000).bereq_bodybytes
		// BUGFIX: dots around "goto" are now escaped.
		gotoRe := regexp.MustCompile(`^(.*)\.goto\.([0-9]+)\.\((.+)\)\.\((.*)\)\.\(ttl:([0-9]+\.[0-9]+)\)\.(.+)`)
		matches := gotoRe.FindStringSubmatch(name)
		if matches != nil {
			name = matches[6]
			labels = fmt.Sprintf(`{backend="goto",vcl="%s",domain="%s",ip="%s",ttl="%s",id="%s"}`, matches[1], matches[4], matches[3], matches[5], matches[2])
		} else {
			split := strings.SplitN(name, ".", 3)
			name = cleanRe.ReplaceAllLiteralString(split[2], "_")
			labels = fmt.Sprintf("{backend=\"%s\",vcl=\"%s\"}", split[1], split[0])
		}
	case "mempool":
		split := strings.SplitN(name, ".", 2)
		name = split[1]
		labels = fmt.Sprintf("{id=\"%s\"}", split[0])
	default:
		name = cleanRe.ReplaceAllLiteralString(name, "_")
	}
	pt := PType{
		Name: fmt.Sprintf("varnish_%s_%s", section, name),
		Help: c.Description,
		Type: flag2type(c.Flag),
		Counters: []PCounter{
			PCounter{
				Labels: labels,
				Value:  c.Value,
			},
		},
	}
	return pt
}
|
package mqtt
// MQTT quality-of-service levels: QOS_0 = 0 (at most once),
// QOS_1 = 1 (at least once), QOS_2 = 2 (exactly once).
const (
	QOS_0 = iota
	QOS_1
	QOS_2
)
|
package observe
import (
"fmt"
"github.com/nokamoto/grpc-proxy/yaml"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc/codes"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
)
// TestProm_NewProm checks that two distinct prom configs loaded from the
// fixture yaml can both be constructed without error.
func TestProm_NewProm(t *testing.T) {
	yml, err := yaml.NewYaml("../testdata/yaml/prom_new.yaml")
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 2; i++ {
		if _, err := NewProm(yml.Observe.Prom[i]); err != nil {
			t.Fatal(err)
		}
	}
}
// TestProm_NewProm_duplicated checks that constructing the same prom config
// twice fails on the second construction.
func TestProm_NewProm_duplicated(t *testing.T) {
	yml, err := yaml.NewYaml("../testdata/yaml/prom_duplicated.yaml")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := NewProm(yml.Observe.Prom[0]); err != nil {
		t.Fatal(err)
	}
	if _, err := NewProm(yml.Observe.Prom[1]); err == nil {
		t.Fatal()
	}
}
// TestProm_Observe records three observations through a Prom instance and
// then scrapes the local /metrics endpoint, asserting the exact exposition
// lines for the request counter and the latency/request/response histograms.
func TestProm_Observe(t *testing.T) {
	port := 9000
	afterEachProm := beforeEachProm(t, port)
	defer afterEachProm()
	yml, err := yaml.NewYaml("../testdata/yaml/prom.yaml")
	if err != nil {
		t.Fatal(err)
	}
	p, err := NewProm(yml.Observe.Prom[0])
	if err != nil {
		t.Fatal(err)
	}
	// write records one observation (method, status, request/response sizes, latency).
	write := func(method string, code codes.Code, req int, res int, nanos time.Duration) {
		err = p.Observe(method, code, req, res, nanos)
		if err != nil {
			t.Fatal(err)
		}
	}
	x := "x"
	y := "y"
	write(x, codes.OK, 127, 255, 1500*time.Millisecond)
	write(x, codes.OK, 255, 127, 750*time.Millisecond)
	write(y, codes.OK, 511, 63, 250*time.Millisecond)
	// counter asserts the request-count exposition line for a method/status pair.
	counter := func(method string, code codes.Code, n int) {
		retriveProm(t, fmt.Sprintf(`default_request_count{method="%s",status="%s"} %d`, method, code, n), port)
	}
	counter(x, codes.OK, 2)
	counter(y, codes.OK, 1)
	// hist asserts one cumulative histogram bucket line.
	hist := func(bucket string, method string, code codes.Code, le string, n int) {
		retriveProm(t, fmt.Sprintf(`default_%s{method="%s",status="%s",le="%s"} %d`, bucket, method, code, le, n), port)
	}
	latency := "latency_seconds_bucket"
	hist(latency, x, codes.OK, "+Inf", 2)
	hist(latency, x, codes.OK, "1", 1)
	hist(latency, x, codes.OK, "0.5", 0)
	hist(latency, y, codes.OK, "+Inf", 1)
	hist(latency, y, codes.OK, "1", 1)
	hist(latency, y, codes.OK, "0.5", 1)
	request := "request_bytes_bucket"
	hist(request, x, codes.OK, "+Inf", 2)
	hist(request, x, codes.OK, "256", 2)
	hist(request, x, codes.OK, "128", 1)
	hist(request, y, codes.OK, "+Inf", 1)
	hist(request, y, codes.OK, "256", 0)
	hist(request, y, codes.OK, "128", 0)
	response := "response_bytes_bucket"
	hist(response, x, codes.OK, "+Inf", 2)
	hist(response, x, codes.OK, "128", 1)
	hist(response, x, codes.OK, "64", 0)
	hist(response, y, codes.OK, "+Inf", 1)
	hist(response, y, codes.OK, "128", 1)
	hist(response, y, codes.OK, "64", 1)
}
// retriveProm scrapes the local /metrics endpoint and fails the test unless
// some line of the body equals expected exactly.
func retriveProm(t *testing.T, expected string, port int) {
	res, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
	if err != nil {
		t.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	for _, line := range strings.Split(string(body), "\n") {
		if line == expected {
			return
		}
	}
	t.Fatalf("%s not found: %s", expected, string(body))
}
// beforeEachProm starts an HTTP server exposing Prometheus metrics on the
// given port and returns a teardown function that shuts it down.
//
// Fix: the handler is registered on a private ServeMux instead of
// http.DefaultServeMux — registering the same pattern twice on the default
// mux (e.g. when several tests call this helper) panics.
func beforeEachProm(t *testing.T, port int) func() {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	srv := http.Server{Addr: fmt.Sprintf(":%d", port), Handler: mux}
	go func() {
		srv.ListenAndServe()
	}()
	return func() {
		srv.Close()
	}
}
|
package generator
import (
"testing"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/format"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/pomerium/pomerium/pkg/policy/parser"
)
// Test wires a single stub criterion ("accept", whose rule body is the
// tautology 1 == 1) into the generator and checks the complete Rego module
// emitted for a policy exercising every combinator (And/Or/Not/Nor) across
// allow and deny rules.
func Test(t *testing.T) {
	g := New(WithCriterion(func(g *Generator) Criterion {
		// Stub criterion: always generates a trivially-true rule.
		return NewCriterionFunc(CriterionDataTypeUnused, "accept", func(subPath string, data parser.Value) (rule *ast.Rule, additionalRules []*ast.Rule, err error) {
			rule = g.NewRule("accept")
			rule.Body = append(rule.Body, ast.MustParseExpr("1 == 1"))
			return rule, nil, nil
		})
	}))
	mod, err := g.Generate(&parser.Policy{
		Rules: []parser.Rule{
			{
				Action: parser.ActionAllow,
				And: []parser.Criterion{
					{Name: "accept"},
					{Name: "accept"},
					{Name: "accept"},
				},
				Or: []parser.Criterion{
					{Name: "accept"},
					{Name: "accept"},
					{Name: "accept"},
				},
				Not: []parser.Criterion{
					{Name: "accept"},
					{Name: "accept"},
					{Name: "accept"},
				},
				Nor: []parser.Criterion{
					{Name: "accept"},
					{Name: "accept"},
					{Name: "accept"},
				},
			},
			{
				Action: parser.ActionAllow,
				And: []parser.Criterion{
					{Name: "accept"},
				},
			},
			{
				Action: parser.ActionDeny,
				Nor: []parser.Criterion{
					{Name: "accept"},
					{Name: "accept"},
				},
			},
		},
	})
	require.NoError(t, err)
	// Expected formatted module: criterion rules are numbered in generation
	// order (accept_0..accept_14) and each combinator gets one merge rule.
	assert.Equal(t, `package pomerium.policy
default allow = [false, set()]
default deny = [false, set()]
accept_0 {
	1 == 1
}
accept_1 {
	1 == 1
}
accept_2 {
	1 == 1
}
and_0 = v {
	results := [accept_0, accept_1, accept_2]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	v := merge_with_and(normalized)
}
accept_3 {
	1 == 1
}
accept_4 {
	1 == 1
}
accept_5 {
	1 == 1
}
or_0 = v {
	results := [accept_3, accept_4, accept_5]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	v := merge_with_or(normalized)
}
accept_6 {
	1 == 1
}
accept_7 {
	1 == 1
}
accept_8 {
	1 == 1
}
not_0 = v {
	results := [accept_6, accept_7, accept_8]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	inverted := [invert_criterion_result(x) | x := results[i]]
	v := merge_with_and(inverted)
}
accept_9 {
	1 == 1
}
accept_10 {
	1 == 1
}
accept_11 {
	1 == 1
}
nor_0 = v {
	results := [accept_9, accept_10, accept_11]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	inverted := [invert_criterion_result(x) | x := results[i]]
	v := merge_with_or(inverted)
}
accept_12 {
	1 == 1
}
and_1 = v {
	results := [accept_12]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	v := merge_with_and(normalized)
}
allow = v {
	results := [and_0, or_0, not_0, nor_0, and_1]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	v := merge_with_or(normalized)
}
accept_13 {
	1 == 1
}
accept_14 {
	1 == 1
}
nor_1 = v {
	results := [accept_13, accept_14]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	inverted := [invert_criterion_result(x) | x := results[i]]
	v := merge_with_or(inverted)
}
deny = v {
	results := [nor_1]
	normalized := [normalize_criterion_result(x) | x := results[i]]
	v := merge_with_or(normalized)
}
invert_criterion_result(in) = out {
	in[0]
	out = array.concat([false], array.slice(in, 1, count(in)))
}
else = out {
	not in[0]
	out = array.concat([true], array.slice(in, 1, count(in)))
}
normalize_criterion_result(result) = v {
	is_boolean(result)
	v = [result, set()]
}
else = v {
	is_array(result)
	v = result
}
else = v {
	v = [false, set()]
}
object_union(xs) = merged {
	merged = {k: v |
		some k
		xs[_][k]
		vs := [xv | xv := xs[_][k]]
		v := vs[count(vs) - 1]
	}
}
merge_with_and(results) = [true, reasons, additional_data] {
	true_results := [x | x := results[i]; x[0]]
	count(true_results) == count(results)
	reasons := union({x | x := true_results[i][1]})
	additional_data := object_union({x | x := true_results[i][2]})
}
else = [false, reasons, additional_data] {
	false_results := [x | x := results[i]; not x[0]]
	reasons := union({x | x := false_results[i][1]})
	additional_data := object_union({x | x := false_results[i][2]})
}
merge_with_or(results) = [true, reasons, additional_data] {
	true_results := [x | x := results[i]; x[0]]
	count(true_results) > 0
	reasons := union({x | x := true_results[i][1]})
	additional_data := object_union({x | x := true_results[i][2]})
}
else = [false, reasons, additional_data] {
	false_results := [x | x := results[i]; not x[0]]
	reasons := union({x | x := false_results[i][1]})
	additional_data := object_union({x | x := false_results[i][2]})
}
`, string(format.MustAst(mod)))
}
|
package p_test
import (
"testing"
"github.com/Kretech/xgo/encoding"
"github.com/Kretech/xgo/p"
)
// _S is a tiny fixture type used to exercise p.Dump with method-call
// arguments.
type _S struct {
}

// a returns a fixed marker string identifying the method.
func (s *_S) a() string {
	return `_s.a`
}

// b returns a marker string embedding its argument.
//
// Fix: receivers were named `this`, which violates Go convention (short
// receiver names, never this/self); renamed to `s`.
func (s *_S) b(t string) string {
	return `_s.b(` + t + `)`
}
// TestDump exercises p.Dump across many argument shapes: literals,
// pointers, maps, slices, nested indexing, function results, method calls
// and encoded values. It asserts nothing — it is a smoke test whose output
// is inspected manually.
// NOTE(review): p.Dump may echo the source expressions of its arguments,
// so the local names below could be part of the expected output — confirm
// before renaming any of them.
func TestDump(t *testing.T) {
	aInt := 1
	bStr := `sf`
	cMap := map[string]interface{}{"name": "z", "age": 14}
	dArray := []interface{}{&cMap, aInt, bStr}
	c := cMap
	p.Dump(aInt, &aInt, &bStr, bStr, cMap, dArray, c, cMap["name"], dArray[2], dArray[aInt])
	userId := func() int { return 4 }
	p.Dump(userId())
	p.Dump(userId2())
	_s := _S{}
	p.Dump(_s.a())
	p.Dump(_s.b(`t`))
	p.Dump(encoding.JsonEncode(`abc`))
	p.Dump(encoding.JsonEncode(map[string]interface{}{"a": aInt}))
}
// userId2 returns a fixed user id used as a named-function argument to
// p.Dump in TestDump.
func userId2() int {
	const id = 8
	return id
}
|
package main
import (
"github.com/freignat91/mlearning/api"
"github.com/spf13/cobra"
)
// displayOptions holds the command-line flags of the "display" subcommand.
type displayOptions struct {
	coef bool // --coef: also display link coefficients
}
var (
	// displayOpts is populated by cobra flag parsing before Run executes.
	displayOpts = displayOptions{}
)
// DisplayCmd is the cobra "display" subcommand: it prints the network via
// mlCli.display and aborts the CLI with a fatal error on failure.
var DisplayCmd = &cobra.Command{
	Use: "display",
	Short: "display network",
	Run: func(cmd *cobra.Command, args []string) {
		if err := mlCli.display(cmd, args); err != nil {
			mlCli.Fatal("Error: %v\n", err)
		}
	},
}
// init registers the --coef flag and attaches "display" to the network command.
func init() {
	DisplayCmd.Flags().BoolVar(&displayOpts.coef, "coef", false, "display link coefs")
	NetworkCmd.AddCommand(DisplayCmd)
}
// display fetches the network description from the server and prints it,
// optionally including link coefficients (--coef).
func (m *mlCLI) display(cmd *cobra.Command, args []string) error {
	client := mlapi.New(m.server)
	lines, err := client.Display(displayOpts.coef)
	if err != nil {
		return err
	}
	displayList(lines)
	return nil
}
|
package client
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"net/http"
"time"
"github.com/drand/drand/chain"
"github.com/drand/drand/log"
json "github.com/nikkolasg/hexjson"
)
// HTTPGetter is an interface for the exercised methods of an `http.Client`,
// or equivalent alternative.
type HTTPGetter interface {
	// Do executes a fully built request.
	Do(req *http.Request) (*http.Response, error)
	// Get issues a GET to the given URL.
	Get(url string) (resp *http.Response, err error)
}
// NewHTTPClient creates a new client pointing to an HTTP endpoint. The
// chain info is fetched from the endpoint and, when chainHash is non-nil,
// validated against it. A nil client falls back to a default http.Client.
func NewHTTPClient(url string, chainHash []byte, client HTTPGetter) (Client, error) {
	if client == nil {
		client = &http.Client{}
	}
	c := &httpClient{
		root:   url,
		client: client,
		l:      log.DefaultLogger,
	}
	info, err := c.FetchChainInfo(chainHash)
	if err != nil {
		return nil, err
	}
	c.chainInfo = info
	return c, nil
}
// NewHTTPClientWithInfo constructs an http client when the group parameters
// are already known, skipping the network fetch. A nil client falls back to
// a default http.Client.
func NewHTTPClientWithInfo(url string, info *chain.Info, client HTTPGetter) (Client, error) {
	getter := client
	if getter == nil {
		getter = &http.Client{}
	}
	return &httpClient{
		root:      url,
		chainInfo: info,
		client:    getter,
		l:         log.DefaultLogger,
	}, nil
}
// httpClient implements Client through http requests to a Drand relay.
type httpClient struct {
	root string // base URL of the relay
	client HTTPGetter // transport used for all requests
	chainInfo *chain.Info // group parameters; fetched lazily when nil
	l log.Logger
}
// FetchChainInfo attempts to initialize an httpClient when it does not know
// the full group parameters for a drand group. The chain hash is the hash
// of the chain info. (Comment fixed: it previously named the method
// "FetchGroupInfo".)
func (h *httpClient) FetchChainInfo(chainHash []byte) (*chain.Info, error) {
	// Already initialized: reuse the cached info.
	if h.chainInfo != nil {
		return h.chainInfo, nil
	}
	infoBody, err := h.client.Get(fmt.Sprintf("%s/info", h.root))
	if err != nil {
		return nil, err
	}
	defer infoBody.Body.Close()
	chainInfo, err := chain.InfoFromJSON(infoBody.Body)
	if err != nil {
		return nil, err
	}
	if chainInfo.PublicKey == nil {
		return nil, fmt.Errorf("Group does not have a valid key for validation")
	}
	// Without a trust root we accept whatever the relay advertises, but warn.
	if chainHash == nil {
		h.l.Warn("http_client", "instantiated without trustroot", "groupHash", hex.EncodeToString(chainInfo.Hash()))
	}
	// With a trust root, the advertised chain info must hash to it.
	if chainHash != nil && !bytes.Equal(chainInfo.Hash(), chainHash) {
		return nil, fmt.Errorf("%s does not advertise the expected drand group (%x vs %x)", h.root, chainInfo.Hash(), chainHash)
	}
	return chainInfo, nil
}
// MarshalText implements encoding.TextMarshaler.
// NOTE(review): httpClient has only unexported fields, so the marshalled
// form is likely empty — confirm this is the intended representation.
func (h *httpClient) MarshalText() ([]byte, error) {
	return json.Marshal(h)
}
// RandomData holds the full random response from the server, including the
// data needed for validation.
type RandomData struct {
	Rnd uint64 `json:"round,omitempty"`
	Random []byte `json:"randomness,omitempty"`
	Sig []byte `json:"signature,omitempty"`
	PreviousSignature []byte `json:"previous_signature,omitempty"`
}

// Round reports which beacon round this random data belongs to.
func (r *RandomData) Round() uint64 {
	return r.Rnd
}

// Signature provides the signature over this round's randomness.
func (r *RandomData) Signature() []byte {
	return r.Sig
}

// Randomness exports the random bytes themselves.
func (r *RandomData) Randomness() []byte {
	return r.Random
}
// Get returns the randomness at `round` or an error.
//
// Fixes: the ctx parameter was previously ignored — the request is now
// bound to it via Do(req.WithContext(ctx)) so cancellation/deadlines are
// honored; "insufficent" typo in the error message corrected.
func (h *httpClient) Get(ctx context.Context, round uint64) (Result, error) {
	req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/public/%d", h.root, round), nil)
	if err != nil {
		return nil, err
	}
	randResponse, err := h.client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer randResponse.Body.Close()
	randResp := RandomData{}
	if err := json.NewDecoder(randResponse.Body).Decode(&randResp); err != nil {
		return nil, err
	}
	// Both signatures are required to verify the beacon; an empty one means
	// the relay returned a partial response.
	if len(randResp.Sig) == 0 || len(randResp.PreviousSignature) == 0 {
		return nil, fmt.Errorf("insufficient response")
	}
	b := chain.Beacon{
		PreviousSig: randResp.PreviousSignature,
		Round:       randResp.Rnd,
		Signature:   randResp.Sig,
	}
	if err := chain.VerifyBeacon(h.chainInfo.PublicKey, &b); err != nil {
		h.l.Warn("http_client", "failed to verify value", "err", err)
		return nil, err
	}
	// The final randomness is derived from the verified signature.
	randResp.Random = chain.RandomnessFromSignature(randResp.Sig)
	return &randResp, nil
}
// Watch returns new randomness as it becomes available, delegating to the
// shared polling watcher driven by this chain's info.
func (h *httpClient) Watch(ctx context.Context) <-chan Result {
	return pollingWatcher(ctx, h, h.chainInfo, h.l)
}
// RoundAt will return the most recent round of randomness that will be
// available at the given instant for the current client. (The parameter is
// named t rather than `time` so it no longer shadows the time package.)
func (h *httpClient) RoundAt(t time.Time) uint64 {
	return chain.CurrentRound(t.Unix(), h.chainInfo.Period, h.chainInfo.GenesisTime)
}
|
package gin
import (
"github.com/game-explorer/animal-chess-server/internal/pkg/gin"
"github.com/game-explorer/animal-chess-server/service/gin/handler"
)
// New builds the HTTP engine in the requested mode and attaches the
// websocket and login handlers.
func New(debug bool) *gin.Engine {
	engine := gin.NewGin(debug)
	handler.Ws(engine)
	handler.Login(engine)
	return engine
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"context"
"path"
"strconv"
"github.com/pingcap/tidb/disttask/framework/proto"
)
// taskTypeOptions collects the tunable settings attached to a registered
// task type.
type taskTypeOptions struct {
	PoolSize int32
}

// TaskTypeOption is the option of TaskType; options are applied in order
// by RegisterTaskType.
type TaskTypeOption func(opts *taskTypeOptions)

// WithPoolSize is the option of TaskType to set the pool size.
func WithPoolSize(poolSize int32) TaskTypeOption {
	return func(o *taskTypeOptions) {
		o.PoolSize = poolSize
	}
}
// schedulerRegisterOptions is reserved for future Scheduler registration
// options; currently empty.
type schedulerRegisterOptions struct {
}

// Constructor is the constructor of Scheduler.
type Constructor func(context context.Context, taskID int64, taskMeta []byte, step int64) (Scheduler, error)

// RegisterOption is the register option of Scheduler.
type RegisterOption func(opts *schedulerRegisterOptions)

// SubtaskExecutorConstructor is the constructor of SubtaskExecutor.
type SubtaskExecutorConstructor func(minimalTask proto.MinimalTask, step int64) (SubtaskExecutor, error)

// subtaskExecutorRegisterOptions is reserved for future SubtaskExecutor
// registration options; currently empty.
type subtaskExecutorRegisterOptions struct {
}

// SubtaskExecutorRegisterOption is the register option of SubtaskExecutor.
type SubtaskExecutorRegisterOption func(opts *subtaskExecutorRegisterOptions)
// Global registries. NOTE(review): these are plain maps without locking —
// Register*/Clear* calls are expected to happen before concurrent use.
var (
	// key is task type
	taskTypes = make(map[string]taskTypeOptions)
	// key is task type + step (see getKey)
	schedulerConstructors = make(map[string]Constructor)
	schedulerOptions = make(map[string]schedulerRegisterOptions)
	// key is task type + step (see getKey)
	subtaskExecutorConstructors = make(map[string]SubtaskExecutorConstructor)
	subtaskExecutorOptions = make(map[string]subtaskExecutorRegisterOptions)
)
// RegisterTaskType registers the task type together with its accumulated
// options in the global registry.
func RegisterTaskType(taskType string, opts ...TaskTypeOption) {
	option := taskTypeOptions{}
	for _, apply := range opts {
		apply(&option)
	}
	taskTypes[taskType] = option
}
// RegisterSchedulerConstructor registers the Scheduler constructor for a
// task type at a given step, along with its register options.
func RegisterSchedulerConstructor(taskType string, step int64, constructor Constructor, opts ...RegisterOption) {
	key := getKey(taskType, step)
	option := schedulerRegisterOptions{}
	for _, apply := range opts {
		apply(&option)
	}
	schedulerConstructors[key] = constructor
	schedulerOptions[key] = option
}
// RegisterSubtaskExectorConstructor registers the SubtaskExecutor
// constructor for a task type at a given step.
// NOTE(review): "Exector" is a misspelling of "Executor"; the exported
// name is kept unchanged because external callers may depend on it.
func RegisterSubtaskExectorConstructor(taskType string, step int64, constructor SubtaskExecutorConstructor, opts ...SubtaskExecutorRegisterOption) {
	key := getKey(taskType, step)
	option := subtaskExecutorRegisterOptions{}
	for _, apply := range opts {
		apply(&option)
	}
	subtaskExecutorConstructors[key] = constructor
	subtaskExecutorOptions[key] = option
}
// getKey builds the registry key for a task type at a given step,
// e.g. ("backfill", 1) -> "backfill/1".
func getKey(taskType string, step int64) string {
	stepStr := strconv.FormatInt(step, 10)
	return path.Join(taskType, stepStr)
}
// ClearSchedulers is only used in test
func ClearSchedulers() {
taskTypes = make(map[string]taskTypeOptions)
schedulerConstructors = make(map[string]Constructor)
schedulerOptions = make(map[string]schedulerRegisterOptions)
subtaskExecutorConstructors = make(map[string]SubtaskExecutorConstructor)
subtaskExecutorOptions = make(map[string]subtaskExecutorRegisterOptions)
}
|
package bench
import (
"database/sql"
"fmt"
"log"
"math"
"math/rand"
"time"
sqlite "github.com/mattn/go-sqlite3"
)
// pow computes x^y using float64 math, truncating the result back to int64.
func pow(x, y int64) int64 {
	result := math.Pow(float64(x), float64(y))
	return int64(result)
}
// targetPerc reports current as a percentage of target.
func targetPerc(current, target int64) float64 {
	ratio := float64(current) / float64(target)
	return ratio * 100.0
}
// xor folds all of its arguments together with bitwise exclusive-or;
// with no arguments the result is zero.
func xor(xs ...int64) int64 {
	acc := int64(0)
	for _, x := range xs {
		acc ^= x
	}
	return acc
}
// getrand returns a pseudo-random number from the package-global RNG. It
// exists as an example of a non-pure function from SQLite's point of view
// (registered with pure=false in the driver hook).
func getrand() int64 {
	return rand.Int63()
}
// stddev accumulates a GROUPed BY set of integer values and reports their
// population standard deviation when finalized.
type stddev struct {
	xs []int64
	// Running totals for the mean calculation.
	sum int64
	n   int64
}

// newStddev is the aggregator constructor handed to SQLite.
func newStddev() *stddev { return &stddev{} }

// Step records one value of the group.
func (s *stddev) Step(x int64) {
	s.xs = append(s.xs, x)
	s.sum += x
	s.n++
}

// Done computes sqrt(mean((x - mean)^2)) over the recorded values.
func (s *stddev) Done() float64 {
	mean := float64(s.sum) / float64(s.n)
	var total float64
	for _, x := range s.xs {
		total += math.Pow(float64(x)-mean, 2)
	}
	return math.Sqrt(total / float64(len(s.xs)))
}
//compute the positiveOnly average of a grouped by set of values
// NOTE(review): Step adds EVERY x to the running sum but only counts
// strictly positive values in n, so Done returns sum(all)/count(positive)
// rather than the mean of the positive values — confirm this weighting is
// intended before relying on it.
type positiveOnlyAvg struct {
	xs []int64
	//Running average calc
	sum int64
	n int64
}

// newPositiveOnlyAvg is the aggregator constructor handed to SQLite.
func newPositiveOnlyAvg() *positiveOnlyAvg { return &positiveOnlyAvg{} }

// Step records one value; xs is retained but never read by Done.
func (w *positiveOnlyAvg) Step(x int64) {
	w.xs = append(w.xs, x)
	w.sum += x
	if x > 0 {
		w.n++
	}
}

// Done returns the accumulated sum divided by the positive-value count
// (±Inf or NaN when no positive value was seen).
func (w *positiveOnlyAvg) Done() float64 {
	return float64(w.sum) / float64(w.n)
}
// WithpkClonePrint registers a custom sqlite3 driver exposing the helper
// functions and aggregators defined above, opens trump.db through it, and
// runs the primary-key benchmark suite (insert/join/sum/stddev/etc).
func WithpkClonePrint(table1Rows, table2Rows int) (err error) {
	sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
		ConnectHook: func(conn *sqlite.SQLiteConn) error {
			// Functions registered with pure=true may be cached by SQLite;
			// rand is deliberately registered as impure.
			if err := conn.RegisterFunc("pow", pow, true); err != nil {
				return err
			}
			if err := conn.RegisterFunc("xor", xor, true); err != nil {
				return err
			}
			if err := conn.RegisterFunc("rand", getrand, false); err != nil {
				return err
			}
			if err := conn.RegisterFunc("targetperc", targetPerc, true); err != nil {
				return err
			}
			if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
				return err
			}
			if err := conn.RegisterAggregator("positiveOnlyavg", newPositiveOnlyAvg, true); err != nil {
				return err
			}
			return nil
		},
	})
	db, err := sql.Open("sqlite3_custom", "trump.db")
	if err != nil {
		return
	}
	defer db.Close()
	// Run the individual benchmark stages; each prints its own timings.
	Withpk(db, table1Rows, table2Rows)
	Sum(db)
	StdDev(db)
	//Pow(db)
	WeightedAvg(db)
	Target(db)
	//CloneTable1Print(db)
	return
}
// WithnopkClonePrint runs the no-primary-key benchmark against a fresh
// in-memory SQLite database, followed by the table-clone benchmark.
func WithnopkClonePrint(table1Rows, table2Rows int) (err error) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		return err
	}
	defer db.Close()
	WithnopkPrint(db, table1Rows, table2Rows)
	CloneTable1Print(db)
	return nil
}
// WithpkPrint benchmarks two 36-column tables WITH integer primary keys:
// it bulk-inserts table1Rows/table2Rows rows inside transactions, joins the
// tables on id, and prints per-stage and total timings. Text is a
// package-level sample value defined elsewhere in the package.
func WithpkPrint(db *sql.DB, table1Rows, table2Rows int) (err error) {
	totTime := time.Now()
	start := time.Now()
	//Table1
	_, err = db.Exec("create table table1(id int primary key, sample1 Text,sample2 Text,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	table1TX, err := db.Begin()
	if err != nil {
		return
	}
	table1Stmt, err := table1TX.Prepare("insert into table1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if err != nil {
		return
	}
	defer table1Stmt.Close()
	for i := 0; i < table1Rows; i++ {
		_, err = table1Stmt.Exec(i, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		if err != nil {
			return
		}
	}
	table1TX.Commit()
	fmt.Println("Time to create table1 ", time.Since(start))
	start = time.Now()
	//Table2
	_, err = db.Exec("create table table2(id int primary key, sample1 Text,sample2 Text,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	table2TX, err := db.Begin()
	if err != nil {
		return
	}
	table2Stmt, err := table2TX.Prepare("insert into table2 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if err != nil {
		return
	}
	defer table2Stmt.Close()
	for i := 0; i < table2Rows; i++ {
		_, err = table2Stmt.Exec(i, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		if err != nil {
			return
		}
	}
	table2TX.Commit()
	fmt.Println("Time to create table2 ", time.Since(start))
	start = time.Now()
	//Join Table1 & Table2
	resultRowNum := 0
	rows, err := db.Query("select one.sample1,one.sample2,one.sample3,one.sample4,one.sample5,one.sample6,one.sample7,one.sample8,one.sample9,one.sample10,one.sample11,one.sample12,one.sample13,one.sample14,one.sample15,one.sample16,one.sample17,one.sample18,one.sample19,one.sample20,one.sample21,one.sample22,one.sample23,one.sample24,one.sample25,one.sample26,one.sample27,one.sample28,one.sample29,one.sample30,one.sample31,one.sample32,one.sample33,one.sample34,one.sample35, two.sample1,two.sample2,two.sample3,two.sample4,two.sample5,two.sample6,two.sample7,two.sample8,two.sample9,two.sample10,two.sample11,two.sample12,two.sample13,two.sample14,two.sample15,two.sample16,two.sample17,two.sample18,two.sample19,two.sample20,two.sample21,two.sample22,two.sample23,two.sample24,two.sample25,two.sample26,two.sample27,two.sample28,two.sample29,two.sample30,two.sample31,two.sample32,two.sample33,two.sample34,two.sample35 from table1 as one inner join table2 as two on one.id=two.id; ")
	if err != nil {
		return
	}
	defer rows.Close()
	fmt.Println("Time to select from join ", time.Since(start))
	// NOTE(review): the error from Columns() is assigned into the named
	// return but never checked here — confirm this is intentional.
	retrievedColumns, err := rows.Columns()
	for rows.Next() {
		resultRowNum++
	}
	fmt.Println("NumColumns: ", len(retrievedColumns))
	fmt.Println("NumRows: ", resultRowNum)
	fmt.Println("Total Time in WithPk ", time.Since(totTime))
	return
}
// WithnopkPrint benchmarks join performance on two tables that have NO
// primary key. It creates table1 and table2 (an int id plus 35 Text
// columns each), bulk-inserts table1Rows / table2Rows rows inside
// transactions, runs an inner join on id, and prints per-phase timings
// to stdout.
//
// Fixes: insert failures now roll the transaction back, Commit and
// Columns errors are no longer ignored, and rows.Err is checked after
// iteration so a mid-stream failure is surfaced to the caller.
func WithnopkPrint(db *sql.DB, table1Rows, table2Rows int) (err error) {
    totTime := time.Now()
    start := time.Now()
    // Table1
    _, err = db.Exec("create table table1(id int , sample1 Text,sample2 Text,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
    if err != nil {
        log.Fatal("Failed to create table:", err)
    }
    table1TX, err := db.Begin()
    if err != nil {
        return
    }
    table1Stmt, err := table1TX.Prepare("insert into table1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
    if err != nil {
        return
    }
    defer table1Stmt.Close()
    for i := 0; i < table1Rows; i++ {
        if _, err = table1Stmt.Exec(i, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text); err != nil {
            // Abandon the half-done transaction instead of leaking it.
            table1TX.Rollback()
            return
        }
    }
    if err = table1TX.Commit(); err != nil {
        return
    }
    fmt.Println("Time to create table1 ", time.Since(start))
    start = time.Now()
    // Table2
    _, err = db.Exec("create table table2(id int , sample1 Text,sample2 Text,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
    if err != nil {
        log.Fatal("Failed to create table:", err)
    }
    table2TX, err := db.Begin()
    if err != nil {
        return
    }
    table2Stmt, err := table2TX.Prepare("insert into table2 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
    if err != nil {
        return
    }
    defer table2Stmt.Close()
    for i := 0; i < table2Rows; i++ {
        if _, err = table2Stmt.Exec(i, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text); err != nil {
            table2TX.Rollback()
            return
        }
    }
    if err = table2TX.Commit(); err != nil {
        return
    }
    fmt.Println("Time to create table2 ", time.Since(start))
    start = time.Now()
    // Join Table1 & Table2
    resultRowNum := 0
    rows, err := db.Query("select one.sample1,one.sample2,one.sample3,one.sample4,one.sample5,one.sample6,one.sample7,one.sample8,one.sample9,one.sample10,one.sample11,one.sample12,one.sample13,one.sample14,one.sample15,one.sample16,one.sample17,one.sample18,one.sample19,one.sample20,one.sample21,one.sample22,one.sample23,one.sample24,one.sample25,one.sample26,one.sample27,one.sample28,one.sample29,one.sample30,one.sample31,one.sample32,one.sample33,one.sample34,one.sample35, two.sample1,two.sample2,two.sample3,two.sample4,two.sample5,two.sample6,two.sample7,two.sample8,two.sample9,two.sample10,two.sample11,two.sample12,two.sample13,two.sample14,two.sample15,two.sample16,two.sample17,two.sample18,two.sample19,two.sample20,two.sample21,two.sample22,two.sample23,two.sample24,two.sample25,two.sample26,two.sample27,two.sample28,two.sample29,two.sample30,two.sample31,two.sample32,two.sample33,two.sample34,two.sample35 from table1 as one inner join table2 as two on one.id=two.id; ")
    if err != nil {
        return
    }
    defer rows.Close()
    retrievedColumns, err := rows.Columns()
    if err != nil {
        return
    }
    for rows.Next() {
        resultRowNum++
    }
    // Surface any error that terminated iteration early.
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to select from join ", time.Since(start))
    fmt.Println("NumColumns: ", len(retrievedColumns))
    fmt.Println("NumRows: ", resultRowNum)
    fmt.Println("Total Time is NoPK", time.Since(totTime))
    return
}
// CloneTable1Print clones table1 into table3 via CREATE TABLE ... AS
// SELECT, then times an inner join between table1 and the clone, printing
// the elapsed time of each phase to stdout.
//
// Fixes: the errors from rows.Columns and rows.Err are now checked
// instead of being silently carried into the naked return.
func CloneTable1Print(db *sql.DB) (err error) {
    totTime := time.Now()
    start := time.Now()
    // Table3 is a structural+data copy of table1.
    _, err = db.Exec("create table table3 as select * from table1;")
    if err != nil {
        log.Fatal("Failed to create table:", err)
    }
    fmt.Println("Time to clone table1 ", time.Since(start))
    start = time.Now()
    // Join Table1 & Table3
    resultRowNum := 0
    rows, err := db.Query("select one.sample1,one.sample2,one.sample3,one.sample4,one.sample5,one.sample6,one.sample7,one.sample8,one.sample9,one.sample10,one.sample11,one.sample12,one.sample13,one.sample14,one.sample15,one.sample16,one.sample17,one.sample18,one.sample19,one.sample20,one.sample21,one.sample22,one.sample23,one.sample24,one.sample25,one.sample26,one.sample27,one.sample28,one.sample29,one.sample30,one.sample31,one.sample32,one.sample33,one.sample34,one.sample35, two.sample1,two.sample2,two.sample3,two.sample4,two.sample5,two.sample6,two.sample7,two.sample8,two.sample9,two.sample10,two.sample11,two.sample12,two.sample13,two.sample14,two.sample15,two.sample16,two.sample17,two.sample18,two.sample19,two.sample20,two.sample21,two.sample22,two.sample23,two.sample24,two.sample25,two.sample26,two.sample27,two.sample28,two.sample29,two.sample30,two.sample31,two.sample32,two.sample33,two.sample34,two.sample35 from table1 as one inner join table3 as two on one.id=two.id; ")
    if err != nil {
        return
    }
    defer rows.Close()
    retrievedColumns, err := rows.Columns()
    if err != nil {
        return
    }
    for rows.Next() {
        resultRowNum++
    }
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to select from join of table1 and table3 ", time.Since(start))
    fmt.Println("NumColumns: ", len(retrievedColumns))
    fmt.Println("NumRows: ", resultRowNum)
    fmt.Println("Total Time in CloneTable1 is ", time.Since(totTime))
    return
}
// Sum runs a grouped SUM over table1, prints one line per group, and
// prints the total elapsed time.
//
// Fixes: rows.Scan and rows.Err errors are no longer ignored — a scan
// failure previously printed stale zero values silently.
func Sum(db *sql.DB) (err error) {
    start := time.Now()
    rows, err := db.Query("select SUM(sample2) as x, sample1 from table1 group by sample1; ")
    if err != nil {
        return
    }
    defer rows.Close()
    var x int
    var dept string
    for rows.Next() {
        if err = rows.Scan(&x, &dept); err != nil {
            return
        }
        fmt.Println("The sum is ", x, " for ", dept)
    }
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to sum", time.Since(start))
    return
}
// StdDev runs a grouped stddev() over table1 and prints one line per
// group plus the total elapsed time.
//
// Fixes: stddev() yields a floating-point value, so x is now a float64 —
// scanning it into an int failed for non-integral results and the error
// was silently discarded, printing 0. Scan and rows.Err are now checked.
func StdDev(db *sql.DB) (err error) {
    start := time.Now()
    rows, err := db.Query("select stddev(sample2) as x ,sample1 from table1 group by sample1; ")
    if err != nil {
        return
    }
    defer rows.Close()
    var x float64
    var dept string
    for rows.Next() {
        if err = rows.Scan(&x, &dept); err != nil {
            return
        }
        fmt.Println("StdDev is ", x, " for the dept ", dept)
    }
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to Stddev", time.Since(start))
    return
}
// Pow runs a grouped pow(id, sample2) over table1 and prints one line per
// group plus the total elapsed time.
//
// Fixes: pow() returns a floating-point value, so x is now a float64 —
// scanning into int failed for non-integral results with the error
// silently discarded. Scan and rows.Err are now checked.
func Pow(db *sql.DB) (err error) {
    start := time.Now()
    rows, err := db.Query("select pow(id,sample2) as x, sample1 from table1 group by sample1; ")
    if err != nil {
        return
    }
    defer rows.Close()
    var x float64
    var dept string
    for rows.Next() {
        if err = rows.Scan(&x, &dept); err != nil {
            return
        }
        fmt.Println("Pow is ", x, "for the dept ", dept)
    }
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to Pow", time.Since(start))
    return
}
// WeightedAvg compares the plain avg() of sample3 per department with a
// custom positiveOnlyavg() aggregate, printing one line per group and the
// total elapsed time.
//
// Fixes: rows.Scan and rows.Err errors are no longer ignored.
func WeightedAvg(db *sql.DB) (err error) {
    start := time.Now()
    rows, err := db.Query("select sample1 as Dept, avg(sample3) as Avg , positiveOnlyavg(sample3) from table1 group by sample1;")
    if err != nil {
        return
    }
    defer rows.Close()
    for rows.Next() {
        var Avg float64
        var dept string
        var positiveOnlyAvg float64
        if err = rows.Scan(&dept, &Avg, &positiveOnlyAvg); err != nil {
            return
        }
        fmt.Println("The dept is ", dept, " avg is ", Avg, " wavg is ", positiveOnlyAvg)
    }
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to WeightedAvg", time.Since(start))
    return
}
// Target runs the custom targetperc(sample4, sample5) aggregate grouped
// by department, printing one line per group and the total elapsed time.
//
// Fixes: rows.Scan and rows.Err errors are no longer ignored.
func Target(db *sql.DB) (err error) {
    start := time.Now()
    rows, err := db.Query("select targetperc(sample4,sample5) as targ, sample1 from table1 group by sample1 ")
    if err != nil {
        return
    }
    defer rows.Close()
    var x float64
    var dept string
    for rows.Next() {
        if err = rows.Scan(&x, &dept); err != nil {
            return
        }
        fmt.Println("Target perc is ", x, " for ", dept)
    }
    if err = rows.Err(); err != nil {
        return
    }
    fmt.Println("Time to TargetPerc ", time.Since(start))
    return
}
|
package 位运算
// singleNumber returns the element that occurs exactly once in nums,
// where every other element occurs exactly twice. XOR-ing all values
// cancels the pairs (a^a == 0) and leaves the unique element.
func singleNumber(nums []int) int {
    result := 0
    for _, v := range nums {
        result ^= v
    }
    return result
}
/*
题目链接: https://leetcode-cn.com/problems/single-number/
*/
|
package main
// type SYSHEADStruct struct {
// ServiceCode ServiceCodeStruct
// ServiceScene ServiceSceneStruct
// ConsumerID ConsumerIDStruct
// TranDate TranDateStruct
// TranTimeStammp TranTimeStammpStruct
// }
// type AppHeadStruct struct {
// BussSeqNo BussSeqNoStruct
// }
// type BodyStruct struct {
// ObjectID ObjectIDStruct
// DocID DocIDstruct
// }
// type ServiceCodeStruct struct {
// XMLName xml.Name `xml:"SERVICE_CODE"`
// ValueStruct
// }
// type ServiceSceneStruct struct {
// XMLName xml.Name `xml:"SERVICE_SCENE"`
// ValueStruct
// }
// type ConsumerIDStruct struct {
// XMLName xml.Name `xml:"CONSUMER_ID"`
// ValueStruct
// }
// type TranDateStruct struct {
// XMLName xml.Name `xml:"CONSUMER_ID"`
// ValueStruct
// }
// type TranTimeStammpStruct struct {
// XMLName xml.Name `xml:"CONSUMER_ID"`
// ValueStruct
// }
// type BussSeqNoStruct struct {
// XMLName xml.Name `xml:"BULL_SEQ_NO"`
// ValueStruct
// }
// type ObjectIDStruct struct {
// XMLName xml.Name `xml:"OBJECT_ID"`
// ValueStruct
// }
// type DocIDstruct struct {
// XMLName xml.Name `xml:"DOC_ID"`
// ValueStruct
// }
|
package config
import (
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
// SetupDB opens a GORM connection to the local PostgreSQL instance and
// returns the handle, panicking if the connection cannot be established.
// DSN format: https://gorm.io/docs/connecting_to_the_database.html#PostgreSQL
// NOTE(review): credentials are hard-coded here; consider loading them
// from the environment instead.
func SetupDB() *gorm.DB {
    dsn := "host=localhost user=vianto password=Vianto1125 dbname=db_golang port=5432 sslmode=disable TimeZone=Asia/Shanghai"
    database, openErr := gorm.Open(postgres.Open(dsn), &gorm.Config{})
    if openErr != nil {
        panic(openErr.Error())
    }
    return database
}
|
package flagset
import (
"github.com/micro/cli/v2"
"github.com/owncloud/ocis-hello/pkg/config"
"github.com/owncloud/ocis/v2/ocis-pkg/flags"
)
// RootWithConfig returns the global CLI flags (config file location and
// logging behavior), each bound to a field of cfg. Service-specific
// HELLO_* env vars take precedence per cli's EnvVars ordering.
func RootWithConfig(cfg *config.Config) []cli.Flag {
return []cli.Flag{
&cli.StringFlag{
Name:        "config-file",
Value:       "",
Usage:       "Path to config file",
EnvVars:     []string{"HELLO_CONFIG_FILE"},
Destination: &cfg.File,
},
// Logging flags accept both the service-scoped and the global OCIS_* env vars.
&cli.StringFlag{
Name:        "log-level",
Value:       "info",
Usage:       "Set logging level",
EnvVars:     []string{"HELLO_LOG_LEVEL", "OCIS_LOG_LEVEL"},
Destination: &cfg.Log.Level,
},
&cli.BoolFlag{
Name:        "log-pretty",
Usage:       "Enable pretty logging",
EnvVars:     []string{"HELLO_LOG_PRETTY", "OCIS_LOG_PRETTY"},
Destination: &cfg.Log.Pretty,
},
&cli.BoolFlag{
Name:        "log-color",
Usage:       "Enable colored logging",
EnvVars:     []string{"HELLO_LOG_COLOR", "OCIS_LOG_COLOR"},
Destination: &cfg.Log.Color,
},
}
}
// HealthWithConfig returns the flags used by the health subcommand:
// only the debug endpoint address, bound to cfg.Debug.Addr.
func HealthWithConfig(cfg *config.Config) []cli.Flag {
return []cli.Flag{
&cli.StringFlag{
Name:        "debug-addr",
Value:       "0.0.0.0:9109",
Usage:       "Address to debug endpoint",
EnvVars:     []string{"HELLO_DEBUG_ADDR"},
Destination: &cfg.Debug.Addr,
},
}
}
// ServerWithConfig returns the flags for the server subcommand, covering
// logging, tracing, the debug endpoint, the HTTP and gRPC listeners,
// static assets, and authentication secrets. Each flag writes directly
// into the matching field of cfg.
func ServerWithConfig(cfg *config.Config) []cli.Flag {
return []cli.Flag{
&cli.StringFlag{
Name:        "log-file",
Usage:       "Enable log to file",
EnvVars:     []string{"HELLO_LOG_FILE", "OCIS_LOG_FILE"},
Destination: &cfg.Log.File,
},
// Tracing flags.
&cli.BoolFlag{
Name:        "tracing-enabled",
Usage:       "Enable sending traces",
EnvVars:     []string{"HELLO_TRACING_ENABLED"},
Destination: &cfg.Tracing.Enabled,
},
&cli.StringFlag{
Name:        "tracing-type",
Value:       "jaeger",
Usage:       "Tracing backend type",
EnvVars:     []string{"HELLO_TRACING_TYPE"},
Destination: &cfg.Tracing.Type,
},
&cli.StringFlag{
Name:        "tracing-endpoint",
Value:       "",
Usage:       "Endpoint for the agent",
EnvVars:     []string{"HELLO_TRACING_ENDPOINT"},
Destination: &cfg.Tracing.Endpoint,
},
&cli.StringFlag{
Name:        "tracing-collector",
Value:       "",
Usage:       "Endpoint for the collector",
EnvVars:     []string{"HELLO_TRACING_COLLECTOR"},
Destination: &cfg.Tracing.Collector,
},
&cli.StringFlag{
Name:        "tracing-service",
Value:       "hello",
Usage:       "Service name for tracing",
EnvVars:     []string{"HELLO_TRACING_SERVICE"},
Destination: &cfg.Tracing.Service,
},
// Debug endpoint flags (same address default as HealthWithConfig so the
// health check probes the server's own debug listener).
&cli.StringFlag{
Name:        "debug-addr",
Value:       "0.0.0.0:9109",
Usage:       "Address to bind debug server",
EnvVars:     []string{"HELLO_DEBUG_ADDR"},
Destination: &cfg.Debug.Addr,
},
&cli.StringFlag{
Name:        "debug-token",
Value:       "",
Usage:       "Token to grant metrics access",
EnvVars:     []string{"HELLO_DEBUG_TOKEN"},
Destination: &cfg.Debug.Token,
},
&cli.BoolFlag{
Name:        "debug-pprof",
Usage:       "Enable pprof debugging",
EnvVars:     []string{"HELLO_DEBUG_PPROF"},
Destination: &cfg.Debug.Pprof,
},
&cli.BoolFlag{
Name:        "debug-zpages",
Usage:       "Enable zpages debugging",
EnvVars:     []string{"HELLO_DEBUG_ZPAGES"},
Destination: &cfg.Debug.Zpages,
},
// HTTP listener flags.
&cli.StringFlag{
Name:        "http-namespace",
Value:       "com.owncloud.web",
Usage:       "Set the base namespace for the http namespace",
EnvVars:     []string{"HELLO_HTTP_NAMESPACE"},
Destination: &cfg.HTTP.Namespace,
},
&cli.StringFlag{
Name:        "http-addr",
Value:       "0.0.0.0:9105",
Usage:       "Address to bind http server",
EnvVars:     []string{"HELLO_HTTP_ADDR"},
Destination: &cfg.HTTP.Addr,
},
&cli.StringFlag{
Name:        "http-root",
Value:       "/",
Usage:       "Root path of http server",
EnvVars:     []string{"HELLO_HTTP_ROOT"},
Destination: &cfg.HTTP.Root,
},
&cli.IntFlag{
Name:        "http-cache-ttl",
// OverrideDefaultInt keeps a value already present in cfg; 604800s = 7 days.
Value:       flags.OverrideDefaultInt(cfg.HTTP.CacheTTL, 604800),
Usage:       "Set the static assets caching duration in seconds",
EnvVars:     []string{"HELLO_CACHE_TTL"},
Destination: &cfg.HTTP.CacheTTL,
},
// gRPC listener flags.
&cli.StringFlag{
Name:        "grpc-namespace",
Value:       "com.owncloud.api",
Usage:       "Set the base namespace for the grpc namespace",
EnvVars:     []string{"HELLO_GRPC_NAMESPACE"},
Destination: &cfg.GRPC.Namespace,
},
&cli.StringFlag{
Name:        "name",
Value:       flags.OverrideDefaultString(cfg.Server.Name, "hello"),
Usage:       "service name",
EnvVars:     []string{"HELLO_NAME"},
Destination: &cfg.Server.Name,
},
&cli.StringFlag{
Name:        "grpc-addr",
Value:       "0.0.0.0:9106",
Usage:       "Address to bind grpc server",
EnvVars:     []string{"HELLO_GRPC_ADDR"},
Destination: &cfg.GRPC.Addr,
},
&cli.StringFlag{
Name:        "asset-path",
Value:       "",
Usage:       "Path to custom assets",
EnvVars:     []string{"HELLO_ASSET_PATH"},
Destination: &cfg.Asset.Path,
},
// Authentication flags.
&cli.StringFlag{
Name:        "jwt-secret",
Value:       "Pive-Fumkiu4",
Usage:       "Used to create JWT to talk to reva, should equal reva's jwt-secret",
EnvVars:     []string{"HELLO_JWT_SECRET"},
Destination: &cfg.TokenManager.JWTSecret,
},
&cli.StringFlag{
Name:        "admin-user-id",
Usage:       "User to authenticate to ocis, should equal ocis's admin_user_id",
EnvVars:     []string{"HELLO_ADMIN_USER_ID"},
Required:    true,
Destination: &cfg.AdminUserID,
},
}
}
|
package utilsauthentication
import (
jwt "github.com/dgrijalva/jwt-go"
)
// Auth is the base structure for issuing and verifying JWT tokens.
type Auth struct {
Options Options
}
// Options holds the secrets used by Auth.
type Options struct {
SigningKey string // HMAC key used to sign and verify tokens
TokenKey   string // key name under which a token is stored/transported — TODO confirm with callers
}
// New returns an Auth configured with the default signing and token keys.
// NOTE(review): the defaults are placeholder literals; production callers
// should supply real secrets.
func New() *Auth {
    return &Auth{
        Options: Options{
            SigningKey: "SigningKey",
            TokenKey:   "TokenKey",
        },
    }
}
// NewToken builds an HS256-signed JWT whose claims are taken verbatim
// from vars and returns the compact serialized token string.
func (a Auth) NewToken(vars map[string]interface{}) (string, error) {
    token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims(vars))
    return token.SignedString([]byte(a.Options.SigningKey))
}
// VerifyToken parses and validates tokenString and returns its claims.
// The key func rejects tokens whose signing method is not HMAC: without
// this check a token declaring a different algorithm could be verified
// against the HMAC secret (the classic JWT alg-confusion weakness).
func (a Auth) VerifyToken(tokenString string) (jwt.Claims, error) {
    token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
        if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
            return nil, jwt.NewValidationError("unexpected signing method", jwt.ValidationErrorSignatureInvalid)
        }
        return []byte(a.Options.SigningKey), nil
    })
    if err != nil {
        return nil, err
    }
    return token.Claims, nil
}
|
package winkeys
import (
"golang.org/x/sys/windows"
"syscall"
"time"
"unsafe"
)
// Flags for KEYBDINPUT.DwFlags (Win32 KEYEVENTF_* values).
const (
keyDown    = 0      // key press (no flag set)
KeyExtend  = 0x0001 // KEYEVENTF_EXTENDEDKEY
keyUp      = 0x0002 // KEYEVENTF_KEYUP
keyUnicode = 0x0004 // KEYEVENTF_UNICODE: WScan carries a UTF-16 code unit
)
/*
Win32 virtual-key codes. Reference:
https://docs.microsoft.com/ja-jp/windows/win32/inputdev/virtual-key-codes?redirectedfrom=MSDN
*/
const (
VkReturn   = 13
VkA        = 0x41
VkX        = 0x58
VkV        = 0x56
VkLShift   = 0xA0
VkRShift   = 0xA1
VkLControl = 0xA2
VkRControl = 0xA3
VkLMenu    = 0xA4 // left Alt
VkRMenu    = 0xA5 // right Alt
VkF1       = 0x70
VkF2       = 0x71
VkF3       = 0x72
VkF4       = 0x73
VkF5       = 0x74
VkF6       = 0x75
VkF7       = 0x76
VkF8       = 0x77
VkF9       = 0x78
VkF10      = 0x79
VkF11      = 0x7A
VkF12      = 0x7B
)
// KEYBD_INPUT mirrors the Win32 INPUT structure restricted to keyboard
// events (Type must be 1 = INPUT_KEYBOARD).
type KEYBD_INPUT struct {
Type uint32
Ki   KEYBDINPUT
}
// KEYBDINPUT mirrors the Win32 KEYBDINPUT structure.
type KEYBDINPUT struct {
WVk         uint16 // virtual-key code (unused when DwFlags has keyUnicode)
WScan       uint16 // scan code, or UTF-16 unit with keyUnicode
DwFlags     uint32 // combination of keyDown/keyUp/keyUnicode/KeyExtend
Time        uint32 // event timestamp; 0 lets the system supply one
DwExtraInfo uintptr
// Padding so the struct matches the size of the INPUT union —
// presumably sized for 64-bit Windows; TODO confirm on 32-bit builds.
Unused [8]byte
}
var (
libuser32 *windows.LazyDLL  // lazily loaded user32.dll
sendInput *windows.LazyProc // SendInput entry point
)
// init resolves the SendInput procedure from user32.dll; resolution is
// lazy, so a missing DLL only fails on first use.
func init() {
// Library
libuser32 = windows.NewLazySystemDLL("user32.dll")
sendInput = libuser32.NewProc("SendInput")
}
// send calls user32 SendInput with nInputs entries starting at pInputs,
// each cbSize bytes. Returns the number of events successfully inserted
// into the input stream (0 indicates failure).
func send(nInputs uint32, pInputs unsafe.Pointer, cbSize int32) uint32 {
ret, _, _ := syscall.Syscall(sendInput.Addr(), 3,
uintptr(nInputs),
uintptr(pInputs),
uintptr(cbSize))
return uint32(ret)
}
// SendkeyVk synthesizes a press and release of the virtual key vk.
// The two events are submitted one SendInput call each; any failure
// return from SendInput is ignored.
func SendkeyVk(vk uint16) {
var inputs []KEYBD_INPUT
// key-down event
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         vk,
WScan:       0,
DwFlags:     keyDown,
Time:        0,
DwExtraInfo: 0,
},
})
// key-up event
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         vk,
WScan:       0,
DwFlags:     keyUp,
Time:        0,
DwExtraInfo: 0,
},
})
cbSize := int32(unsafe.Sizeof(KEYBD_INPUT{}))
for _, inp := range inputs {
send(1, unsafe.Pointer(&inp), cbSize)
}
}
// SendShortCutkeyVk synthesizes a two-key shortcut (e.g. Ctrl+V):
// vk1 down, vk2 down, vk2 up, vk1 up, with a 100 ms pause between events
// so the receiving application can process the modifier state.
func SendShortCutkeyVk(vk1 uint16, vk2 uint16) {
var inputs []KEYBD_INPUT
// modifier down
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         vk1,
WScan:       0,
DwFlags:     keyDown,
Time:        0,
DwExtraInfo: 0,
},
})
// key down
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         vk2,
WScan:       0,
DwFlags:     keyDown,
Time:        0,
DwExtraInfo: 0,
},
})
// key up
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         vk2,
WScan:       0,
DwFlags:     keyUp,
Time:        0,
DwExtraInfo: 0,
},
})
// modifier up
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         vk1,
WScan:       0,
DwFlags:     keyUp,
Time:        0,
DwExtraInfo: 0,
},
})
cbSize := int32(unsafe.Sizeof(KEYBD_INPUT{}))
for _, inp := range inputs {
send(1, unsafe.Pointer(&inp), cbSize)
time.Sleep(time.Millisecond * 100)
}
}
// Sendkey synthesizes a press and release of the character c using the
// KEYEVENTF_UNICODE path: WVk is 0 and WScan carries the UTF-16 code
// unit, so the character is delivered independent of keyboard layout.
func Sendkey(c uint16) {
var inputs []KEYBD_INPUT
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         0,
WScan:       c,
DwFlags:     keyDown | keyUnicode,
Time:        0,
DwExtraInfo: 0,
},
})
inputs = append(inputs, KEYBD_INPUT{
Type: 1,
Ki: KEYBDINPUT{
WVk:         0,
WScan:       c,
DwFlags:     keyUp | keyUnicode,
Time:        0,
DwExtraInfo: 0,
},
})
cbSize := int32(unsafe.Sizeof(KEYBD_INPUT{}))
for _, inp := range inputs {
send(1, unsafe.Pointer(&inp), cbSize)
}
}
// Sendkeys types str one rune at a time via Sendkey, pausing 300 ms
// between characters. NOTE(review): runes above U+FFFF are truncated by
// the uint16 conversion — surrogate pairs are not emitted.
func Sendkeys(str string) {
for _, c := range str {
Sendkey(uint16(c))
time.Sleep(time.Millisecond * 300)
}
}
|
package adaboost
import (
"math"
"github.com/gonum/matrix/mat64"
)
// AdaBoostClassifier is a boosted ensemble of decision stumps for binary
// classification with labels in {-1, +1} — assumed from Predict's output;
// TODO confirm with callers.
type AdaBoostClassifier struct {
nEstimators int // number of weak learners to fit
nSamples    int // rows of the training matrix, recorded by Fit
nFeatures   int // columns of the training matrix, recorded by Fit
clfs        []*baseLearner
}
// baseLearner pairs a fitted decision stump with its ensemble weight (alpha).
type baseLearner struct {
classifier *DecisionStump
weight     float64
}
// NewAdaBoostClassifier creates an untrained ensemble that will fit
// nEstimators weak learners when Fit is called.
func NewAdaBoostClassifier(nEstimators int) *AdaBoostClassifier {
    clf := new(AdaBoostClassifier)
    clf.nEstimators = nEstimators
    return clf
}
// Fit trains nEstimators decision stumps on X (rows = samples) with
// labels y, reweighting samples after each round.
//
// NOTE(review): sample weights start at 1 (not 1/n) and are never
// renormalized — verify DecisionStump/ErrRate tolerate unnormalized
// weights. If a stump classifies perfectly, errRate is 0 and alpha
// becomes +Inf — TODO guard against division by zero.
func (abc *AdaBoostClassifier) Fit(X *mat64.Dense, y []int) error {
nRows, nCols := X.Dims()
abc.nSamples = nRows
abc.nFeatures = nCols
sampleWeight := make([]float64, nRows)
for i := range sampleWeight {
sampleWeight[i] = 1
}
for i := 0; i < abc.nEstimators; i++ {
ds := NewDecisionStump(10)
ds.Fit(X, y, sampleWeight)
// Weighted training error of the new stump determines its vote alpha.
errRate := ErrRate(y, ds.Predict(X), sampleWeight)
alpha := math.Log((1-errRate)/errRate) / 2
abc.clfs = append(abc.clfs, &baseLearner{classifier: ds, weight: alpha})
// Reweight using the cumulative ensemble margin (see updateWeight).
sampleWeight = abc.updateWeight(X, y, alpha, sampleWeight)
}
return nil
}
// RawPredict returns the alpha-weighted sum of all fitted base-learner
// votes for each row of X; the sign of each value is the ensemble's
// class decision.
func (abc *AdaBoostClassifier) RawPredict(X *mat64.Dense) []float64 {
    // Size the output by X itself rather than abc.nSamples (the training
    // set size), so matrices with a different number of rows score
    // correctly instead of panicking or truncating.
    nRows, _ := X.Dims()
    yRawPred := make([]float64, nRows)
    for _, clf := range abc.clfs {
        for i, x := range clf.classifier.Predict(X) {
            yRawPred[i] += float64(x) * clf.weight
        }
    }
    return yRawPred
}
// Predict returns the ensemble class label (+1 or -1) for each row of X
// by taking the sign of the raw ensemble score.
func (abc *AdaBoostClassifier) Predict(X *mat64.Dense) []int {
    rawPred := abc.RawPredict(X)
    // Size by the actual prediction length instead of abc.nSamples so X
    // need not match the training-set row count.
    yPred := make([]int, len(rawPred))
    for i, val := range rawPred {
        if val > 0 {
            yPred[i] = 1
        } else {
            yPred[i] = -1
        }
    }
    return yPred
}
// updateWeight returns sample weights recomputed from the cumulative
// ensemble margin: w_i = currentWeight_i * exp(-y_i * F(x_i)), where F is
// RawPredict over all fitted learners. alpha is unused here because the
// newest learner's weight is already folded into RawPredict — kept in the
// signature for interface compatibility.
func (abc *AdaBoostClassifier) updateWeight(X *mat64.Dense, y []int, alpha float64, currentWeight []float64) []float64 {
    // Allocate a fresh slice: the previous version aliased currentWeight
    // and silently mutated the caller's slice in place.
    newWeight := make([]float64, len(currentWeight))
    for i, rawY := range abc.RawPredict(X) {
        newWeight[i] = currentWeight[i] * math.Exp(-1*float64(y[i])*rawY)
    }
    return newWeight
}
|
package context
import (
"context"
"os"
"github.com/apex/log"
"github.com/apex/log/handlers/text"
)
// Initialize wires an apex/log text logger into a context, attaches a
// field, and logs around two helper calls to demonstrate context-scoped
// logging.
func Initialize() {
// set basic log up
log.SetHandler(text.New(os.Stdout))
// initialize our context
ctx := context.Background()
// create a logger and link it to
// the context
ctx, e := FromContext(ctx, log.Log)
// set a field
ctx = WithField(ctx, "id", "123")
e.Info("starting")
gatherName(ctx)
e.Info("after gatherName")
gatherLocation(ctx)
e.Info("after gatherLocation")
}
// gatherName attaches a "name" field to a derived context.
// NOTE(review): the reassignment to the ctx parameter is local — the
// derived context is discarded and the caller never sees the field;
// returning the new context would be needed to propagate it.
func gatherName(ctx context.Context) {
ctx = WithField(ctx, "name", "Go Cookbook")
}
// gatherLocation attaches city/state fields to a derived context.
// NOTE(review): as in gatherName, the derived context is discarded —
// the caller never observes these fields.
func gatherLocation(ctx context.Context) {
ctx = WithFields(ctx, log.Fields{"city": "Seattle", "state": "WA"})
}
|
package api_test
import (
"context"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/odpf/stencil/models"
stencilv1 "github.com/odpf/stencil/server/odpf/stencil/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"google.golang.org/grpc/status"
)
// downloadFail is the sentinel error the mocked storage returns to force
// the 500 path in the table tests below.
var downloadFail = errors.New("download fail")
// TestDownload exercises descriptor download through both the HTTP
// gateway and the gRPC handler with the same table of cases, asserting
// that each error from the mocked metadata/storage layers maps to the
// expected status code.
func TestDownload(t *testing.T) {
for _, test := range []struct {
desc         string
name         string
version      string
notFoundErr  error // error returned by GetSnapshotByFields
downloadErr  error // error returned by the storage Get
expectedCode int
}{
{"should return 400 if name is missing", "", "1.0.1", nil, nil, 400},
{"should return 400 if version is invalid", "name1", "invalid", nil, nil, 400},
{"should return 404 if version is not found", "name1", "3.3.1", models.ErrSnapshotNotFound, nil, 404},
{"should return 500 if finding snapshot fails", "name1", "3.3.1", errors.New("get snapshot fail"), nil, 500},
{"should return 500 if download fails", "name1", "1.0.1", nil, downloadFail, 500},
{"should return 200 if download succeeded", "name1", "1.0.1", nil, nil, 200},
{"should be able to download with latest version", "name1", "latest", nil, nil, 200},
} {
t.Run(fmt.Sprintf("http: %s", test.desc), func(t *testing.T) {
router, mockService, mockMetadata, _ := setup()
fileData := []byte("File contents")
mockMetadata.On("GetSnapshotByFields", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&models.Snapshot{}, test.notFoundErr)
mockService.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(fileData, test.downloadErr)
w := httptest.NewRecorder()
req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/namespaces/namespace/descriptors/%s/versions/%s", test.name, test.version), nil)
router.ServeHTTP(w, req)
assert.Equal(t, test.expectedCode, w.Code)
if test.expectedCode == 200 {
// Successful downloads must carry an attachment disposition header.
expectedHeader := fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, test.version, test.version)
assert.Equal(t, []byte("File contents"), w.Body.Bytes())
assert.Equal(t, expectedHeader, w.Header().Get("Content-Disposition"))
}
})
t.Run(fmt.Sprintf("gRPC: %s", test.desc), func(t *testing.T) {
ctx := context.Background()
_, mockService, mockMetadata, a := setup()
fileData := []byte("File contents")
mockMetadata.On("GetSnapshotByFields", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&models.Snapshot{}, test.notFoundErr)
mockService.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(fileData, test.downloadErr)
req := &stencilv1.DownloadDescriptorRequest{Namespace: "namespace", Name: test.name, Version: test.version}
res, err := a.DownloadDescriptor(ctx, req)
if test.expectedCode != 200 {
// Map the gRPC status back to HTTP to reuse the same expectations.
e := status.Convert(err)
assert.Equal(t, test.expectedCode, runtime.HTTPStatusFromCode(e.Code()))
} else {
assert.Equal(t, res.Data, []byte("File contents"))
}
})
}
t.Run("should return 404 if file content not found", func(t *testing.T) {
router, mockService, mockMetadata, _ := setup()
// Empty file content is treated as "not found" by the handler.
fileData := []byte("")
mockMetadata.On("GetSnapshotByFields", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&models.Snapshot{}, nil)
mockService.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(fileData, nil)
w := httptest.NewRecorder()
req, _ := http.NewRequest("GET", "/v1/namespaces/namespace/descriptors/n/versions/latest", nil)
router.ServeHTTP(w, req)
assert.Equal(t, 404, w.Code)
assert.Equal(t, []byte(`{"message":"not found"}`), w.Body.Bytes())
})
}
|
/*
Given a string and a non-negative int n, we'll say that the front of the string is the first 3 chars, or whatever is there if the string is less than length 3. Return n copies of the front;
*/
package main
import (
"fmt"
)
// front_times returns n copies of the "front" of s: the first three
// characters, or all of s when it is shorter than three. A non-positive
// n yields the empty string.
func front_times(s string, n int) string {
    front := s
    if len(front) > 3 {
        front = front[:3]
    }
    result := ""
    for ; n > 0; n-- {
        result += front
    }
    return result
}
// main runs a small self-check suite for front_times and prints "OK"
// when all five cases pass, "NOT OK" otherwise.
func main() {
    cases := []struct {
        s    string
        n    int
        want string
    }{
        {"Chocolate", 2, "ChoCho"},
        {"Chocolate", -1, ""},
        {"Abc", 3, "AbcAbcAbc"},
        {"a", 0, ""},
        {"a", 4, "aaaa"},
    }
    passed := 0
    for _, c := range cases {
        if front_times(c.s, c.n) == c.want {
            passed++
        }
    }
    if passed == len(cases) {
        fmt.Println("OK")
    } else {
        fmt.Println("NOT OK")
    }
}
|
package status
import (
"context"
"io"
"syscall"
"github.com/projecteru2/cli/cmd/utils"
corepb "github.com/projecteru2/core/rpc/gen"
coreutils "github.com/projecteru2/core/utils"
"github.com/sethvargo/go-signalcontext"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// statusOptions carries the filters for a workload status stream:
// app name, entrypoint, node, and label selectors.
type statusOptions struct {
client corepb.CoreRPCClient
name   string
entry  string
node   string
labels map[string]string
}
// run subscribes to the core's workload status stream with the configured
// filters and logs every status transition until the stream ends (EOF) or
// the process receives SIGINT/SIGTERM.
func (o *statusOptions) run(ctx context.Context) error {
    // Cancel the stream when the user interrupts the process.
    sigCtx, cancel := signalcontext.Wrap(ctx, syscall.SIGINT, syscall.SIGTERM)
    defer cancel()
    resp, err := o.client.WorkloadStatusStream(sigCtx, &corepb.WorkloadStatusStreamOptions{
        Appname:    o.name,
        Entrypoint: o.entry,
        Nodename:   o.node,
        Labels:     o.labels,
    })
    if err != nil || resp == nil {
        return cli.Exit("", -1)
    }
    for {
        msg, err := resp.Recv()
        if err == io.EOF {
            break
        }
        if err != nil || msg == nil {
            return cli.Exit("", -1)
        }
        if msg.Error != "" {
            if msg.Delete {
                logrus.Warnf("%s deleted", coreutils.ShortID(msg.Id))
            } else {
                logrus.Errorf("[%s] status changed with error %v", coreutils.ShortID(msg.Id), msg.Error)
            }
            continue
        }
        if msg.Delete {
            logrus.Warnf("[%s] %s status expired", coreutils.ShortID(msg.Id), msg.Workload.Name)
        }
        switch {
        case !msg.Status.Running:
            logrus.Warnf("[%s] %s on %s is stopped", coreutils.ShortID(msg.Id), msg.Workload.Name, msg.Workload.Nodename)
        case !msg.Status.Healthy:
            logrus.Warnf("[%s] %s on %s is unhealthy", coreutils.ShortID(msg.Id), msg.Workload.Name, msg.Workload.Nodename)
        case msg.Status.Running && msg.Status.Healthy:
            // Use msg.Id like every other branch (previously this line
            // alone read msg.Workload.Id).
            logrus.Infof("[%s] %s back to life", coreutils.ShortID(msg.Id), msg.Workload.Name)
            for networkName, addrs := range msg.Workload.Publish {
                logrus.Infof("[%s] published at %s bind %v", coreutils.ShortID(msg.Id), networkName, addrs)
            }
        }
    }
    return nil
}
// cmdStatus builds statusOptions from the CLI invocation and streams
// workload status changes until the stream ends or is interrupted.
func cmdStatus(c *cli.Context) error {
    client, err := utils.NewCoreRPCClient(c)
    if err != nil {
        return err
    }
    opts := &statusOptions{
        client: client,
        name:   c.Args().First(),
        entry:  c.String("entry"),
        node:   c.String("node"),
        labels: utils.SplitEquality(c.StringSlice("label")),
    }
    return opts.run(c.Context)
}
|
package main
import (
"flag"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/juangm/go-exercises/go-htmlParser/link"
)
// main fetches the page given by -url, extracts its same-site links, and
// prints them one per line. BFS traversal and XML output are still TODO.
func main() {
urlFlag := flag.String("url", "https://gophercises.com", "the url that you want to build a sitemap for.")
flag.Parse()
pages := get(*urlFlag)
for _, page := range pages {
fmt.Println(page)
}
// 5. Find all pages (BFS)
// 6. Print out XML
}
// hrefs parses the HTML in body and returns absolute link URLs:
// site-relative hrefs ("/...") are prefixed with base, absolute
// ("http...") hrefs are kept as-is, and anything else is reported and
// dropped.
func hrefs(body io.Reader, base string) []string {
    links, err := link.Parse(body)
    if err != nil {
        // Parse failures are non-fatal: report and return no links.
        // (Println instead of Printf: the message has no format verbs and
        // was missing its newline.)
        fmt.Println("Some error happened when parsing the links!")
        return nil
    }
    var ret []string
    for _, l := range links {
        switch {
        case strings.HasPrefix(l.Href, "/"):
            ret = append(ret, base+l.Href)
        case strings.HasPrefix(l.Href, "http"):
            ret = append(ret, l.Href)
        default:
            fmt.Println("Not taking the URL: ", l.Href)
        }
    }
    return ret
}
// get fetches urlStr, derives the scheme+host base URL from the final
// (post-redirect) request URL, and returns the page's same-site links.
// A transport-level failure panics — acceptable for this CLI exercise.
func get(urlStr string) []string {
resp, err := http.Get(urlStr)
if err != nil {
panic(err)
}
defer resp.Body.Close()
// 3. Build proper urls with our links
// resp.Request.URL reflects the URL after any redirects.
reqURL := resp.Request.URL
baseURL := &url.URL{
Scheme: reqURL.Scheme,
Host:   reqURL.Host,
}
base := baseURL.String()
return filter(base, hrefs(resp.Body, base))
}
// filter returns only the links that start with the given base URL,
// dropping everything that points off-site.
func filter(base string, links []string) []string {
    var ret []string
    // Loop variable renamed from "link", which shadowed the imported
    // link package.
    for _, href := range links {
        if strings.HasPrefix(href, base) {
            ret = append(ret, href)
        }
    }
    return ret
}
|
package leetcode
import (
"sort"
)
// ByX orders (x, y) points by x, breaking ties by y.
type ByX [][]int
// ByY orders (x, y) points by y, breaking ties by x.
type ByY [][]int
func (ary ByX) Len() int {
return len(ary)
}
func (ary ByY) Len() int {
return len(ary)
}
// Less: primary key x, secondary key y.
func (ary ByX) Less(i, j int) bool {
if ary[i][0] == ary[j][0] {
return ary[i][1] < ary[j][1]
}
return ary[i][0] < ary[j][0]
}
// Less: primary key y, secondary key x.
func (ary ByY) Less(i, j int) bool {
if ary[i][1] == ary[j][1] {
return ary[i][0] < ary[j][0]
}
return ary[i][1] < ary[j][1]
}
func (ary ByX) Swap(i, j int) {
ary[i], ary[j] = ary[j], ary[i]
}
func (ary ByY) Swap(i, j int) {
ary[i], ary[j] = ary[j], ary[i]
}
// findVertical binary-searches the x-sorted points and returns indices
// (L, U) straddling the position of (x, y): ary[L] sorts strictly before
// (x, y) and ary[U] sorts at/after it. L may be -1 and U may be Len()
// at the boundaries. Requires ary to be sorted by the ByX ordering.
func (ary ByX) findVertical(x, y int) (int, int) {
L, U := -1, ary.Len()
for L+1 < U {
m := (L + U) / 2
if ary[m][0] < x {
L = m
} else if ary[m][0] > x {
U = m
} else {
// Same column: fall back to the y tie-break of the ByX ordering.
if ary[m][1] < y {
L = m
} else {
U = m
}
}
}
return L, U
}
// findHorizontal is the ByY mirror of findVertical: it binary-searches
// the y-sorted points and returns (L, U) straddling (x, y) under the ByY
// ordering. Requires ary to be sorted by the ByY ordering.
func (ary ByY) findHorizontal(x, y int) (int, int) {
L, U := -1, ary.Len()
for L+1 < U {
m := (L + U) / 2
if ary[m][1] < y {
L = m
} else if ary[m][1] > y {
U = m
} else {
// Same row: fall back to the x tie-break of the ByY ordering.
if ary[m][0] < x {
L = m
} else {
U = m
}
}
}
return L, U
}
// robotSim simulates LeetCode 874 "Walking Robot Simulation": a robot at
// the origin facing north executes commands (-1 turn right, -2 turn left,
// k>0 move k steps) and stops in front of obstacles. Returns the maximum
// squared Euclidean distance from the origin reached at any point.
//
// Instead of a hash set, obstacles are kept in two sorted copies (by x
// then y, and by y then x) so the nearest obstacle along the movement
// axis is found by binary search and the move is clamped against it.
func robotSim(commands []int, obstacles [][]int) int {
const (
NORTH = iota
EAST
SOUTH
WEST
)
N := len(obstacles)
byX := ByX(make([][]int, 0, N))
byY := ByY(make([][]int, 0, N))
for _, ob := range obstacles {
// An obstacle at the origin is ignored (the robot starts there).
if ob[0] != 0 || ob[1] != 0 {
byX = append(byX, ob)
byY = append(byY, ob)
} else {
N--
}
}
sort.Sort(byX)
sort.Sort(byY)
dir := NORTH
x, y := 0, 0
ans := 0
for _, cmd := range commands {
switch cmd {
case -1:
dir = (dir + 1) % 4
case -2:
dir = (dir + 3) % 4
default:
switch dir {
case NORTH, SOUTH:
// L/U straddle the current position among same-column obstacles.
L, U := byX.findVertical(x, y)
switch dir {
case NORTH:
y += cmd
// Clamp to just below the nearest obstacle above, if any.
if U < N && byX[U][0] == x {
if y >= byX[U][1] {
y = byX[U][1] - 1
}
}
case SOUTH:
y -= cmd
// Clamp to just above the nearest obstacle below, if any.
if L >= 0 && byX[L][0] == x {
if y <= byX[L][1] {
y = byX[L][1] + 1
}
}
}
case EAST, WEST:
L, U := byY.findHorizontal(x, y)
switch dir {
case EAST:
x += cmd
if U < N && byY[U][1] == y {
if x >= byY[U][0] {
x = byY[U][0] - 1
}
}
case WEST:
x -= cmd
if L >= 0 && byY[L][1] == y {
if x <= byY[L][0] {
x = byY[L][0] + 1
}
}
}
}
// Track the farthest squared distance seen after each move.
if ans < x*x+y*y {
ans = x*x + y*y
}
}
}
return ans
}
|
// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"encoding/hex"
"fmt"
"net"
"os"
"os/user"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcwallet/internal/cfgutil"
"github.com/btcsuite/btcwallet/internal/legacy/keystore"
"github.com/btcsuite/btcwallet/netparams"
"github.com/btcsuite/btcwallet/wallet"
flags "github.com/jessevdk/go-flags"
"github.com/lightninglabs/neutrino"
)
// Default file names and server limits for btcwallet configuration.
const (
defaultCAFilename       = "btcd.cert"
defaultConfigFilename   = "btcwallet.conf"
defaultLogLevel         = "info"
defaultLogDirname       = "logs"
defaultLogFilename      = "btcwallet.log"
defaultRPCMaxClients    = 10 // max concurrent standard RPC clients
defaultRPCMaxWebsockets = 25 // max concurrent websocket RPC clients
)
// Default paths, all rooted in the OS-specific application data
// directories for btcd and btcwallet.
var (
btcdDefaultCAFile = filepath.Join(btcutil.AppDataDir("btcd", false), "rpc.cert")
defaultAppDataDir = btcutil.AppDataDir("btcwallet", false)
defaultConfigFile = filepath.Join(defaultAppDataDir, defaultConfigFilename)
defaultRPCKeyFile = filepath.Join(defaultAppDataDir, "rpc.key")
defaultRPCCertFile = filepath.Join(defaultAppDataDir, "rpc.cert")
defaultLogDir = filepath.Join(defaultAppDataDir, defaultLogDirname)
)
// config defines the configuration options for the wallet.  All options are
// settable via command line flags and, for most of them, an INI-style
// configuration file; see loadConfig for how defaults, the config file, and
// command line options are layered.
type config struct {
	// General application behavior
	ConfigFile *cfgutil.ExplicitString `short:"C" long:"configfile" description:"Path to configuration file"`
	ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"`
	Create bool `long:"create" description:"Create the wallet if it does not exist"`
	CreateTemp bool `long:"createtemp" description:"Create a temporary simulation wallet (pass=password) in the data directory indicated; must call with --datadir"`
	AppDataDir *cfgutil.ExplicitString `short:"A" long:"appdata" description:"Application data directory for wallet config, databases and logs"`
	TestNet3 bool `long:"testnet" description:"Use the test Bitcoin network (version 3) (default mainnet)"`
	SimNet bool `long:"simnet" description:"Use the simulation test network (default mainnet)"`
	SigNet bool `long:"signet" description:"Use the signet test network (default mainnet)"`
	SigNetChallenge string `long:"signetchallenge" description:"Connect to a custom signet network defined by this challenge instead of using the global default signet test network -- Can be specified multiple times"`
	SigNetSeedNode []string `long:"signetseednode" description:"Specify a seed node for the signet network instead of using the global default signet network seed nodes"`
	NoInitialLoad bool `long:"noinitialload" description:"Defer wallet creation/opening on startup and enable loading wallets over RPC"`
	DebugLevel string `short:"d" long:"debuglevel" description:"Logging level {trace, debug, info, warn, error, critical}"`
	LogDir string `long:"logdir" description:"Directory to log output."`
	Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"`
	DBTimeout time.Duration `long:"dbtimeout" description:"The timeout value to use when opening the wallet database."`

	// Wallet options
	WalletPass string `long:"walletpass" default-mask:"-" description:"The public wallet password -- Only required if the wallet was created with one"`

	// RPC client options
	RPCConnect string `short:"c" long:"rpcconnect" description:"Hostname/IP and port of btcd RPC server to connect to (default localhost:8334, testnet: localhost:18334, simnet: localhost:18556)"`
	CAFile *cfgutil.ExplicitString `long:"cafile" description:"File containing root certificates to authenticate a TLS connections with btcd"`
	DisableClientTLS bool `long:"noclienttls" description:"Disable TLS for the RPC client -- NOTE: This is only allowed if the RPC client is connecting to localhost"`
	BtcdUsername string `long:"btcdusername" description:"Username for btcd authentication"`
	BtcdPassword string `long:"btcdpassword" default-mask:"-" description:"Password for btcd authentication"`
	Proxy string `long:"proxy" description:"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)"`
	ProxyUser string `long:"proxyuser" description:"Username for proxy server"`
	ProxyPass string `long:"proxypass" default-mask:"-" description:"Password for proxy server"`

	// SPV client options
	UseSPV bool `long:"usespv" description:"Enables the experimental use of SPV rather than RPC for chain synchronization"`
	AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"`
	ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"`
	MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"`
	BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"`
	BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."`

	// RPC server options
	//
	// The legacy server is still enabled by default (and eventually will be
	// replaced with the experimental server) so prepare for that change by
	// renaming the struct fields (but not the configuration options).
	//
	// Usernames can also be used for the consensus RPC client, so they
	// aren't considered legacy.
	RPCCert *cfgutil.ExplicitString `long:"rpccert" description:"File containing the certificate file"`
	RPCKey *cfgutil.ExplicitString `long:"rpckey" description:"File containing the certificate key"`
	OneTimeTLSKey bool `long:"onetimetlskey" description:"Generate a new TLS certpair at startup, but only write the certificate to disk"`
	DisableServerTLS bool `long:"noservertls" description:"Disable TLS for the RPC server -- NOTE: This is only allowed if the RPC server is bound to localhost"`
	LegacyRPCListeners []string `long:"rpclisten" description:"Listen for legacy RPC connections on this interface/port (default port: 8332, testnet: 18332, simnet: 18554)"`
	LegacyRPCMaxClients int64 `long:"rpcmaxclients" description:"Max number of legacy RPC clients for standard connections"`
	LegacyRPCMaxWebsockets int64 `long:"rpcmaxwebsockets" description:"Max number of legacy RPC websocket connections"`
	Username string `short:"u" long:"username" description:"Username for legacy RPC and btcd authentication (if btcdusername is unset)"`
	Password string `short:"P" long:"password" default-mask:"-" description:"Password for legacy RPC and btcd authentication (if btcdpassword is unset)"`

	// EXPERIMENTAL RPC server options
	//
	// These options will change (and require changes to config files, etc.)
	// when the new gRPC server is enabled.
	ExperimentalRPCListeners []string `long:"experimentalrpclisten" description:"Listen for RPC connections on this interface/port"`

	// Deprecated options
	DataDir *cfgutil.ExplicitString `short:"b" long:"datadir" default-mask:"-" description:"DEPRECATED -- use appdata instead"`
}
// cleanAndExpandPath expands environment variables and a leading ~ in the
// passed path, cleans the result, and returns it.
func cleanAndExpandPath(path string) string {
	// NOTE: os.ExpandEnv only understands POSIX-style $VARIABLE
	// references; Windows cmd.exe-style %VARIABLE% is not expanded.
	path = os.ExpandEnv(path)

	if !strings.HasPrefix(path, "~") {
		return filepath.Clean(path)
	}

	// A leading ~ refers to the current user's home directory, while
	// ~otheruser refers to otheruser's home directory.  On Windows, both
	// forward and backward slashes may terminate the user name.
	path = path[1:]
	separators := string(os.PathSeparator)
	if runtime.GOOS == "windows" {
		separators += "/"
	}

	var userName string
	if idx := strings.IndexAny(path, separators); idx != -1 {
		userName, path = path[:idx], path[idx:]
	}

	var (
		usr       *user.User
		lookupErr error
	)
	if userName == "" {
		usr, lookupErr = user.Current()
	} else {
		usr, lookupErr = user.Lookup(userName)
	}

	homeDir := ""
	if lookupErr == nil {
		homeDir = usr.HomeDir
	}
	// Fall back to the current directory if the lookup failed or the user
	// has no home directory.
	if homeDir == "" {
		homeDir = "."
	}

	return filepath.Join(homeDir, path)
}
// validLogLevel returns whether or not logLevel is a valid debug log level.
func validLogLevel(logLevel string) bool {
	// A single case listing every accepted level replaces the previous
	// fallthrough chain; it is the idiomatic Go form and is easier to
	// extend.
	switch logLevel {
	case "trace", "debug", "info", "warn", "error", "critical":
		return true
	}
	return false
}
// supportedSubsystems returns a sorted slice of the supported subsystems for
// logging purposes.
func supportedSubsystems() []string {
	// Collect the subsystem logger map keys and sort them so the listing
	// is stable across calls (map iteration order is random).
	ids := make([]string, 0, len(subsystemLoggers))
	for id := range subsystemLoggers {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	return ids
}
// parseAndSetDebugLevels attempts to parse the specified debug level and set
// the levels accordingly.  An appropriate error is returned if anything is
// invalid.
func parseAndSetDebugLevels(debugLevel string) error {
	// A spec without any delimiters is treated as the log level for every
	// subsystem at once.
	if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") {
		if !validLogLevel(debugLevel) {
			return fmt.Errorf("the specified debug level [%v] is invalid",
				debugLevel)
		}
		setLogLevels(debugLevel)
		return nil
	}

	// Otherwise the spec is a comma-separated list of subsystem=level
	// pairs.  Validate each pair and apply it individually.
	for _, pair := range strings.Split(debugLevel, ",") {
		if !strings.Contains(pair, "=") {
			return fmt.Errorf("the specified debug level contains an "+
				"invalid subsystem/level pair [%v]", pair)
		}

		parts := strings.Split(pair, "=")
		subsysID, level := parts[0], parts[1]

		// The subsystem must be one of the registered loggers.
		if _, ok := subsystemLoggers[subsysID]; !ok {
			return fmt.Errorf("the specified subsystem [%v] is invalid "+
				"-- supported subsytems %v", subsysID,
				supportedSubsystems())
		}

		if !validLogLevel(level) {
			return fmt.Errorf("the specified debug level [%v] is invalid",
				level)
		}

		setLogLevel(subsysID, level)
	}

	return nil
}
// loadConfig initializes and parses the config using a config file and command
// line options.
//
// The configuration proceeds as follows:
//      1) Start with a default config with sane settings
//      2) Pre-parse the command line to check for an alternative config file
//      3) Load configuration file overwriting defaults with any specified options
//      4) Parse CLI options and overwrite/add any specified options
//
// The above results in btcwallet functioning properly without any config
// settings while still allowing the user to override settings with config files
// and command line options.  Command line options always take precedence.
func loadConfig() (*config, []string, error) {
	// Default config.
	cfg := config{
		DebugLevel:             defaultLogLevel,
		ConfigFile:             cfgutil.NewExplicitString(defaultConfigFile),
		AppDataDir:             cfgutil.NewExplicitString(defaultAppDataDir),
		LogDir:                 defaultLogDir,
		WalletPass:             wallet.InsecurePubPassphrase,
		CAFile:                 cfgutil.NewExplicitString(""),
		RPCKey:                 cfgutil.NewExplicitString(defaultRPCKeyFile),
		RPCCert:                cfgutil.NewExplicitString(defaultRPCCertFile),
		LegacyRPCMaxClients:    defaultRPCMaxClients,
		LegacyRPCMaxWebsockets: defaultRPCMaxWebsockets,
		DataDir:                cfgutil.NewExplicitString(defaultAppDataDir),
		UseSPV:                 false,
		AddPeers:               []string{},
		ConnectPeers:           []string{},
		MaxPeers:               neutrino.MaxPeers,
		BanDuration:            neutrino.BanDuration,
		BanThreshold:           neutrino.BanThreshold,
		DBTimeout:              wallet.DefaultDBTimeout,
	}

	// Pre-parse the command line options to see if an alternative config
	// file or the version flag was specified.
	preCfg := cfg
	preParser := flags.NewParser(&preCfg, flags.Default)
	_, err := preParser.Parse()
	if err != nil {
		if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
			preParser.WriteHelp(os.Stderr)
		}
		return nil, nil, err
	}

	// Show the version and exit if the version flag was specified.
	funcName := "loadConfig"
	appName := filepath.Base(os.Args[0])
	appName = strings.TrimSuffix(appName, filepath.Ext(appName))
	usageMessage := fmt.Sprintf("Use %s -h to show usage", appName)
	if preCfg.ShowVersion {
		fmt.Println(appName, "version", version())
		os.Exit(0)
	}

	// Load additional config from file.
	var configFileError error
	parser := flags.NewParser(&cfg, flags.Default)
	configFilePath := preCfg.ConfigFile.Value
	if preCfg.ConfigFile.ExplicitlySet() {
		configFilePath = cleanAndExpandPath(configFilePath)
	} else {
		appDataDir := preCfg.AppDataDir.Value
		if !preCfg.AppDataDir.ExplicitlySet() && preCfg.DataDir.ExplicitlySet() {
			appDataDir = cleanAndExpandPath(preCfg.DataDir.Value)
		}
		if appDataDir != defaultAppDataDir {
			configFilePath = filepath.Join(appDataDir, defaultConfigFilename)
		}
	}
	err = flags.NewIniParser(parser).ParseFile(configFilePath)
	if err != nil {
		// A missing config file is not an error; it is only reported
		// as a warning after the final parse succeeds.
		if _, ok := err.(*os.PathError); !ok {
			fmt.Fprintln(os.Stderr, err)
			parser.WriteHelp(os.Stderr)
			return nil, nil, err
		}
		configFileError = err
	}

	// Parse command line options again to ensure they take precedence.
	remainingArgs, err := parser.Parse()
	if err != nil {
		if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
			parser.WriteHelp(os.Stderr)
		}
		return nil, nil, err
	}

	// Check deprecated aliases.  The new options receive priority when both
	// are changed from the default.
	if cfg.DataDir.ExplicitlySet() {
		fmt.Fprintln(os.Stderr, "datadir option has been replaced by "+
			"appdata -- please update your config")
		if !cfg.AppDataDir.ExplicitlySet() {
			cfg.AppDataDir.Value = cfg.DataDir.Value
		}
	}

	// If an alternate data directory was specified, and paths with defaults
	// relative to the data dir are unchanged, modify each path to be
	// relative to the new data dir.
	if cfg.AppDataDir.ExplicitlySet() {
		cfg.AppDataDir.Value = cleanAndExpandPath(cfg.AppDataDir.Value)
		if !cfg.RPCKey.ExplicitlySet() {
			cfg.RPCKey.Value = filepath.Join(cfg.AppDataDir.Value, "rpc.key")
		}
		if !cfg.RPCCert.ExplicitlySet() {
			cfg.RPCCert.Value = filepath.Join(cfg.AppDataDir.Value, "rpc.cert")
		}
	}

	// Choose the active network params based on the selected network.
	// Multiple networks can't be selected simultaneously.
	numNets := 0
	if cfg.TestNet3 {
		activeNet = &netparams.TestNet3Params
		numNets++
	}
	if cfg.SimNet {
		activeNet = &netparams.SimNetParams
		numNets++
	}
	if cfg.SigNet {
		activeNet = &netparams.SigNetParams
		numNets++

		// Let the user overwrite the default signet parameters. The
		// challenge defines the actual signet network to join and the
		// seed nodes are needed for network discovery.
		sigNetChallenge := chaincfg.DefaultSignetChallenge
		sigNetSeeds := chaincfg.DefaultSignetDNSSeeds
		if cfg.SigNetChallenge != "" {
			challenge, err := hex.DecodeString(cfg.SigNetChallenge)
			if err != nil {
				str := "%s: Invalid signet challenge, hex " +
					"decode failed: %v"
				err := fmt.Errorf(str, funcName, err)
				fmt.Fprintln(os.Stderr, err)
				fmt.Fprintln(os.Stderr, usageMessage)
				return nil, nil, err
			}
			sigNetChallenge = challenge
		}

		if len(cfg.SigNetSeedNode) > 0 {
			sigNetSeeds = make(
				[]chaincfg.DNSSeed, len(cfg.SigNetSeedNode),
			)
			for idx, seed := range cfg.SigNetSeedNode {
				sigNetSeeds[idx] = chaincfg.DNSSeed{
					Host:         seed,
					HasFiltering: false,
				}
			}
		}

		chainParams := chaincfg.CustomSignetParams(
			sigNetChallenge, sigNetSeeds,
		)
		activeNet.Params = &chainParams
	}
	if numNets > 1 {
		str := "%s: The testnet, signet and simnet params can't be " +
			"used together -- choose one"
		// Use funcName rather than a duplicated string literal for
		// consistency with the other error sites in this function.
		err := fmt.Errorf(str, funcName)
		fmt.Fprintln(os.Stderr, err)
		parser.WriteHelp(os.Stderr)
		return nil, nil, err
	}

	// Append the network type to the log directory so it is "namespaced"
	// per network.
	cfg.LogDir = cleanAndExpandPath(cfg.LogDir)
	cfg.LogDir = filepath.Join(cfg.LogDir, activeNet.Params.Name)

	// Special show command to list supported subsystems and exit.
	if cfg.DebugLevel == "show" {
		fmt.Println("Supported subsystems", supportedSubsystems())
		os.Exit(0)
	}

	// Initialize log rotation.  After log rotation has been initialized, the
	// logger variables may be used.
	initLogRotator(filepath.Join(cfg.LogDir, defaultLogFilename))

	// Parse, validate, and set debug log level(s).
	if err := parseAndSetDebugLevels(cfg.DebugLevel); err != nil {
		err := fmt.Errorf("%s: %v", funcName, err.Error())
		fmt.Fprintln(os.Stderr, err)
		parser.WriteHelp(os.Stderr)
		return nil, nil, err
	}

	// Exit if you try to use a simulation wallet with a standard
	// data directory.
	if !(cfg.AppDataDir.ExplicitlySet() || cfg.DataDir.ExplicitlySet()) && cfg.CreateTemp {
		fmt.Fprintln(os.Stderr, "Tried to create a temporary simulation "+
			"wallet, but failed to specify data directory!")
		os.Exit(0)
	}

	// Exit if you try to use a simulation wallet on anything other than
	// simnet or testnet3.
	if !cfg.SimNet && cfg.CreateTemp {
		fmt.Fprintln(os.Stderr, "Tried to create a temporary simulation "+
			"wallet for network other than simnet!")
		os.Exit(0)
	}

	// Ensure the wallet exists or create it when the create flag is set.
	netDir := networkDir(cfg.AppDataDir.Value, activeNet.Params)
	dbPath := filepath.Join(netDir, wallet.WalletDBName)

	if cfg.CreateTemp && cfg.Create {
		err := fmt.Errorf("the flags --create and --createtemp can not " +
			"be specified together. Use --help for more information")
		fmt.Fprintln(os.Stderr, err)
		return nil, nil, err
	}

	dbFileExists, err := cfgutil.FileExists(dbPath)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return nil, nil, err
	}

	if cfg.CreateTemp { // nolint:gocritic
		tempWalletExists := false

		if dbFileExists {
			// No formatting verbs are needed here, so write the
			// message directly instead of via a no-arg
			// fmt.Sprintf (flagged by vet/gocritic).
			fmt.Fprintln(os.Stdout, "The wallet already exists. Loading this "+
				"wallet instead.")
			tempWalletExists = true
		}

		// Ensure the data directory for the network exists.
		if err := checkCreateDir(netDir); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return nil, nil, err
		}

		if !tempWalletExists {
			// Perform the initial wallet creation wizard.
			if err := createSimulationWallet(&cfg); err != nil {
				fmt.Fprintln(os.Stderr, "Unable to create wallet:", err)
				return nil, nil, err
			}
		}
	} else if cfg.Create {
		// Error if the create flag is set and the wallet already
		// exists.
		if dbFileExists {
			err := fmt.Errorf("the wallet database file `%v` "+
				"already exists", dbPath)
			fmt.Fprintln(os.Stderr, err)
			return nil, nil, err
		}

		// Ensure the data directory for the network exists.
		if err := checkCreateDir(netDir); err != nil {
			fmt.Fprintln(os.Stderr, err)
			return nil, nil, err
		}

		// Perform the initial wallet creation wizard.
		if err := createWallet(&cfg); err != nil {
			fmt.Fprintln(os.Stderr, "Unable to create wallet:", err)
			return nil, nil, err
		}

		// Created successfully, so exit now with success.
		os.Exit(0)
	} else if !dbFileExists && !cfg.NoInitialLoad {
		keystorePath := filepath.Join(netDir, keystore.Filename)
		keystoreExists, err := cfgutil.FileExists(keystorePath)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return nil, nil, err
		}
		if !keystoreExists {
			err = fmt.Errorf("the wallet does not exist, run with " +
				"the --create option to initialize and create it")
		} else {
			err = fmt.Errorf("the wallet is in legacy format, run " +
				"with the --create option to import it")
		}
		fmt.Fprintln(os.Stderr, err)
		return nil, nil, err
	}

	localhostListeners := map[string]struct{}{
		"localhost": {},
		"127.0.0.1": {},
		"::1":       {},
	}

	if cfg.UseSPV {
		neutrino.MaxPeers = cfg.MaxPeers
		neutrino.BanDuration = cfg.BanDuration
		neutrino.BanThreshold = cfg.BanThreshold
	} else {
		if cfg.RPCConnect == "" {
			cfg.RPCConnect = net.JoinHostPort("localhost", activeNet.RPCClientPort)
		}

		// Add default port to connect flag if missing.
		cfg.RPCConnect, err = cfgutil.NormalizeAddress(cfg.RPCConnect,
			activeNet.RPCClientPort)
		if err != nil {
			fmt.Fprintf(os.Stderr,
				"Invalid rpcconnect network address: %v\n", err)
			return nil, nil, err
		}

		RPCHost, _, err := net.SplitHostPort(cfg.RPCConnect)
		if err != nil {
			return nil, nil, err
		}
		if cfg.DisableClientTLS {
			if _, ok := localhostListeners[RPCHost]; !ok {
				str := "%s: the --noclienttls option may not be used " +
					"when connecting RPC to non localhost " +
					"addresses: %s"
				err := fmt.Errorf(str, funcName, cfg.RPCConnect)
				fmt.Fprintln(os.Stderr, err)
				fmt.Fprintln(os.Stderr, usageMessage)
				return nil, nil, err
			}
		} else {
			// If CAFile is unset, choose either the copy or local btcd cert.
			if !cfg.CAFile.ExplicitlySet() {
				cfg.CAFile.Value = filepath.Join(cfg.AppDataDir.Value, defaultCAFilename)

				// If the CA copy does not exist, check if we're connecting to
				// a local btcd and switch to its RPC cert if it exists.
				certExists, err := cfgutil.FileExists(cfg.CAFile.Value)
				if err != nil {
					fmt.Fprintln(os.Stderr, err)
					return nil, nil, err
				}
				if !certExists {
					if _, ok := localhostListeners[RPCHost]; ok {
						btcdCertExists, err := cfgutil.FileExists(
							btcdDefaultCAFile)
						if err != nil {
							fmt.Fprintln(os.Stderr, err)
							return nil, nil, err
						}
						if btcdCertExists {
							cfg.CAFile.Value = btcdDefaultCAFile
						}
					}
				}
			}
		}
	}

	// Only set default RPC listeners when there are no listeners set for
	// the experimental RPC server.  This is required to prevent the old RPC
	// server from sharing listen addresses, since it is impossible to
	// remove defaults from go-flags slice options without assigning
	// specific behavior to a particular string.
	if len(cfg.ExperimentalRPCListeners) == 0 && len(cfg.LegacyRPCListeners) == 0 {
		addrs, err := net.LookupHost("localhost")
		if err != nil {
			return nil, nil, err
		}
		cfg.LegacyRPCListeners = make([]string, 0, len(addrs))
		for _, addr := range addrs {
			addr = net.JoinHostPort(addr, activeNet.RPCServerPort)
			cfg.LegacyRPCListeners = append(cfg.LegacyRPCListeners, addr)
		}
	}

	// Add default port to all rpc listener addresses if needed and remove
	// duplicate addresses.
	cfg.LegacyRPCListeners, err = cfgutil.NormalizeAddresses(
		cfg.LegacyRPCListeners, activeNet.RPCServerPort)
	if err != nil {
		fmt.Fprintf(os.Stderr,
			"Invalid network address in legacy RPC listeners: %v\n", err)
		return nil, nil, err
	}
	cfg.ExperimentalRPCListeners, err = cfgutil.NormalizeAddresses(
		cfg.ExperimentalRPCListeners, activeNet.RPCServerPort)
	if err != nil {
		fmt.Fprintf(os.Stderr,
			"Invalid network address in RPC listeners: %v\n", err)
		return nil, nil, err
	}

	// Both RPC servers may not listen on the same interface/port.
	if len(cfg.LegacyRPCListeners) > 0 && len(cfg.ExperimentalRPCListeners) > 0 {
		seenAddresses := make(map[string]struct{}, len(cfg.LegacyRPCListeners))
		for _, addr := range cfg.LegacyRPCListeners {
			seenAddresses[addr] = struct{}{}
		}
		for _, addr := range cfg.ExperimentalRPCListeners {
			_, seen := seenAddresses[addr]
			if seen {
				err := fmt.Errorf("address `%s` may not be "+
					"used as a listener address for both "+
					"RPC servers", addr)
				fmt.Fprintln(os.Stderr, err)
				return nil, nil, err
			}
		}
	}

	// Only allow server TLS to be disabled if the RPC server is bound to
	// localhost addresses.
	if cfg.DisableServerTLS {
		allListeners := append(cfg.LegacyRPCListeners,
			cfg.ExperimentalRPCListeners...)
		for _, addr := range allListeners {
			host, _, err := net.SplitHostPort(addr)
			if err != nil {
				str := "%s: RPC listen interface '%s' is " +
					"invalid: %v"
				err := fmt.Errorf(str, funcName, addr, err)
				fmt.Fprintln(os.Stderr, err)
				fmt.Fprintln(os.Stderr, usageMessage)
				return nil, nil, err
			}
			if _, ok := localhostListeners[host]; !ok {
				str := "%s: the --noservertls option may not be used " +
					"when binding RPC to non localhost " +
					"addresses: %s"
				err := fmt.Errorf(str, funcName, addr)
				fmt.Fprintln(os.Stderr, err)
				fmt.Fprintln(os.Stderr, usageMessage)
				return nil, nil, err
			}
		}
	}

	// Expand environment variable and leading ~ for filepaths.
	cfg.CAFile.Value = cleanAndExpandPath(cfg.CAFile.Value)
	cfg.RPCCert.Value = cleanAndExpandPath(cfg.RPCCert.Value)
	cfg.RPCKey.Value = cleanAndExpandPath(cfg.RPCKey.Value)

	// If the btcd username or password are unset, use the same auth as for
	// the client.  The two settings were previously shared for btcd and
	// client auth, so this avoids breaking backwards compatibility while
	// allowing users to use different auth settings for btcd and wallet.
	if cfg.BtcdUsername == "" {
		cfg.BtcdUsername = cfg.Username
	}
	if cfg.BtcdPassword == "" {
		cfg.BtcdPassword = cfg.Password
	}

	// Warn about missing config file after the final command line parse
	// succeeds.  This prevents the warning on help messages and invalid
	// options.
	if configFileError != nil {
		log.Warnf("%v", configFileError)
	}

	return &cfg, remainingArgs, nil
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rule
import (
"testing"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
"github.com/stretchr/testify/require"
)
// runJoinReorderTestData loads the test cases registered under name from the
// join reorder suite data and verifies both the explain output and the
// warnings produced for every input statement.
func runJoinReorderTestData(t *testing.T, tk *testkit.TestKit, name string) {
	var (
		cases   []string
		results []struct {
			SQL     string
			Plan    []string
			Warning []string
		}
	)
	GetJoinReorderSuiteData().LoadTestCasesByName(name, t, &cases, &results)
	require.Equal(t, len(cases), len(results))
	for i, sql := range cases {
		explainSQL := "explain format = 'brief' " + sql
		// When recording, capture the current plan and warnings as
		// the new expected output for this case.
		testdata.OnRecord(func() {
			results[i].SQL = sql
			results[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(explainSQL).Rows())
			results[i].Warning = testdata.ConvertRowsToStrings(tk.MustQuery("show warnings").Rows())
		})
		tk.MustQuery(explainSQL).Check(testkit.Rows(results[i].Plan...))
		tk.MustQuery("show warnings").Check(testkit.Rows(results[i].Warning...))
	}
}
// TestStraightJoinHint checks the plans produced for the straight_join hint.
func TestStraightJoinHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t, t1, t2, t3, t4;")
	// All five tables share the same schema; create them in a loop.
	for _, tbl := range []string{"t", "t1", "t2", "t3", "t4"} {
		tk.MustExec("create table " + tbl + "(a int, b int, key(a));")
	}
	runJoinReorderTestData(t, tk, "TestStraightJoinHint")
}
// TestNoHashJoinHint checks the plans produced for the no_hash_join hint.
func TestNoHashJoinHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// The four tables share the same schema; create them in a loop.
	for _, tbl := range []string{"t1", "t2", "t3", "t4"} {
		tk.MustExec("create table " + tbl + "(a int, b int, key(a));")
	}
	runJoinReorderTestData(t, tk, "TestNoHashJoinHint")
}
// TestNoMergeJoinHint checks the plans produced for the no_merge_join hint.
func TestNoMergeJoinHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// The four tables share the same schema; create them in a loop.
	for _, tbl := range []string{"t1", "t2", "t3", "t4"} {
		tk.MustExec("create table " + tbl + "(a int, key(a));")
	}
	runJoinReorderTestData(t, tk, "TestNoMergeJoinHint")
}
// TestNoIndexJoinHint checks the plans produced for the no_index_join hint
// with index merge join enabled.
func TestNoIndexJoinHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec(`set tidb_enable_index_merge_join=true`)
	tk.MustExec("use test")
	// The four tables share the same schema; create them in a loop.
	for _, tbl := range []string{"t1", "t2", "t3", "t4"} {
		tk.MustExec("create table " + tbl + "(a int, key(a));")
	}
	runJoinReorderTestData(t, tk, "TestNoIndexJoinHint")
}
// TestLeadingJoinHint checks the plans produced for the leading hint, plus the
// warning emitted when multiple leading hints appear in one statement.
func TestLeadingJoinHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6, t7, t8;")
	// All nine tables share the same schema; create them in a loop.
	for _, tbl := range []string{"t", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8"} {
		tk.MustExec("create table " + tbl + "(a int, b int, key(a));")
	}
	runJoinReorderTestData(t, tk, "TestLeadingJoinHint")

	// test cases for multiple leading hints
	tk.MustExec("select /*+ leading(t1) leading(t2) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid"))
}
// TestJoinOrderHint exercises interactions between the join order hints
// (leading, straight_join): conflicting/duplicated hints, hint table name
// matching (including aliases), and hints that cross query blocks.  The
// checked warning text must match the planner output byte-for-byte.
func TestJoinOrderHint(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6, t7, t8;")
	tk.MustExec("create table t(a int, b int, key(a));")
	tk.MustExec("create table t1(a int, b int, key(a));")
	tk.MustExec("create table t2(a int, b int, key(a));")
	tk.MustExec("create table t3(a int, b int, key(a));")
	tk.MustExec("create table t4(a int, b int, key(a));")
	tk.MustExec("create table t5(a int, b int, key(a));")
	tk.MustExec("create table t6(a int, b int, key(a));")
	tk.MustExec("create table t7(a int, b int, key(a));")
	tk.MustExec("create table t8(a int, b int, key(a));")

	// test cases for using the leading hint and straight_join hint at the same time
	tk.MustExec("select /*+ leading(t1) straight_join() */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use the straight_join hint, when we use the leading hint and straight_join hint at the same time, all leading hints will be invalid"))

	// The hint order within the statement does not matter; straight_join wins either way.
	tk.MustExec("select /*+ straight_join() leading(t1) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use the straight_join hint, when we use the leading hint and straight_join hint at the same time, all leading hints will be invalid"))

	// more join order hints appear in the same time
	tk.MustExec("select /*+ leading(t1) leading(t1) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid"))

	tk.MustExec("select /*+ leading(t1) leading(t2) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid"))

	// Duplicated straight_join hints only warn; the last definition wins.
	tk.MustExec("select /*+ straight_join() straight_join() */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 STRAIGHT_JOIN() is defined more than once, only the last definition takes effect"))

	// test cases for table name in hint
	// the same table appears in the leading hint
	tk.MustExec("select /*+ leading(t1, t1) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t1) in optimizer hint /*+ LEADING(t1, t1) */. Maybe you can use the table alias name",
		"Warning 1815 leading hint is inapplicable, check if the leading hint table is valid"))

	tk.MustExec("select /*+ leading(t1, t2, t1) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t1) in optimizer hint /*+ LEADING(t1, t2, t1) */. Maybe you can use the table alias name",
		"Warning 1815 leading hint is inapplicable, check if the leading hint table is valid"))

	// the wrong table appears in the leading hint
	tk.MustExec("select /*+ leading(t) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t) in optimizer hint /*+ LEADING(t) */. Maybe you can use the table alias name"))

	tk.MustExec("select /*+ leading(t1, t2, t) */ * from t1 join t2 on t1.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t) in optimizer hint /*+ LEADING(t1, t2, t) */. Maybe you can use the table alias name",
		"Warning 1815 leading hint is inapplicable, check if the leading hint table is valid"))

	// table alias in the leading hint
	tk.MustExec("select /*+ leading(t) */ * from t1 t join t2 on t.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows())

	tk.MustExec("select /*+ leading(t1) */ * from t1 t join t2 on t.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t1) in optimizer hint /*+ LEADING(t1) */. Maybe you can use the table alias name"))

	tk.MustExec("select /*+ leading(t2, t) */ * from t1 t join t2 on t.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows())

	tk.MustExec("select /*+ leading(t2, t1) */ * from t1 t join t2 on t.a=t2.a join t3 on t2.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t1) in optimizer hint /*+ LEADING(t2, t1) */. Maybe you can use the table alias name",
		"Warning 1815 leading hint is inapplicable, check if the leading hint table is valid"))

	// table name in leading hint cross query block
	// Todo: Can not handle this case yet. Because when we extract the join group, it will get the join group {t1, t2, t3}.
	// So the table 't4' can not be used.
	tk.MustExec("select /*+ leading(t4) */ * from (select t2.b from t1 join t2 on t1.a=t2.a) t4 join t3 on t4.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 leading hint is inapplicable, check if the leading hint table is valid"))

	tk.MustExec("select /*+ leading(t3, t2@sel_2) */ * from (select t2.b from t1 join t2 on t1.a=t2.a) t4 join t3 on t4.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t2) in optimizer hint /*+ LEADING(t3, t2) */. Maybe you can use the table alias name"))

	tk.MustExec("select * from (select /*+ leading(t1, t3@sel_1) */ t2.b from t1 join t2 on t1.a=t2.a) t4 join t3 on t4.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 There are no matching table names for (t3) in optimizer hint /*+ LEADING(t1, t3) */. Maybe you can use the table alias name"))

	// Leading hints in both the outer and the inner query block conflict.
	tk.MustExec("select /*+ leading(t3) */ * from (select /*+ leading(t1) */ t2.b from t1 join t2 on t1.a=t2.a) t4 join t3 on t4.b=t3.b")
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1815 We can only use one leading hint at most, when multiple leading hints are used, all leading hints will be invalid"))

	runJoinReorderTestData(t, tk, "TestJoinOrderHint")
}
// TestJoinOrderHint4StaticPartitionTable checks leading/join-order hints on
// hash-partitioned tables under static partition pruning.
func TestJoinOrderHint4StaticPartitionTable(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t, t1, t2, t3;")
	// Partitioned tables referenced by the TestJoinOrderHint4StaticPartitionTable test data.
	for _, ddl := range []string{
		`create table t(a int, b int) partition by hash(a) partitions 3`,
		`create table t1(a int, b int) partition by hash(a) partitions 4`,
		`create table t2(a int, b int) partition by hash(a) partitions 5`,
		`create table t3(a int, b int) partition by hash(b) partitions 3`,
		`create table t4(a int, b int) partition by hash(a) partitions 4`,
		`create table t5(a int, b int) partition by hash(a) partitions 5`,
		`create table t6(a int, b int) partition by hash(b) partitions 3`,
	} {
		tk.MustExec(ddl)
	}
	tk.MustExec(`set @@tidb_partition_prune_mode="static"`)
	tk.MustExec("set @@tidb_enable_outer_join_reorder=true")
	runJoinReorderTestData(t, tk, "TestJoinOrderHint4StaticPartitionTable")
}
func TestJoinOrderHint4DynamicPartitionTable(t *testing.T) {
failpoint.Enable("github.com/pingcap/tidb/planner/core/forceDynamicPrune", `return(true)`)
defer failpoint.Disable("github.com/pingcap/tidb/planner/core/forceDynamicPrune")
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t, t1, t2, t3;")
tk.MustExec(`create table t(a int, b int) partition by hash(a) partitions 3`)
tk.MustExec(`create table t1(a int, b int) partition by hash(a) partitions 4`)
tk.MustExec(`create table t2(a int, b int) partition by hash(a) partitions 5`)
tk.MustExec(`create table t3(a int, b int) partition by hash(b) partitions 3`)
tk.MustExec(`create table t4(a int, b int) partition by hash(a) partitions 4`)
tk.MustExec(`create table t5(a int, b int) partition by hash(a) partitions 5`)
tk.MustExec(`create table t6(a int, b int) partition by hash(b) partitions 3`)
tk.MustExec(`set @@tidb_partition_prune_mode="dynamic"`)
tk.MustExec("set @@tidb_enable_outer_join_reorder=true")
runJoinReorderTestData(t, tk, "TestJoinOrderHint4DynamicPartitionTable")
}
// TestJoinOrderHint4DifferentJoinType checks leading/join-order hints across
// different join types using tables t through t8.
func TestJoinOrderHint4DifferentJoinType(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6, t7, t8;")
	// All tables share the same schema: two int columns with an index on a.
	for _, tbl := range []string{"t", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8"} {
		tk.MustExec("create table " + tbl + "(a int, b int, key(a));")
	}
	tk.MustExec("set @@tidb_enable_outer_join_reorder=true")
	runJoinReorderTestData(t, tk, "TestJoinOrderHint4DifferentJoinType")
}
// TestJoinOrderHint4TiFlash checks leading/join-order hints when the tables
// have (virtual) TiFlash replicas and MPP is enforced.
func TestJoinOrderHint4TiFlash(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	// Fix: t4, t5 and t6 are created below but were missing from the drop
	// list, so leftovers from a previous run would break the setup.
	tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6;")
	tk.MustExec("create table t(a int, b int, key(a));")
	tk.MustExec("create table t1(a int, b int, key(a));")
	tk.MustExec("create table t2(a int, b int, key(a));")
	tk.MustExec("create table t3(a int, b int, key(a));")
	tk.MustExec("create table t4(a int, b int, key(a));")
	tk.MustExec("create table t5(a int, b int, key(a));")
	tk.MustExec("create table t6(a int, b int, key(a));")
	tk.MustExec("set @@tidb_enable_outer_join_reorder=true")
	// Create virtual tiflash replica info for every test table.
	dom := domain.GetDomain(tk.Session())
	is := dom.InfoSchema()
	db, exists := is.SchemaByName(model.NewCIStr("test"))
	require.True(t, exists)
	for _, tblInfo := range db.Tables {
		tableName := tblInfo.Name.L
		if tableName == "t" || tableName == "t1" || tableName == "t2" || tableName == "t3" || tableName == "t4" || tableName == "t5" || tableName == "t6" {
			tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{
				Count:     1,
				Available: true,
			}
		}
	}
	tk.MustExec("set @@tidb_allow_mpp=1; set @@tidb_enforce_mpp=1;")
	runJoinReorderTestData(t, tk, "TestJoinOrderHint4TiFlash")
}
// TestJoinOrderHint4Subquery checks leading/join-order hints for queries
// containing subqueries; t3 is populated and analyzed to refresh its stats.
func TestJoinOrderHint4Subquery(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6, t7, t8;")
	// All tables share the same schema: two int columns with an index on a.
	for _, tbl := range []string{"t", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8"} {
		tk.MustExec("create table " + tbl + "(a int, b int, key(a));")
	}
	tk.MustExec("insert into t3 values(1, 1), (2, 2), (3, 3);")
	tk.MustExec("analyze table t3;")
	runJoinReorderTestData(t, tk, "TestJoinOrderHint4Subquery")
}
// TestLeadingJoinHint4OuterJoin checks the leading hint against outer joins
// with outer-join reorder enabled.
func TestLeadingJoinHint4OuterJoin(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("set tidb_cost_model_version=2")
	tk.MustExec("drop table if exists t, t1, t2, t3, t4, t5, t6, t7, t8;")
	// All tables share the same schema: two int columns with an index on a.
	for _, tbl := range []string{"t", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8"} {
		tk.MustExec("create table " + tbl + "(a int, b int, key(a));")
	}
	tk.MustExec("set @@tidb_enable_outer_join_reorder=true")
	runJoinReorderTestData(t, tk, "TestLeadingJoinHint4OuterJoin")
}
|
package model
import (
"fmt"
"gorm.io/gorm"
"time"
)
// PlatformBankCardTableName is the database table backing PlatformBankCard.
const PlatformBankCardTableName = "platform_bank_card"

const (
	// Status values: 1 = enabled, 2 = disabled.
	PlatformBankCardEnable = 1
	PlatformBankCardDisable = 2
)
// PlatformBankCard is the GORM model for one platform bank-card row.
// Fix: the tags previously read e.g. `gorm:"bank_name"`, which is not a valid
// GORM column tag (GORM expects `column:`); the code only worked because the
// default snake_case naming happened to match. The column names are now
// explicit and Id is explicitly marked as the primary key.
type PlatformBankCard struct {
	Id            int64  `gorm:"column:id;primaryKey"`   // row id
	BankName      string `gorm:"column:bank_name"`       // bank name
	AccountName   string `gorm:"column:account_name"`    // account holder name
	CreateTime    int64  `gorm:"column:create_time"`     // creation time (unix seconds)
	CardNumber    string `gorm:"column:card_number"`     // bank card number
	BranchName    string `gorm:"column:branch_name"`     // branch name
	Currency      string `gorm:"column:currency"`        // currency code
	MaxAmount     int64  `gorm:"column:max_amount"`      // maximum receivable amount
	QrCode        string `gorm:"column:qr_code"`         // payment QR code
	Remark        string `gorm:"column:remark"`          // free-form remark
	Status        int64  `gorm:"column:status"`          // status: 1 = enabled, 2 = disabled
	TodayReceived int64  `gorm:"column:today_received"`  // amount received today
	BankCode      string `gorm:"column:bank_code"`
}
// TableName reports the database table this model maps to.
func (t *PlatformBankCard) TableName() string {
	const table = PlatformBankCardTableName
	return table
}
// NewPlatformBankCardModel wires a gorm handle into a bank-card model helper.
func NewPlatformBankCardModel(db *gorm.DB) *PlatformBankCardModel {
	m := PlatformBankCardModel{db: db}
	return &m
}
// PlatformBankCardModel groups CRUD helpers for platform_bank_card rows.
type PlatformBankCardModel struct {
	db *gorm.DB // shared gorm connection, injected by NewPlatformBankCardModel
}
// Insert stores a new record, stamping CreateTime with the current unix time.
func (m *PlatformBankCardModel) Insert(data *PlatformBankCard) error {
	data.CreateTime = time.Now().Unix()
	return m.db.Create(data).Error
}
// Update modifies an existing card. branch_name, qr_code and remark are
// always written (so they can be cleared); the remaining fields are treated
// as "not provided" when empty / zero and left untouched.
func (m *PlatformBankCardModel) Update(id int64, data PlatformBankCard) error {
	values := map[string]interface{}{
		"branch_name": data.BranchName,
		"qr_code":     data.QrCode,
		"remark":      data.Remark,
	}
	// Optional string columns: only written when non-empty.
	optional := map[string]string{
		"bank_name":    data.BankName,
		"account_name": data.AccountName,
		"card_number":  data.CardNumber,
		"currency":     data.Currency,
	}
	for column, v := range optional {
		if v != "" {
			values[column] = v
		}
	}
	if data.MaxAmount != 0 {
		values["max_amount"] = data.MaxAmount
	}
	return m.db.Model(&PlatformBankCard{Id: id}).Updates(&values).Error
}
// Delete removes the card with the given primary key.
func (m *PlatformBankCardModel) Delete(id int64) error {
	target := PlatformBankCard{Id: id}
	return m.db.Delete(&target).Error
}
// CheckById reports whether a card with the given id exists.
func (m *PlatformBankCardModel) CheckById(id int64) (bool, error) {
	var total int64
	err := m.db.Model(&PlatformBankCard{}).Where("id = ? ", id).Count(&total).Error
	if err != nil {
		return false, err
	}
	return total > 0, nil
}
// CheckBankCard reports whether a card with the same bank name, card number
// and currency already exists (duplicate check).
func (m *PlatformBankCardModel) CheckBankCard(bankName, cardNumber, currency string) (bool, error) {
	var total int64
	query := m.db.Model(&PlatformBankCard{}).
		Where("bank_name = ? and card_number= ? and currency= ?", bankName, cardNumber, currency)
	if err := query.Count(&total).Error; err != nil {
		return false, err
	}
	return total > 0, nil
}
// FindOneById loads a single card by primary key.
// Fix: the First() error was previously discarded, so a missing row (or any
// query failure) silently returned an empty struct with a nil error. The
// error (including gorm.ErrRecordNotFound) is now propagated to the caller.
func (m *PlatformBankCardModel) FindOneById(id int64) (*PlatformBankCard, error) {
	var o PlatformBankCard
	if err := m.db.Model(&PlatformBankCard{}).Where("id = ? ", id).First(&o).Error; err != nil {
		return nil, err
	}
	return &o, nil
}
// FindPlatformBankCardList carries the filter and paging options for
// PlatformBankCardModel.FindList.
type FindPlatformBankCardList struct {
	Search   string // fuzzy match against account_name or card_number; "" disables
	Currency string // exact currency filter; "" disables
	Page     int64  // 1-based page number; 0 defaults to 1
	PageSize int64  // rows per page; 0 defaults to 10
	Status   int64  // status filter; 0 disables
}
// FindList returns one page of bank cards matching the filter, plus the total
// number of matches (counted before paging).
// NOTE(review): the WHERE string is raw SQL handed to gorm's Where(), and the
// ORDER BY / LIMIT / OFFSET clauses are appended into that same string. This
// appears to work because the database tokenizes the concatenated text, but
// it is fragile — confirm before reusing the pattern elsewhere.
func (m *PlatformBankCardModel) FindList(f FindPlatformBankCardList) ([]*PlatformBankCard, int64, error) {
	var (
		//card = fmt.Sprintf("%s p", PlatformBankCardTableName)
		//rechargeOrder = fmt.Sprintf("left join %s m on m.platform_bank_card_id = p.id and m.order_status = ? ", MerchantRechargeOrderTableName)
		//selectField = "p.id, p.bank_name, p.account_name, p.create_time, p.card_number, " +
		// "p.branch_name, p.currency, p.max_amount, p.remark, p.status "
		whereStr = " 1=1 " // base predicate so every later clause can start with "and"
		args []interface{}
	)
	// Fuzzy match on account name or card number.
	if f.Search != "" {
		whereStr += "and (account_name like ? or card_number like ? )"
		args = append(args, "%"+f.Search+"%", "%"+f.Search+"%")
	}
	if f.Currency != "" {
		whereStr += "and currency = ? "
		args = append(args, f.Currency)
	}
	// Status 0 means "no status filter".
	if f.Status != 0 {
		whereStr += "and status = ? "
		args = append(args, f.Status)
	}
	// Paging defaults: first page, 10 rows.
	if f.Page == 0 {
		f.Page = 1
	}
	if f.PageSize == 0 {
		f.PageSize = 10
	}
	// Total match count, taken before the paging clauses are appended.
	var cnt int64
	if err := m.db.Model(&PlatformBankCard{}).Where(whereStr, args...).Count(&cnt).Error; err != nil {
		return nil, 0, err
	}
	//whereStr += "group by p.id order by p.create_time desc "
	whereStr += "order by create_time desc limit ? offset ? "
	args = append(args, f.PageSize, (f.Page-1)*f.PageSize)
	var resp []*PlatformBankCard
	if err := m.db.Model(&PlatformBankCard{}).Where(whereStr, args...).Scan(&resp).Error; err != nil {
		return nil, 0, err
	}
	return resp, cnt, nil
}
// EnableCard switches the card's status to enabled.
func (m *PlatformBankCardModel) EnableCard(id int64) error {
	return m.db.Model(&PlatformBankCard{Id: id}).
		Update("status", PlatformBankCardEnable).Error
}
// DisableCard switches the card's status to disabled.
func (m *PlatformBankCardModel) DisableCard(id int64) error {
	return m.db.Model(&PlatformBankCard{Id: id}).
		Update("status", PlatformBankCardDisable).Error
}
// 累加银行卡今日已收款
func (m *PlatformBankCardModel) PlusTodayReceived(id, amount int64) error {
sqlStr := fmt.Sprintf("UPDATE %s SET today_received=today_received+? WHERE id = ?", PlatformBankCardTableName)
result := m.db.Exec(sqlStr, amount, id)
return result.Error
}
// TodayReceivedClear resets today_received to zero on every card that has a
// non-zero balance for the day.
func (m *PlatformBankCardModel) TodayReceivedClear() error {
	query := fmt.Sprintf("UPDATE %s SET today_received=0 WHERE today_received > 0", PlatformBankCardTableName)
	return m.db.Exec(query).Error
}
|
package listing
import "errors"
// ErrNotFound is used when a post could not be found.
var ErrNotFound = errors.New("Post not found")
// Repository provides access to the post storage backing the listing service.
type Repository interface {
	// GetPost returns the post with the given ID.
	GetPost(string) (Post, error)
	// GetAllPosts returns all posts saved in storage.
	GetAllPosts() []Post
}
// Service provides post listing operations (read-only lookups over the
// underlying Repository).
type Service interface {
	// GetPost returns the post with the given ID.
	GetPost(string) (Post, error)
	// GetPosts returns every stored post.
	GetPosts() []Post
}
// service is the default Service implementation; it delegates all storage
// access to the injected Repository.
type service struct {
	r Repository // backing post store
}
// NewService creates a listing service backed by the given repository.
func NewService(r Repository) Service {
	svc := service{r: r}
	return &svc
}
// GetPost looks up a single post by its id, delegating to the repository.
func (s *service) GetPost(id string) (Post, error) {
	post, err := s.r.GetPost(id)
	return post, err
}
// GetPosts returns every post known to the repository.
func (s *service) GetPosts() []Post {
	all := s.r.GetAllPosts()
	return all
}
|
// Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package eternus
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"strconv"
"strings"
log "github.com/golang/glog"
"github.com/sodafoundation/dock/pkg/utils/pwd"
"golang.org/x/crypto/ssh"
yaml "gopkg.in/yaml.v2"
)
// EternusClient drives the ETERNUS storage CLI over an interactive SSH
// session. The std* pipes are populated by login() and reused for every
// subsequent command; using the client before login() leaves them nil.
type EternusClient struct {
	user        string // CLI login user
	password    string // plain-text password (already decrypted)
	endpoint    string // storage array address, without the SSH port
	stdin       io.WriteCloser // shell stdin; commands are written here
	stdout      io.Reader      // shell stdout; command output is read here
	stderr      io.Reader
	cliConfPath string // path to the yaml file describing CLI output layouts
}
// NewClient builds a CLI client for the regular user account in opt,
// decrypting the stored password first when encryption is enabled.
func NewClient(opt *AuthOptions) (*EternusClient, error) {
	password := opt.Password
	if opt.EnableEncrypted {
		// The stored password is ciphertext; decrypt it before use.
		plain, err := pwd.NewPwdEncrypter(opt.PwdEncrypter).Decrypter(password)
		if err != nil {
			return nil, err
		}
		password = plain
	}
	return &EternusClient{
		user:        opt.Username,
		password:    password,
		endpoint:    opt.Endpoint,
		cliConfPath: defaultCliConfPath,
	}, nil
}
// NewClientForAdmin builds a CLI client for the admin account in opt,
// decrypting the stored password first when encryption is enabled.
func NewClientForAdmin(opt *AuthOptions) (*EternusClient, error) {
	password := opt.AdminPassword
	if opt.EnableEncrypted {
		// The stored password is ciphertext; decrypt it before use.
		plain, err := pwd.NewPwdEncrypter(opt.PwdEncrypter).Decrypter(password)
		if err != nil {
			return nil, err
		}
		password = plain
	}
	return &EternusClient{
		user:        opt.AdminUsername,
		password:    password,
		endpoint:    opt.Endpoint,
		cliConfPath: defaultCliConfPath,
	}, nil
}
// Destroy ends the remote CLI session by sending the "exit" command.
// It assumes login() has been called; c.stdin is nil otherwise.
func (c *EternusClient) Destroy() error {
	_, err := c.stdin.Write([]byte("exit\n"))
	return err
}
// setConfig assembles the SSH client configuration: password authentication
// and the default cipher list extended with 3des-cbc.
// NOTE(review): host-key verification is disabled (InsecureIgnoreHostKey);
// acceptable only on a trusted management network — confirm.
func (c *EternusClient) setConfig() *ssh.ClientConfig {
	var base ssh.Config
	base.SetDefaults()
	cfg := &ssh.ClientConfig{
		User:            c.user,
		Auth:            []ssh.AuthMethod{ssh.Password(c.password)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	cfg.Ciphers = append(base.Ciphers, "3des-cbc")
	return cfg
}
// createSession dials the storage array on the SSH port and opens a session.
func (c *EternusClient) createSession(config *ssh.ClientConfig) (*ssh.Session, error) {
	addr := c.endpoint + ":" + SSHPort
	conn, err := ssh.Dial("tcp", addr, config)
	if err != nil {
		log.Error("failed to dial: " + err.Error())
		return nil, err
	}
	sess, err := conn.NewSession()
	if err != nil {
		log.Error("failed to create session: " + err.Error())
		return nil, err
	}
	return sess, nil
}
// doRequest writes one CLI command (plus "-key value" options) to the
// interactive shell and collects output until the "CLI> " prompt returns.
func (c *EternusClient) doRequest(cmd string, param map[string]string) (bytes.Buffer, error) {
	// Build the option string from the parameter map.
	cmdOption := ""
	if param != nil {
		for k, v := range param {
			cmdOption += fmt.Sprintf(" -%s %s ", k, v)
		}
	}
	// Execute the command.
	log.Infof("execute cli. cmd = %s, option = %s", cmd, cmdOption)
	c.stdin.Write([]byte(cmd + cmdOption + "\n"))
	var result bytes.Buffer
	buff := make([]byte, 65535)
	// Accumulate command output.
	for {
		n, readErr := c.stdout.Read(buff)
		if readErr != nil && readErr != io.EOF {
			return result, readErr
		}
		result.Write(buff[:n])
		// Fix: the read's io.EOF was previously overwritten by result.Write's
		// (always nil) error, so a connection that closed without a trailing
		// prompt spun in this loop forever. Keep the read error separate and
		// stop on EOF.
		if readErr == io.EOF {
			break
		}
		// Ignore the first bare '\r\nCLI> ' prompt and keep reading.
		if result.String() == "\r\nCLI> " {
			continue
		}
		// Output is complete once it ends with the prompt.
		if strings.HasSuffix(result.String(), "CLI> ") {
			break
		}
	}
	c.stdin.Write([]byte("\n"))
	return result, nil
}
// request runs a CLI command with up to two attempts and parses its raw
// output into key/value rows via the yaml layout definitions.
func (c *EternusClient) request(cmd string, param map[string]string) ([]map[string]string, error) {
	var parsed []map[string]string
	var lastErr error
	for attempt := 1; attempt <= 2; attempt++ {
		raw, err := c.doRequest(cmd, param)
		if err != nil {
			log.Errorf("cmd:%s %s\n param:%v", cmd, c.endpoint, param)
			log.Errorf("request %d times error:%v", attempt, err)
			lastErr = err
			continue
		}
		rows := c.convStringArray(cmd, &raw)
		parsed, err = c.parseResult(cmd, rows)
		if err != nil {
			log.Errorf("failed to execute cmd. err = %s, output = %v", err.Error(), rows)
			lastErr = err
			continue
		}
		// Success: return the parsed rows of the first clean attempt.
		return parsed, nil
	}
	// Both attempts failed; parsed may hold partial data (e.g. an errorCode
	// entry) from the last parse, matching the previous behavior.
	return parsed, lastErr
}
// requestForAdmin is a temporary helper for the snapshot path only: it runs a
// CLI command (retrying once, like request) but returns the raw output buffer
// instead of parsed rows, after scanning it for an explicit "Error: " line.
// Do not use this function outside snapshot handling.
func (c *EternusClient) requestForAdmin(cmd string, param map[string]string) (bytes.Buffer, error) {
	var b bytes.Buffer
	var err error
	success := false
	// At most two attempts, mirroring request().
	for i := 0; i < 2; i++ {
		b, err = c.doRequest(cmd, param)
		if err == nil {
			success = true
			break
		} else {
			log.Errorf("cmd:%s %s\n param:%v", cmd, c.endpoint, param)
		}
		log.Errorf("request %d times error:%v", i+1, err)
	}
	if success == false {
		return b, err
	}
	// Scan the raw output line by line for an explicit CLI error.
	for _, s := range strings.Split(b.String(), "\r\n") {
		// ignore empty line(first elem)
		if s == "" {
			continue
		}
		// ignore echo back string
		if strings.HasPrefix(s, "CLI> "+cmd) {
			continue
		}
		// ignore last line and stop parse
		if s == "CLI> " {
			break
		}
		// check error
		if strings.HasPrefix(s, "Error: ") {
			errMsg := fmt.Sprintf("failed to command output = %s", s)
			log.Error(errMsg)
			return b, errors.New(s)
		}
	}
	return b, nil
}
// login opens the interactive CLI shell: it dials SSH, wires the std* pipes
// into the client, requests a PTY with echo disabled (so output parsing never
// sees keystrokes), and starts the remote shell. It must be called before
// doRequest or Destroy; the setup order below matters.
func (c *EternusClient) login() error {
	config := c.setConfig()
	session, err := c.createSession(config)
	if err != nil {
		log.Error("failed to get session: " + err.Error())
		return err
	}
	// Pipes must be obtained before the shell is started.
	c.stdin, err = session.StdinPipe()
	if err != nil {
		log.Error("failed to get StdinPipe: " + err.Error())
		return err
	}
	c.stdout, err = session.StdoutPipe()
	if err != nil {
		log.Error("failed to get StdoutPipe: " + err.Error())
		return err
	}
	c.stderr, err = session.StderrPipe()
	if err != nil {
		log.Error("failed to get StderrPipe: " + err.Error())
		return err
	}
	// Disable terminal echo so the command output contains no input echoes.
	modes := ssh.TerminalModes{
		ssh.ECHO: 0,
		ssh.ECHOCTL: 0,
		ssh.TTY_OP_ISPEED: 115200,
		ssh.TTY_OP_OSPEED: 115200,
	}
	err = session.RequestPty("xterm", 80, 1024, modes)
	if err != nil {
		log.Error("failed to request pty: " + err.Error())
		return err
	}
	err = session.Shell()
	if err != nil {
		log.Error("failed to get shell: " + err.Error())
		return err
	}
	return nil
}
// parseResult converts tab-split CLI output rows into key/value maps, driven
// by the per-command layout in cli_response.yml. Each yaml entry is processed
// in order and consumes rows from resultArray:
//   - "status":        one row with the exit status; non-zero aborts (the
//     following row, when present, is kept as "errorCode")
//   - "count":         one row with the (hex) number of data rows
//   - "data":          one row mapped via getData (skipped when drop is set)
//   - "multiple_data": count rows mapped via getData
func (c *EternusClient) parseResult(cmd string, resultArray [][]string) ([]map[string]string, error) {
	// read cli config file
	yamlConfig, err := ioutil.ReadFile(c.cliConfPath)
	if err != nil {
		log.Error("failed to read cli_response.yml: " + err.Error())
		return nil, err
	}
	// parse yaml
	var config map[string]map[string]([]map[string]interface{})
	err = yaml.Unmarshal(yamlConfig, &config)
	// Fix: the unmarshal error was previously assigned but never checked, so
	// a broken config file fell through to a nil map and produced confusing
	// downstream errors instead of failing here.
	if err != nil {
		log.Error("failed to parse cli_response.yml: " + err.Error())
		return nil, err
	}
	// get config for specified cmd (spaces become underscores in yaml keys)
	cmdConfig := config["cli"][strings.Replace(cmd, " ", "_", -1)]
	// parse resultArray
	var ret []map[string]string
	resultIndex := 0
	var dataCount int
	for _, v := range cmdConfig {
		switch v["type"] {
		case "status":
			var status int
			// check for correct response
			if len(resultArray) > resultIndex {
				// get response status
				if len(resultArray[resultIndex]) == 1 {
					status, _ = strconv.Atoi(resultArray[resultIndex][0])
				} else {
					return nil, errors.New("error response. Failed to get status")
				}
			} else {
				return nil, errors.New("error response. Failed to get status")
			}
			// non-zero status: surface the following error-code row if any
			if status != 0 {
				if len(resultArray) > (resultIndex+1) &&
					len(resultArray[resultIndex+1]) == 1 {
					errorCode := map[string]string{
						"errorCode": resultArray[resultIndex+1][0],
					}
					ret = append(ret, errorCode)
				}
				return ret, errors.New("error response. Command execute error")
			}
			resultIndex++
		case "count":
			// check for correct response
			if len(resultArray) > resultIndex {
				// get data count (hex encoded)
				if len(resultArray[resultIndex]) == 1 {
					tmpCount, _ := strconv.ParseInt(resultArray[resultIndex][0], 16, 64)
					dataCount = int(tmpCount)
				} else {
					fmt.Println(resultArray[resultIndex])
					return nil, errors.New("error response. Failed to get count")
				}
			} else {
				return nil, errors.New("error response. Failed to get count")
			}
			if v["if_zero_skip_all"] == true && dataCount == 0 {
				break
			}
			resultIndex++
		case "data":
			// rows flagged drop are consumed but not returned
			if v["drop"] == true {
				resultIndex++
				continue
			}
			// check for correct response
			if len(resultArray) > resultIndex {
				// get single data
				datas := v["data"].([]interface{})
				result, err := c.getData(datas, resultArray[resultIndex])
				if err != nil {
					return nil, err
				}
				ret = append(ret, result)
				resultIndex++
			} else {
				return nil, errors.New("error response. Failed to get data")
			}
		case "multiple_data":
			// get multiple data, data count = dataCount variable
			datas := v["data"].([]interface{})
			for j := 0; j < dataCount; j++ {
				// rows flagged drop are consumed but not returned
				if v["drop"] == true {
					resultIndex++
					continue
				}
				if len(resultArray) > resultIndex {
					result, err := c.getData(datas, resultArray[resultIndex])
					if err != nil {
						return nil, err
					}
					ret = append(ret, result)
					resultIndex++
				} else {
					return nil, errors.New("error response. Failed to get multiple_data")
				}
			}
		}
	}
	return ret, nil
}
// getData maps one tab-split CLI output row onto the parameter names from the
// yaml definition, failing when the row has fewer columns than expected.
func (c *EternusClient) getData(datas []interface{}, result []string) (map[string]string, error) {
	row := map[string]string{}
	for idx, def := range datas {
		// each definition entry carries the target key under "name"
		name := def.(map[interface{}]interface{})["name"].(string)
		if idx >= len(result) {
			errMsg := "the response is inconsistent with the response def"
			return nil, errors.New(errMsg)
		}
		row[name] = result[idx]
	}
	return row, nil
}
// convStringArray turns raw CLI output into tab-split rows, dropping the
// empty leading line, the echoed command, and everything from the closing
// prompt onward.
func (c *EternusClient) convStringArray(cmd string, result *bytes.Buffer) [][]string {
	rows := [][]string{}
	for _, line := range strings.Split(result.String(), "\r\n") {
		switch {
		case line == "":
			// empty first element
			continue
		case strings.HasPrefix(line, "CLI> "+cmd):
			// echo-back of the command itself
			continue
		case line == "CLI> ":
			// trailing prompt: nothing further to parse
			return rows
		}
		rows = append(rows, strings.Split(line, "\t"))
	}
	return rows
}
// ListStoragePools returns every thin-provisioning pool with total and free
// capacity converted from hex-encoded LBA counts (fractions truncated).
func (c *EternusClient) ListStoragePools() ([]StoragePool, error) {
	var pools []StoragePool
	rows, err := c.request("show thin-pro-pools", nil)
	if err != nil {
		return pools, err
	}
	for _, row := range rows {
		id, _ := strconv.ParseInt(row["tpp_number"], 16, 64)
		total, _ := strconv.ParseInt(row["total_capacity"], 16, 64)
		used, _ := strconv.ParseInt(row["used_capacity"], 16, 64)
		// capacities arrive as LBA counts; cut off after the decimal point
		total = total / LBASize
		used = used / LBASize
		pools = append(pools, StoragePool{
			Id:            strconv.FormatInt(id, 10),
			Name:          row["tpp_name"],
			TotalCapacity: total,
			FreeCapacity:  total - used,
		})
	}
	return pools, nil
}
// ListAllStoragePools returns the same pool list as ListStoragePools; there
// is currently no distinction between "all" and the default listing.
func (c *EternusClient) ListAllStoragePools() ([]StoragePool, error) {
	return c.ListStoragePools()
}
// GetVolume fetches a single volume by its volume number.
func (c *EternusClient) GetVolume(lunID string) (Volume, error) {
	return c.execGetVolume(map[string]string{
		"volume-number": lunID,
	})
}
// GetVolumeByName fetches a single volume by its name.
func (c *EternusClient) GetVolumeByName(name string) (Volume, error) {
	return c.execGetVolume(map[string]string{
		"volume-name": name,
	})
}
// execGetVolume runs "show volumes" with the given selector and converts the
// first returned row into a Volume; hex-encoded numbers are decoded and the
// size is converted from an LBA count.
func (c *EternusClient) execGetVolume(param map[string]string) (Volume, error) {
	var vol Volume
	ret, err := c.request("show volumes", param)
	if err != nil {
		log.Error("failed to get volume information: " + err.Error())
		return vol, err
	}
	// Fix: ret[0] was indexed unchecked and panicked when the command
	// succeeded but produced no rows.
	if len(ret) == 0 {
		return vol, errors.New("show volumes returned no result")
	}
	v := ret[0]
	id, _ := strconv.ParseInt(v["volume_number"], 16, 64)
	poolID, _ := strconv.ParseInt(v["pool_number"], 16, 64)
	size, _ := strconv.ParseInt(v["size"], 16, 64)
	size = size / LBASize
	vol.Id = strconv.FormatInt(id, 10)
	vol.Name = v["volume_name"]
	vol.Size = size
	vol.Status = v["status"]
	vol.PoolName = v["pool_name"]
	vol.PoolId = strconv.FormatInt(poolID, 10)
	return vol, nil
}
// CreateVolume creates a TPV volume in the given pool and returns it with the
// array-assigned id. The volume name is an FNV hash of the driver-side id
// because the array limits name length; desc is currently unused.
func (c *EternusClient) CreateVolume(id string, size int64, desc string,
	poolName string, provPolicy string) (Volume, error) {
	// use hash value because eternus has limitation of name length
	name := GetFnvHash(id)
	sizeGB := fmt.Sprintf("%dgb", size)
	allocation := "thin"
	if provPolicy != "Thin" {
		allocation = "thick"
	}
	param := map[string]string{
		"name":       name,
		"size":       sizeGB,
		"pool-name":  poolName,
		"type":       "tpv",
		"allocation": allocation,
	}
	var vol Volume
	ret, err := c.request(CreateVolume, param)
	if err != nil {
		log.Error("failed to create volume: " + err.Error())
		return vol, err
	}
	// Fix: ret[0] was indexed unchecked and panicked when the command
	// succeeded but produced no rows.
	if len(ret) == 0 {
		return vol, errors.New("create volume returned no result")
	}
	convID, _ := strconv.ParseInt(ret[0]["volume_number"], 16, 64)
	vol.Id = strconv.FormatInt(convID, 10)
	vol.Name = name
	vol.Size = size
	return vol, nil
}
// DeleteVolume removes the volume identified by its volume number.
func (c *EternusClient) DeleteVolume(volumeNumber string) error {
	opts := map[string]string{
		"volume-number": volumeNumber,
	}
	if _, err := c.request("delete volume", opts); err != nil {
		log.Error("failed to delete volume: " + err.Error())
		return err
	}
	return nil
}
// ExtendVolume grows the volume to the given size in gigabytes.
func (c *EternusClient) ExtendVolume(volumeNumber string, size int64) error {
	opts := map[string]string{
		"volume-number": volumeNumber,
		"size":          strconv.FormatInt(size, 10) + "gb",
	}
	if _, err := c.request("expand volume", opts); err != nil {
		log.Error("failed to expand volume: " + err.Error())
		return err
	}
	return nil
}
// AddIscsiHostWithCheck registers an iSCSI host (IQN + IP address). When an
// entry with the same IQN and IP already exists, its id is returned instead
// of creating a duplicate. The bool result reports whether the host already
// existed.
func (c *EternusClient) AddIscsiHostWithCheck(name string, iscsiName string, ipAddr string) (string, bool, error) {
	// check duplicate host. if already exists, return the exist host id.
	ret, err := c.request("show host-iscsi-names", nil)
	if err != nil {
		log.Error("failed to show host-iscsi-names: " + err.Error())
		return "", false, err
	}
	for _, v := range ret {
		// ip_version "00" marks an IPv4 address in the CLI output
		ipStr := ""
		if v["ip_version"] == "00" {
			ipStr = ParseIPv4(v["host_iscsi_ip_address"])
		} else {
			ipStr = ParseIPv6(v["host_iscsi_ip_address"])
		}
		if v["host_iscsi_name"] == iscsiName && EqualIP(ipStr, ipAddr) {
			// duplicate found: hand back the existing host number (hex in CLI)
			hostNumber, _ := strconv.ParseInt(v["host_iscsi_number"], 16, 64)
			return strconv.FormatInt(hostNumber, 10), true, nil
		}
	}
	// create new host
	ipVersion := "ipv4"
	if !IsIPv4(ipAddr) {
		ipVersion = "ipv6"
	}
	param := map[string]string{
		"iscsi-name": iscsiName,
		"ip-version": ipVersion,
		"name": name,
	}
	if ipAddr != "" {
		param["ip"] = ipAddr
	}
	ret, err = c.request("create host-iscsi-name", param)
	if err != nil {
		log.Error("failed to create host-iscsi-name: " + err.Error())
		return "", false, err
	}
	v := ret[0]
	hostNumber, _ := strconv.ParseInt(v["host_number"], 16, 64)
	return strconv.FormatInt(hostNumber, 10), false, nil
}
// DeleteIscsiHostByName deletes an iSCSI host entry by its host name.
// A not-found error from the array is treated as success (already deleted).
func (c *EternusClient) DeleteIscsiHostByName(name string) error {
	param := map[string]string{
		"host-name": name,
	}
	ret, err := c.request("delete host-iscsi-name", param)
	if err != nil {
		// idempotent delete: swallow "not found"
		if len(ret) == 1 && ret[0]["errorCode"] == NotFound {
			log.Info("target iscsi host already deleted")
			return nil
		}
		log.Error("failed to delete host-iscsi-name: " + err.Error())
		return err
	}
	return nil
}
// GetLunGroupByName fetches a LUN group and its member volumes by group name,
// decoding hex-encoded numbers and converting sizes from LBA counts.
func (c *EternusClient) GetLunGroupByName(name string) (LunGroup, error) {
	group := LunGroup{}
	rows, err := c.request("show lun-group", map[string]string{
		"lg-name": name,
	})
	if err != nil {
		log.Error("failed to show lun-group: " + err.Error())
		return group, err
	}
	members := []LunGroupVolume{}
	for _, row := range rows {
		volID, _ := strconv.ParseInt(row["volume_no"], 16, 64)
		hostLun, _ := strconv.ParseInt(row["lun"], 16, 64)
		lbas, _ := strconv.ParseInt(row["total_capacity"], 16, 64)
		members = append(members, LunGroupVolume{
			Id:          strconv.FormatInt(volID, 10),
			Name:        row["volume_name"],
			RawStatus:   row["volume_raw_status"],
			RoundStatus: row["volume_round_status"],
			Size:        lbas / LBASize,
			Uid:         row["uid"],
			Lun:         strconv.FormatInt(hostLun, 10),
		})
	}
	group.Volumes = members
	return group, nil
}
// AddLunGroupWithCheck adds the volume to the named LUN group, creating the
// group (with the volume at LUN 0) when it does not exist yet, and returns
// the LUN group number as a decimal string.
func (c *EternusClient) AddLunGroupWithCheck(lgName string, lunID string) (string, error) {
	// check whether the group already exists
	ret, err := c.request("show lun-groups", nil)
	if err != nil {
		log.Error("failed to show lun-groups: " + err.Error())
		return "", err
	}
	lgNumberStr := ""
	for _, v := range ret {
		if v["lun_group_name"] == lgName {
			// Fix: CLI numbers are hex-encoded throughout this file (see the
			// lun_group_number parse below); this lookup previously used
			// base 10, yielding wrong group numbers for values >= 10.
			lgNumber, _ := strconv.ParseInt(v["lun_group_no"], 16, 64)
			lgNumberStr = strconv.FormatInt(lgNumber, 10)
			break
		}
	}
	// if already exists for the target host, add volume to the lunGrp.
	if lgNumberStr != "" {
		param := map[string]string{
			"volume-number": lunID,
			"lg-number":     lgNumberStr,
		}
		if _, err = c.request("set lun-group", param); err != nil {
			log.Error("failed to set lun-group: " + err.Error())
			return "", err
		}
		return lgNumberStr, nil
	}
	// if does not exists for the target host, create new lunGrp.
	lun := "0"
	param := map[string]string{
		"name":          lgName,
		"volume-number": lunID,
		"lun":           lun,
	}
	ret, err = c.request("create lun-group", param)
	if err != nil {
		log.Error("failed to create lun-group: " + err.Error())
		return "", err
	}
	v := ret[0]
	lunNumber, _ := strconv.ParseInt(v["lun_group_number"], 16, 64)
	return strconv.FormatInt(lunNumber, 10), nil
}
// RemoveVolumeFromLunGroup detaches a LUN from the named LUN group.
func (c *EternusClient) RemoveVolumeFromLunGroup(lunID string, lgName string) error {
	opts := map[string]string{
		"lg-name": lgName,
		"lun":     lunID,
	}
	if _, err := c.request("delete lun-group", opts); err != nil {
		log.Error("failed to remove volume from lun-group: " + err.Error())
		return err
	}
	return nil
}
// DeleteLunGroupByName deletes a LUN group by name, treating a not-found
// error from the array as success (already deleted).
func (c *EternusClient) DeleteLunGroupByName(lgName string) error {
	ret, err := c.request("delete lun-group", map[string]string{
		"lg-name": lgName,
	})
	if err == nil {
		return nil
	}
	// idempotent delete: swallow "not found"
	if len(ret) == 1 && ret[0]["errorCode"] == NotFound {
		log.Info("target lun group already deleted")
		return nil
	}
	log.Error("failed to delete lun-group: " + err.Error())
	return err
}
// GetIscsiPortInfo selects a usable iSCSI CA port (honoring the host-affinity
// requirement) and returns its addressing details. ceSupport switches the
// port-number formatting scheme inside getConnectionPort.
func (c *EternusClient) GetIscsiPortInfo(ceSupport bool, needHostAffinity bool) (IscsiPortInfo, error) {
	portInfo := IscsiPortInfo{}
	// select port
	ret, err := c.request("show iscsi-parameters", nil)
	if err != nil {
		log.Error("failed to get iscsi-parameters: " + err.Error())
		return portInfo, err
	}
	usePort, portNumber := c.getConnectionPort(ret, ceSupport, needHostAffinity)
	if portNumber == "" {
		msg := "there is no iscsi port."
		log.Error(msg)
		return portInfo, errors.New(msg)
	}
	// numeric fields are hex strings in the CLI output
	tcpPort, _ := strconv.ParseInt(usePort["tcp_port_number"], 16, 64)
	isnsPort, _ := strconv.ParseInt(usePort["isns_server_port"], 16, 64)
	portInfo.PortNumber = portNumber
	portInfo.IscsiName = usePort["iscsi_name"]
	portInfo.Ip = usePort["ip_address"]
	portInfo.TcpPort = int(tcpPort)
	portInfo.IsnsServerIp = usePort["isns_server_ip"]
	portInfo.IsnsServerPort = int(isnsPort)
	return portInfo, nil
}
// GetFcPortInfo selects a usable FC CA port (honoring the host-affinity
// requirement) and returns its port number and WWPN.
func (c *EternusClient) GetFcPortInfo(ceSupport bool, needHostAffinity bool) (FcPortInfo, error) {
	info := FcPortInfo{}
	rows, err := c.request("show fc-parameters", nil)
	if err != nil {
		log.Error("failed to get fc-parameters: " + err.Error())
		return info, err
	}
	port, number := c.getConnectionPort(rows, ceSupport, needHostAffinity)
	if number == "" {
		msg := "there is no fc port."
		log.Error(msg)
		return info, errors.New(msg)
	}
	info.PortNumber = number
	info.Wwpn = port["wwpn"]
	return info, nil
}
// getConnectionPort picks the first CA (or CA/RA) port whose host-affinity
// flag matches the caller's requirement, and formats its port number. It
// returns an empty map and "" when no port qualifies.
func (c *EternusClient) getConnectionPort(portList []map[string]string,
	ceSupport bool, needHostAffinity bool) (map[string]string, string) {
	selected := map[string]string{}
	for _, p := range portList {
		// port_mode "00" and "04" are the only acceptable modes (CA, CA/RA)
		if p["port_mode"] != "00" && p["port_mode"] != "04" {
			continue
		}
		// Equivalent to the original two-branch check: take the port when
		// (host_affinity == "00") matches needHostAffinity.
		if (p["host_affinity"] == "00") == needHostAffinity {
			selected = p
			break
		}
	}
	if len(selected) == 0 {
		return selected, ""
	}
	// ceSupport switches between the two port-number formatting schemes
	if ceSupport {
		return selected, GetPortNumberV2(selected["ca_module_id"], selected["port_number"])
	}
	return selected, GetPortNumber(selected["ca_module_id"], selected["port_number"])
}
// AddHostAffinity binds a LUN group to a host on the given port and returns
// the new LUN mask group number as a decimal string.
func (c *EternusClient) AddHostAffinity(lunGrpID string, hostID string, iscsiPort string) (string, error) {
	ret, err := c.request("set host-affinity", map[string]string{
		"port":        iscsiPort,
		"lg-number":   lunGrpID,
		"host-number": hostID,
	})
	if err != nil {
		log.Error("failed to set host-affinity: " + err.Error())
		return "", err
	}
	// the CLI reports the mask group number in hex
	maskNo, _ := strconv.ParseInt(ret[0]["lun_mask_group_no"], 16, 64)
	return strconv.FormatInt(maskNo, 10), nil
}
// DeleteHostAffinity releases all host-affinity settings for a host on the
// given port; a not-found error is treated as already released.
func (c *EternusClient) DeleteHostAffinity(portNumber string, hostname string) error {
	ret, err := c.request("release host-affinity", map[string]string{
		"port":      portNumber,
		"host-name": hostname,
		"mode":      "all",
	})
	if err == nil {
		return nil
	}
	// idempotent release: swallow "not found"
	if len(ret) == 1 && ret[0]["errorCode"] == NotFound {
		log.Info("target host affinity already deleted")
		return nil
	}
	log.Error("failed to release host-affinity: " + err.Error())
	return err
}
// GetHostLunID looks up the host LUN number mapped to volume lunID within the
// LUN group lunGrpID, returning it as a decimal string ("0" when not found).
func (c *EternusClient) GetHostLunID(lunGrpID string, lunID string) (string, error) {
	ret, err := c.request("show lun-group", map[string]string{"lg-number": lunGrpID})
	if err != nil {
		log.Error("failed to get lun-group: " + err.Error())
		return "", err
	}
	var hostLun int64
	for _, row := range ret {
		// volume_no is hex in the CLI output; compare in decimal form.
		volNo, _ := strconv.ParseInt(row["volume_no"], 16, 64)
		if strconv.FormatInt(volNo, 10) != lunID {
			continue
		}
		hostLun, _ = strconv.ParseInt(row["lun"], 16, 64)
	}
	return strconv.FormatInt(hostLun, 10), nil
}
// AddFcHostWithCheck registers an FC host by WWN. If a host with the same WWN
// already exists its id is returned with exists=true; otherwise a new host is
// created and its id is returned with exists=false.
func (c *EternusClient) AddFcHostWithCheck(name string, wwnName string) (string, bool, error) {
	// Check for a duplicate host. If it already exists, return the existing host id.
	ret, err := c.request("show host-wwn-names", nil)
	if err != nil {
		log.Error("failed to show host-wwn-names: " + err.Error())
		return "", false, err
	}
	for _, v := range ret {
		// Compare WWNs case-insensitively; EqualFold avoids the two ToUpper
		// allocations of the previous comparison.
		if strings.EqualFold(v["host_wwn_name"], wwnName) {
			hostNumber, _ := strconv.ParseInt(v["host_wwn_no"], 16, 64)
			return strconv.FormatInt(hostNumber, 10), true, nil
		}
	}
	// Create a new host.
	param := map[string]string{
		"wwn":  wwnName,
		"name": name,
	}
	ret, err = c.request("create host-wwn-name", param)
	if err != nil {
		log.Error("failed to create host-wwn-name: " + err.Error())
		return "", true, err
	}
	// Guard against an empty result set; indexing ret[0] unconditionally
	// previously risked an index-out-of-range panic.
	if len(ret) == 0 {
		msg := "create host-wwn-name returned no data"
		log.Error(msg)
		return "", false, errors.New(msg)
	}
	v := ret[0]
	hostNumber, _ := strconv.ParseInt(v["host_number"], 16, 64)
	return strconv.FormatInt(hostNumber, 10), false, nil
}
// DeleteFcHostByName removes the host-wwn-name entry registered under name.
func (c *EternusClient) DeleteFcHostByName(name string) error {
	_, err := c.request("delete host-wwn-name", map[string]string{"host-name": name})
	if err != nil {
		log.Error("failed to delete host-wwn-name: " + err.Error())
	}
	return err
}
// ListMapping returns the LUN mappings configured on the given port.
func (c *EternusClient) ListMapping(port string) ([]Mapping, error) {
	ret, err := c.request("show mapping", map[string]string{"port": port})
	if err != nil {
		log.Error("failed to show mapping: " + err.Error())
		return nil, err
	}
	mappings := []Mapping{}
	for _, row := range ret {
		// Numeric fields come back in hex; sizes are in LBA units.
		lun, _ := strconv.ParseInt(row["lun"], 16, 64)
		volNo, _ := strconv.ParseInt(row["volume_number"], 16, 64)
		rawSize, _ := strconv.ParseInt(row["volume_size"], 16, 64)
		mappings = append(mappings, Mapping{
			Lun:               strconv.FormatInt(lun, 10),
			VolumeNumber:      strconv.FormatInt(volNo, 10),
			VolumeName:        row["volume_name"],
			VolumeRawStatus:   row["volume_raw_status"],
			VolumeRoundStatus: row["volume_round_status"],
			VolumeSize:        rawSize / LBASize,
		})
	}
	return mappings, nil
}
// AddMapping maps volume lunID to host LUN hostLunID on the given port.
func (c *EternusClient) AddMapping(lunID string, hostLunID string, port string) error {
	_, err := c.request("set mapping", map[string]string{
		"port":          port,
		"volume-number": lunID,
		"lun":           hostLunID,
	})
	if err != nil {
		log.Error("failed to set mapping: " + err.Error())
	}
	return err
}
// DeleteMapping releases the mapping of hostLunID on the given port.
func (c *EternusClient) DeleteMapping(hostLunID string, port string) error {
	// Parameter renamed from "Port": Go convention uses lowerCamelCase for
	// parameters, and the exported-looking name was misleading.
	param := map[string]string{
		"port": port,
		"lun":  hostLunID,
	}
	_, err := c.request("release mapping", param)
	if err != nil {
		log.Error("failed to release mapping: " + err.Error())
		return err
	}
	return nil
}
// CreateSnapshot starts an advanced-copy session from srcLunID to destLunID
// (admin role).
func (c *EternusClient) CreateSnapshot(srcLunID string, destLunID string) error {
	_, err := c.request("start advanced-copy", map[string]string{
		"source-volume-number":      srcLunID,
		"destination-volume-number": destLunID,
	})
	if err != nil {
		log.Error("failed to start advanced-copy: " + err.Error())
	}
	return err
}
// ListSnapshot lists SnapOPC+ advanced-copy sessions (admin role). It parses
// the CLI's table output: the first five lines are headers, and the trailing
// "CLI> " prompt terminates the table.
func (c *EternusClient) ListSnapshot() ([]SnapShot, error) {
	param := map[string]string{
		"type": "sopc+",
	}
	cmd := "show advanced-copy-sessions"
	ret, err := c.requestForAdmin(cmd, param)
	if err != nil {
		log.Error("failed to show advanced-copy-sessions: " + err.Error())
		return nil, err
	}
	output := [][]string{}
	for i, s := range strings.Split(ret.String(), "\r\n") {
		// ignore the empty first element and the header lines
		if i < 5 {
			continue
		}
		// the prompt marks the end of the table; stop parsing
		if s == "CLI> " {
			break
		}
		output = append(output, strings.Split(s, " "))
	}
	snapshotList := []SnapShot{}
	for _, v := range output {
		sp := []string{}
		snapshot := SnapShot{}
		for _, e := range v {
			if e != "" {
				sp = append(sp, e)
			}
		}
		// Skip short or malformed rows: indexing sp[0]..sp[12] on such a row
		// previously caused an index-out-of-range panic.
		if len(sp) < 13 {
			continue
		}
		snapshot.Sid = sp[0]
		snapshot.Gen = sp[1]
		snapshot.GenTotal = sp[2]
		snapshot.Type = sp[3]
		snapshot.VolumeType = sp[4]
		snapshot.SrcNo = sp[5]
		snapshot.SrcName = sp[6]
		snapshot.DestNo = sp[7]
		snapshot.DestName = sp[8]
		snapshot.Status = sp[9]
		snapshot.Phase = sp[10]
		snapshot.ErrorCode = sp[11]
		snapshot.Requestor = sp[12]
		snapshotList = append(snapshotList, snapshot)
	}
	return snapshotList, nil
}
// DeleteSnapshot stops the advanced-copy session sid (admin role). A
// "not found" error from the array is treated as already deleted.
func (c *EternusClient) DeleteSnapshot(sid string) error {
	param := map[string]string{
		"session-id": sid,
	}
	_, err := c.requestForAdmin("stop advanced-copy", param)
	if err != nil {
		log.Error("failed to stop advanced-copy: " + err.Error())
		// The error text is expected to carry the code as its second
		// space-separated field ("E" + code). Guard the index: the previous
		// unconditional [1] access panicked on messages without a space.
		fields := strings.Split(err.Error(), " ")
		if len(fields) > 1 && fields[1] == "E"+NotFound {
			log.Info("target snapshot session already deleted. Ignore the error.")
			return nil
		}
		return err
	}
	return nil
}
|
package routeshandlers
import (
"net/http"
"github.com/gin-gonic/gin"
)
// GetAllNoDataJSON replies 404 with a generic "No data" message.
func GetAllNoDataJSON(c *gin.Context) {
	payload := gin.H{"msg": "No data"}
	c.JSON(http.StatusNotFound, payload)
}
// Saved reports a successful save.
func Saved(c *gin.Context) {
	payload := gin.H{"msg": "Saved"}
	c.JSON(http.StatusOK, payload)
}
// Created replies 201 with the created resource attached under "data".
func Created(c *gin.Context, data interface{}) {
	payload := gin.H{"msg": "Created", "data": data}
	c.JSON(http.StatusCreated, payload)
}
// BadRequest replies 400 with a generic message.
func BadRequest(c *gin.Context) {
	payload := gin.H{"msg": "Bad request"}
	c.JSON(http.StatusBadRequest, payload)
}
// Deleted reports a successful deletion.
func Deleted(c *gin.Context) {
	payload := gin.H{"msg": "deleted"}
	c.JSON(http.StatusOK, payload)
}
// Unauthorized replies 401 and aborts the remaining handler chain.
func Unauthorized(c *gin.Context) {
	payload := gin.H{"msg": "Unauthorized"}
	c.JSON(http.StatusUnauthorized, payload)
	c.Abort()
}
// Forbidden replies 403 and aborts the remaining handler chain.
func Forbidden(c *gin.Context) {
	payload := gin.H{"msg": "Forbidden"}
	c.JSON(http.StatusForbidden, payload)
	c.Abort()
}
|
package main
// countPrimes returns the number of primes strictly below n, using the sieve
// of Eratosthenes.
func countPrimes(n int) int {
	composite := make([]bool, n)
	count := 0
	for p := 2; p < n; p++ {
		if composite[p] {
			continue
		}
		// p survived the sieve, so it is prime; mark its multiples starting
		// at p*p (smaller multiples were marked by smaller primes).
		for multiple := p * p; multiple < n; multiple += p {
			composite[multiple] = true
		}
		count++
	}
	return count
}
|
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
)
// main solves the "cut the sticks" problem: read n stick lengths from stdin,
// then repeatedly cut every stick longer than the current shortest stick,
// printing how many sticks were cut in each round until none remain.
func main() {
	// scan returns the next whitespace-separated integer from stdin; a parse
	// failure silently yields 0 (competitive-programming style).
	scan := func() func() int {
		scan := bufio.NewScanner(os.Stdin)
		scan.Split(bufio.ScanWords)
		return func() int {
			scan.Scan()
			i, _ := strconv.Atoi(scan.Text())
			return i
		}
	}()
	n := scan()
	sticks := make([]int, n)
	for i := range sticks {
		sticks[i] = scan()
	}
	cut := 0
	for {
		var cnt int
		// cnt is the number of sticks still longer than the current cut
		// length; the next cut length is the shortest surviving stick.
		cnt, cut = cutBy(sticks, cut)
		if cnt == 0 {
			break
		}
		fmt.Println(cnt)
	}
}
// cutBy counts the sticks in s strictly longer than n and returns that count
// together with the shortest such stick (math.MaxInt32 when none qualify).
func cutBy(s []int, n int) (int, int) {
	count, shortest := 0, math.MaxInt32
	for _, length := range s {
		if length <= n {
			continue
		}
		count++
		if length < shortest {
			shortest = length
		}
	}
	return count, shortest
}
|
package group
import (
"Open_IM/internal/push/content_struct"
"Open_IM/internal/push/logic"
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/constant"
"Open_IM/pkg/common/db"
"Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/log"
"Open_IM/pkg/grpc-etcdv3/getcdv3"
pbChat "Open_IM/pkg/proto/chat"
pbGroup "Open_IM/pkg/proto/group"
"Open_IM/pkg/utils"
"context"
"google.golang.org/grpc"
"net"
"strconv"
"strings"
"time"
)
// groupServer implements the group gRPC service and carries the settings
// needed to register itself with etcd.
type groupServer struct {
	rpcPort         int      // TCP port the gRPC server listens on
	rpcRegisterName string   // service name advertised in etcd
	etcdSchema      string   // etcd naming schema prefix
	etcdAddr        []string // etcd endpoints
}
// NewGroupServer builds a group RPC server listening on port, wiring its etcd
// registration settings from the global configuration. It also initializes
// the "group" private log.
func NewGroupServer(port int) *groupServer {
	log.NewPrivateLog("group")
	srv := groupServer{
		rpcPort:         port,
		rpcRegisterName: config.Config.RpcRegisterName.OpenImGroupName,
		etcdSchema:      config.Config.Etcd.EtcdSchema,
		etcdAddr:        config.Config.Etcd.EtcdAddr,
	}
	return &srv
}
// Run starts the group RPC service: listen on the configured port, register
// the service implementation and its etcd entry, then serve until the
// listener fails. It blocks for the lifetime of the server.
func (s *groupServer) Run() {
	log.Info("", "", "rpc group init....")
	ip := utils.ServerIP
	registerAddress := ip + ":" + strconv.Itoa(s.rpcPort)
	//listener network
	listener, err := net.Listen("tcp", registerAddress)
	if err != nil {
		log.InfoByArgs("listen network failed,err=%s", err.Error())
		return
	}
	log.Info("", "", "listen network success, address = %s", registerAddress)
	defer listener.Close()
	//grpc server; GracefulStop drains in-flight RPCs when Run returns
	srv := grpc.NewServer()
	defer srv.GracefulStop()
	//Service registers with etcd (TTL 10) after the gRPC handler is attached
	pbGroup.RegisterGroupServer(srv, s)
	err = getcdv3.RegisterEtcd(s.etcdSchema, strings.Join(s.etcdAddr, ","), ip, s.rpcPort, s.rpcRegisterName, 10)
	if err != nil {
		log.ErrorByArgs("get etcd failed,err=%s", err.Error())
		return
	}
	// Serve blocks until the listener is closed or fails.
	err = srv.Serve(listener)
	if err != nil {
		log.ErrorByArgs("listen rpc_group error,err=%s", err.Error())
		return
	}
	log.Info("", "", "rpc create group init success")
}
// CreateGroup handles the create-group RPC: it validates the caller's token,
// generates a group id (nanosecond timestamp + MD5), persists the group, adds
// the owner as a member (unless the caller is an app manager), binds the
// requested member list, and — when the creator is a manager — pushes a
// creation notification over websocket. Errors are reported via the response
// error code, never as a non-nil Go error.
func (s *groupServer) CreateGroup(ctx context.Context, req *pbGroup.CreateGroupReq) (*pbGroup.CreateGroupResp, error) {
	log.InfoByArgs("rpc create group is server,args=%s", req.String())
	var groupId string
	// Parse token to find the current user information.
	claims, err := utils.ParseToken(req.Token)
	if err != nil {
		log.Error(req.Token, req.OperationID, "err=%s,parse token failed", err.Error())
		return &pbGroup.CreateGroupResp{ErrorCode: config.ErrParseToken.ErrCode, ErrorMsg: config.ErrParseToken.ErrMsg}, nil
	}
	// Timestamp + MD5 generates the group chat id.
	groupId = utils.Md5(strconv.FormatInt(time.Now().UnixNano(), 10))
	err = im_mysql_model.InsertIntoGroup(groupId, req.GroupName, req.Introduction, req.Notification, req.FaceUrl, req.Ex)
	if err != nil {
		log.ErrorByKv("create group chat failed", req.OperationID, "err=%s", err.Error())
		return &pbGroup.CreateGroupResp{ErrorCode: config.ErrCreateGroup.ErrCode, ErrorMsg: config.ErrCreateGroup.ErrMsg}, nil
	}
	// isManagerFlag (renamed from the misspelled isMagagerFlag) is 1 when the
	// caller is one of the configured app managers.
	isManagerFlag := 0
	tokenUid := claims.UID
	if utils.IsContain(tokenUid, config.Config.Manager.AppManagerUid) {
		isManagerFlag = 1
	}
	if isManagerFlag == 0 {
		// Add the group owner to the group first, otherwise the group creation will fail.
		us, err := im_mysql_model.FindUserByUID(claims.UID)
		if err != nil {
			log.Error("", req.OperationID, "find userInfo failed", err.Error())
			return &pbGroup.CreateGroupResp{ErrorCode: config.ErrCreateGroup.ErrCode, ErrorMsg: config.ErrCreateGroup.ErrMsg}, nil
		}
		err = im_mysql_model.InsertIntoGroupMember(groupId, claims.UID, us.Name, us.Icon, constant.GroupOwner)
		if err != nil {
			log.Error("", req.OperationID, "create group chat failed,err=%s", err.Error())
			return &pbGroup.CreateGroupResp{ErrorCode: config.ErrCreateGroup.ErrCode, ErrorMsg: config.ErrCreateGroup.ErrMsg}, nil
		}
		err = db.DB.AddGroupMember(groupId, claims.UID)
		if err != nil {
			log.Error("", "", "create mongo group member failed, db.DB.AddGroupMember fail [err: %s]", err.Error())
			return &pbGroup.CreateGroupResp{ErrorCode: config.ErrCreateGroup.ErrCode, ErrorMsg: config.ErrCreateGroup.ErrMsg}, nil
		}
	}
	// Bind the group id to each requested member. Failures for individual
	// members are logged and skipped rather than failing the whole request.
	for _, user := range req.MemberList {
		us, err := im_mysql_model.FindUserByUID(user.Uid)
		if err != nil {
			log.Error("", req.OperationID, "find userInfo failed,uid=%s", user.Uid, err.Error())
			continue
		}
		err = im_mysql_model.InsertIntoGroupMember(groupId, user.Uid, us.Name, us.Icon, user.SetRole)
		if err != nil {
			log.ErrorByArgs("pull %s to group %s failed,err=%s", user.Uid, groupId, err.Error())
		}
		err = db.DB.AddGroupMember(groupId, user.Uid)
		if err != nil {
			log.Error("", "", "add mongo group member failed, db.DB.AddGroupMember fail [err: %s]", err.Error())
		}
	}
	if isManagerFlag == 1 {
		// Push a creation notification over websocket when an app manager
		// created the group.
		n := content_struct.NotificationContent{1, req.GroupName, groupId}
		logic.SendMsgByWS(&pbChat.WSToMsgSvrChatMsg{
			SendID:      claims.UID,
			RecvID:      groupId,
			Content:     n.ContentToString(),
			SendTime:    utils.GetCurrentTimestampByNano(),
			MsgFrom:     constant.SysMsgType,     // notification message identification
			ContentType: constant.CreateGroupTip, // create-group tip flag
			SessionType: constant.GroupChatType,
			OperationID: req.OperationID,
		})
	}
	log.Info(req.Token, req.OperationID, "rpc create group success return")
	return &pbGroup.CreateGroupResp{GroupID: groupId}, nil
}
|
package main
import "bufio"
import "fmt"
import "os"
import "sort"
import "strconv"
// input holds the problem input: the integers read from stdin, sorted
// ascending by getInput.
type input struct {
	arr []int
}
// main reads a list of integers (sorted by getInput) and prints the size of
// the largest group of values that pairwise differ by at most 1. Because the
// slice is sorted, it suffices to extend a window from each start index while
// arr[y]-arr[x] <= 1.
func main() {
	in := getInput()
	maxSetSize := 0
	// Bug fix: a non-empty input always admits a set of at least one element;
	// the previous version printed 0 for a single-element input because the
	// outer loop never ran.
	if len(in.arr) > 0 {
		maxSetSize = 1
	}
	for x := 0; x < len(in.arr)-1; x++ {
		currentSetSize := 1
		for y := x + 1; y < len(in.arr); y++ {
			if in.arr[y]-in.arr[x] <= 1 {
				currentSetSize++
			} else {
				// Sorted input: once the gap exceeds 1 it stays that way.
				break
			}
		}
		if currentSetSize > maxSetSize {
			maxSetSize = currentSetSize
		}
	}
	fmt.Println(maxSetSize)
}
// getInput reads a count followed by that many integers from stdin and
// returns them sorted ascending.
func getInput() (i input) {
	sc := bufio.NewScanner(os.Stdin)
	sc.Split(bufio.ScanWords)
	next := func() int {
		sc.Scan()
		n, _ := strconv.Atoi(sc.Text())
		return n
	}
	count := next()
	i.arr = make([]int, count)
	for idx := range i.arr {
		i.arr[idx] = next()
	}
	sort.Ints(i.arr)
	return i
}
|
/**
* Copyright (c) 2018 ZTE Corporation.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* ZTE - initial Project
*/
package models
// Names of the environment variables used to configure service addresses.
const (
	EnvConsulAddress  = "ConsulAddress" //http://localhost:8500
	EnvK8sAddress     = "K8sAddress"
	EnvMsbAddress     = "MsbAddress"
	EnvApiGatewayName = "MsbApiGatewayName" // default value "apigateway"
)
|
package gogen
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// Please note that this test suite refers to the
// test_fixtures/simple.go test file.
// ParseBaseTypeSuite exercises base-type parsing over the simple and complex
// fixture files; SetupTest repopulates all fields before each test.
type ParseBaseTypeSuite struct {
	suite.Suite
	build        *Build    // parsed simple fixture
	file         *File     // file entry of the simple fixture
	complexBuild *Build    // parsed complex fixture
	complexFile  *File     // file entry of the complex fixture
	iba          *BaseType // the "I" (int) base type from the simple fixture
	sba          *BaseType // the "S" (string) base type from the simple fixture
}
// SetupTest parses both fixture files before every test and caches the
// parsed builds, files, and the "I"/"S" base types the tests inspect.
func (s *ParseBaseTypeSuite) SetupTest() {
	var err error
	s.build, err = ParseFile(SimpleFilePath)
	assert.Equal(s.T(), nil, err)
	s.file = s.build.File(filepath.Base(SimpleFilePath))
	s.iba = s.file.BaseType("I")
	assert.NotEqual(s.T(), (*BaseType)(nil), s.iba)
	s.sba = s.file.BaseType("S")
	assert.NotEqual(s.T(), (*BaseType)(nil), s.sba)
	s.complexBuild, err = ParseFile(ComplexFilePath)
	assert.Equal(s.T(), nil, err)
	s.complexFile = s.complexBuild.File(filepath.Base(ComplexFilePath))
	assert.NotEqual(s.T(), (*File)(nil), s.complexFile)
}
// The parsing itself is exercised by the compiler; these tests only verify
// the results of ParseStruct, which ParseFile already invoked in SetupTest.
// TestParseInt checks the parsed "I" base type's name and underlying type.
func (s *ParseBaseTypeSuite) TestParseInt() {
	t := s.T()
	assert.Equal(t, "I", s.iba.Name())
	assert.Equal(t, "int", s.iba.Type())
}
// TestParseString checks the parsed "S" base type's name and underlying type.
func (s *ParseBaseTypeSuite) TestParseString() {
	t := s.T()
	assert.Equal(t, "S", s.sba.Name())
	assert.Equal(t, "string", s.sba.Type())
}
// TestParseBaseTypeSuite runs the ParseBaseTypeSuite suite under go test.
func TestParseBaseTypeSuite(t *testing.T) {
	suite.Run(t, new(ParseBaseTypeSuite))
}
|
package multierr
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// TestFormatterList checks FormatterList's output for zero, one, and many
// errors (singular/plural wording and the bulleted multi-error layout).
func TestFormatterList(t *testing.T) {
	// newErrors builds `length` numbered errors: error-1, error-2, ...
	newErrors := func(length int) []error {
		errs := make([]error, 0, length)
		for i := 0; i < length; i++ {
			errs = append(errs, fmt.Errorf("error-%d", i+1))
		}
		return errs
	}
	t.Run("len:0", func(t *testing.T) {
		require.Equal(t, "no error occurred", FormatterList(newErrors(0)))
	})
	t.Run("len:1", func(t *testing.T) {
		require.Equal(t, "1 error occurred: error-1", FormatterList(newErrors(1)))
	})
	t.Run("len:2", func(t *testing.T) {
		require.Equal(t, `2 errors occurred:
* error-1
* error-2
`, FormatterList(newErrors(2)))
	})
}
|
package cache
import (
"github.com/ben-han-cn/g53"
"github.com/ben-han-cn/vanguard/config"
"github.com/ben-han-cn/vanguard/core"
"github.com/ben-han-cn/vanguard/httpcmd"
"github.com/ben-han-cn/vanguard/metrics"
view "github.com/ben-han-cn/vanguard/viewselector"
)
// Cache is a DNS query handler that answers from per-view message caches
// before passing queries down the handler chain.
type Cache struct {
	core.DefaultHandler
	cache map[string]*ViewCache // view name -> cached messages for that view
}
// NewCache builds a Cache from conf and registers its cache-management HTTP
// commands.
func NewCache(conf *config.VanguardConf) core.DNSQueryHandler {
	cache := &Cache{}
	cache.ReloadConfig(conf)
	commands := []httpcmd.Command{
		&CleanCache{},
		&CleanViewCache{},
		&CleanDomainCache{},
		&CleanRRsetsCache{},
		&GetDomainCache{},
		&GetMessageCache{},
	}
	httpcmd.RegisterHandler(cache, commands)
	return cache
}
// ReloadConfig rebuilds the per-view cache map from conf, carrying over any
// existing view caches (resized to the configured capacity) and guaranteeing
// a cache for the default view.
func (c *Cache) ReloadConfig(conf *config.VanguardConf) {
	capacity := int(conf.Cache.MaxCacheSize)
	cache := make(map[string]*ViewCache)
	// The loop variable is named viewName (and the redundant ", _" removed):
	// the previous name shadowed the imported viewselector alias "view",
	// which is still referenced below the loop.
	for viewName := range view.GetViewAndIds() {
		if existing, ok := c.cache[viewName]; ok {
			cache[viewName] = existing
			existing.ResetCapacity(capacity)
		} else {
			cache[viewName] = newViewCache(capacity)
		}
	}
	if defaultCache, exist := cache[view.DefaultView]; !exist {
		cache[view.DefaultView] = newViewCache(capacity)
	} else {
		defaultCache.ResetCapacity(capacity)
	}
	c.cache = cache
}
// HandleQuery answers from the view cache when possible; on a miss it passes
// the query to the next handler and caches the eventual response when the
// client allows it.
func (c *Cache) HandleQuery(ctx *core.Context) {
	client := &ctx.Client
	msg, found := c.get(client)
	client.CacheHit = found
	if found {
		metrics.RecordCacheHit(client.View)
		// Shallow-copy the cached message so the per-request mutations below
		// do not alter the shared cache entry.
		response := *msg
		response.Header.Id = client.Request.Header.Id
		// A cached answer is not authoritative.
		response.Header.SetFlag(g53.FLAG_AA, false)
		response.Question = client.Request.Question
		client.Response = &response
	} else {
		core.PassToNext(c, ctx)
		if client.Response != nil && client.CacheAnswer {
			c.AddMessage(client.View, client.Response)
		}
	}
}
// AddMessage stores msg in the cache belonging to view, if that view exists.
func (c *Cache) AddMessage(view string, msg *g53.Message) {
	viewCache, known := c.cache[view]
	if !known {
		return
	}
	viewCache.Add(msg)
}
// get returns the cached response for the client's request within its view,
// or (nil, false) when the view has no cache or nothing matches.
func (c *Cache) get(client *core.Client) (*g53.Message, bool) {
	// Early return instead of else-after-return (Go indent-error-flow).
	messageCache, ok := c.cache[client.View]
	if !ok {
		return nil, false
	}
	return messageCache.Get(client.Request)
}
|
package strregex_test
import (
"testing"
"github.com/nandarimansyah/gobasicbenchmark/strregex"
)
// BenchmarkMatchString measures matching with a pattern recompiled on every
// call.
func BenchmarkMatchString(b *testing.B) {
	for i := 0; i < b.N; i++ {
		strregex.IsMatchUsingMatchString("nanda@gmail.com")
	}
}
// BenchmarkMatchStringCompiled measures matching with a pre-compiled pattern.
func BenchmarkMatchStringCompiled(b *testing.B) {
	for i := 0; i < b.N; i++ {
		strregex.IsMatchUsingMatchStringCompiled("nanda@gmail.com")
	}
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/Shopify/sarama"
)
// main publishes a single demo message; the shared producer is connected in
// init before main runs.
func main() {
	var msg = "Hello, I'm a message!"
	produceMsg(msg)
}
var (
	producer sarama.SyncProducer // shared synchronous producer, connected in init
	brokers  = []string{"127.0.0.1:9092", "127.0.0.1:9192"}
	topic    = "saku"
)
// init configures and connects the package-level synchronous producer.
// NOTE: connecting in init() makes the binary panic at startup when Kafka is
// unreachable; kept because callers rely on the producer being ready before
// main runs.
func init() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Retry.Max = 5
	// SyncProducer requires Return.Successes = true.
	config.Producer.Return.Successes = true
	// (The redundant self-shadowing "brokers := brokers" was removed.)
	var err error
	producer, err = sarama.NewSyncProducer(brokers, config)
	if err != nil {
		fmt.Printf("init producer failed -> %v \n", err)
		panic(err)
	}
	fmt.Println("producer init success")
}
// produceMsg publishes msg to the configured topic and logs the partition and
// offset on success.
func produceMsg(msg string) {
	msgX := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder(msg),
	}
	fmt.Printf("SendMsg -> %v\n", dumpString(msgX))
	partition, offset, err := producer.SendMessage(msgX)
	if err != nil {
		fmt.Printf("send msg error:%s \n", err)
		// Bug fix: without this return the function fell through and printed
		// a bogus "send success" line with zero-value partition/offset.
		return
	}
	fmt.Printf("msg send success, message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset)
}
// dumpString renders v as JSON; when marshalling fails it returns a small
// pseudo-JSON object embedding the error text.
func dumpString(v interface{}) (str string) {
	var out bytes.Buffer
	encoded, err := json.Marshal(v)
	if err == nil {
		out.Write(encoded)
	} else {
		out.WriteString("{err:\"json format error.")
		out.WriteString(err.Error())
		out.WriteString("\"}")
	}
	return out.String()
}
|
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// GetService returns the service named humioNodePoolName in
// humioClusterNamespace; the client's Get error is passed through unchanged.
func GetService(ctx context.Context, c client.Client, humioNodePoolName, humioClusterNamespace string) (*corev1.Service, error) {
	existingService := &corev1.Service{}
	key := types.NamespacedName{
		Namespace: humioClusterNamespace,
		Name:      humioNodePoolName,
	}
	err := c.Get(ctx, key, existingService)
	return existingService, err
}
|
package instance_test
import (
"errors"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
fakesys "github.com/cloudfoundry/bosh-agent/system/fakes"
fakebmagentclient "github.com/cloudfoundry/bosh-micro-cli/deployer/agentclient/fakes"
fakebmas "github.com/cloudfoundry/bosh-micro-cli/deployer/applyspec/fakes"
fakebmins "github.com/cloudfoundry/bosh-micro-cli/deployer/instance/fakes"
boshlog "github.com/cloudfoundry/bosh-agent/logger"
bmagentclient "github.com/cloudfoundry/bosh-micro-cli/deployer/agentclient"
bmas "github.com/cloudfoundry/bosh-micro-cli/deployer/applyspec"
bmdepl "github.com/cloudfoundry/bosh-micro-cli/deployment"
bmstemcell "github.com/cloudfoundry/bosh-micro-cli/stemcell"
. "github.com/cloudfoundry/bosh-micro-cli/deployer/instance"
)
// Instance behavior spec: exercises Apply, Start and WaitToBeRunning against
// fully faked collaborators (agent client, templates-spec generator, apply
// spec factory, filesystem).
var _ = Describe("Instance", func() {
	var (
		fakeAgentClient            *fakebmagentclient.FakeAgentClient
		instance                   Instance
		applySpec                  bmstemcell.ApplySpec
		fakeTemplatesSpecGenerator *fakebmins.FakeTemplatesSpecGenerator
		fakeApplySpecFactory       *fakebmas.FakeApplySpecFactory
		deployment                 bmdepl.Deployment
		deploymentJob              bmdepl.Job
		stemcellJob                bmstemcell.Job
		fs                         *fakesys.FakeFileSystem
		logger                     boshlog.Logger
	)
	BeforeEach(func() {
		// Canned templates-spec result returned by the fake generator.
		fakeTemplatesSpecGenerator = fakebmins.NewFakeTemplatesSpecGenerator()
		fakeTemplatesSpecGenerator.SetCreateBehavior(TemplatesSpec{
			BlobID:            "fake-blob-id",
			ArchiveSha1:       "fake-archive-sha1",
			ConfigurationHash: "fake-configuration-hash",
		}, nil)
		fakeAgentClient = fakebmagentclient.NewFakeAgentClient()
		// Stemcell job carrying three templates; the deployment job below
		// references only the first and third.
		stemcellJob = bmstemcell.Job{
			Name: "fake-job-name",
			Templates: []bmstemcell.Blob{
				{
					Name:        "first-job-name",
					Version:     "first-job-version",
					SHA1:        "first-job-sha1",
					BlobstoreID: "first-job-blobstore-id",
				},
				{
					Name:        "second-job-name",
					Version:     "second-job-version",
					SHA1:        "second-job-sha1",
					BlobstoreID: "second-job-blobstore-id",
				},
				{
					Name:        "third-job-name",
					Version:     "third-job-version",
					SHA1:        "third-job-sha1",
					BlobstoreID: "third-job-blobstore-id",
				},
			},
		}
		applySpec = bmstemcell.ApplySpec{
			Packages: map[string]bmstemcell.Blob{
				"first-package-name": bmstemcell.Blob{
					Name:        "first-package-name",
					Version:     "first-package-version",
					SHA1:        "first-package-sha1",
					BlobstoreID: "first-package-blobstore-id",
				},
				"second-package-name": bmstemcell.Blob{
					Name:        "second-package-name",
					Version:     "second-package-version",
					SHA1:        "second-package-sha1",
					BlobstoreID: "second-package-blobstore-id",
				},
			},
			Job: stemcellJob,
		}
		deploymentJob = bmdepl.Job{
			Name: "fake-manifest-job-name",
			Templates: []bmdepl.ReleaseJobRef{
				{Name: "first-job-name"},
				{Name: "third-job-name"},
			},
			RawProperties: map[interface{}]interface{}{
				"fake-property-key": "fake-property-value",
			},
			Networks: []bmdepl.JobNetwork{
				{
					Name:      "fake-network-name",
					StaticIPs: []string{"fake-network-ip"},
				},
			},
		}
		deployment = bmdepl.Deployment{
			Name: "fake-deployment-name",
			Jobs: []bmdepl.Job{
				deploymentJob,
			},
			Networks: []bmdepl.Network{
				{
					Name: "fake-network-name",
					Type: "fake-network-type",
				},
			},
		}
		fakeApplySpecFactory = fakebmas.NewFakeApplySpecFactory()
		logger = boshlog.NewLogger(boshlog.LevelNone)
		fs = fakesys.NewFakeFileSystem()
		// System under test.
		instance = NewInstance(
			fakeAgentClient,
			fakeTemplatesSpecGenerator,
			fakeApplySpecFactory,
			"fake-mbus-url",
			fs,
			logger,
		)
	})
	Describe("Apply", func() {
		It("stops the agent", func() {
			err := instance.Apply(applySpec, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeAgentClient.StopCalled).To(BeTrue())
		})
		It("generates templates spec", func() {
			err := instance.Apply(applySpec, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeTemplatesSpecGenerator.CreateInputs).To(ContainElement(fakebmins.CreateInput{
				DeploymentJob:  deploymentJob,
				StemcellJob:    stemcellJob,
				DeploymentName: "fake-deployment-name",
				Properties: map[string]interface{}{
					"fake-property-key": "fake-property-value",
				},
				MbusURL: "fake-mbus-url",
			}))
		})
		It("creates apply spec", func() {
			err := instance.Apply(applySpec, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeApplySpecFactory.CreateInput).To(Equal(
				fakebmas.CreateInput{
					ApplySpec:      applySpec,
					DeploymentName: "fake-deployment-name",
					JobName:        "fake-manifest-job-name",
					NetworksSpec: map[string]interface{}{
						"fake-network-name": map[string]interface{}{
							"type":             "fake-network-type",
							"ip":               "fake-network-ip",
							"cloud_properties": map[string]interface{}{},
						},
					},
					ArchivedTemplatesBlobID: "fake-blob-id",
					ArchivedTemplatesSha1:   "fake-archive-sha1",
					TemplatesDirSha1:        "fake-configuration-hash",
				},
			))
		})
		It("sends apply spec to the agent", func() {
			agentApplySpec := bmas.ApplySpec{
				Deployment: "fake-deployment-name",
			}
			fakeApplySpecFactory.CreateApplySpec = agentApplySpec
			err := instance.Apply(applySpec, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeAgentClient.ApplyApplySpec).To(Equal(agentApplySpec))
		})
		// Failure paths: each fake is primed to fail one collaborator call
		// and the error text must surface through Apply.
		Context("when creating templates spec fails", func() {
			BeforeEach(func() {
				fakeTemplatesSpecGenerator.CreateErr = errors.New("fake-template-err")
			})
			It("returns an error", func() {
				err := instance.Apply(applySpec, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-template-err"))
			})
		})
		Context("when sending apply spec to the agent fails", func() {
			BeforeEach(func() {
				fakeAgentClient.ApplyErr = errors.New("fake-agent-apply-err")
			})
			It("returns an error", func() {
				err := instance.Apply(applySpec, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-agent-apply-err"))
			})
		})
		Context("when stopping an agent fails", func() {
			BeforeEach(func() {
				fakeAgentClient.SetStopBehavior(errors.New("fake-stop-error"))
			})
			It("returns an error", func() {
				err := instance.Apply(applySpec, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-stop-error"))
			})
		})
	})
	Describe("Start", func() {
		It("starts agent services", func() {
			err := instance.Start()
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeAgentClient.StartCalled).To(BeTrue())
		})
		Context("when starting an agent fails", func() {
			BeforeEach(func() {
				fakeAgentClient.SetStartBehavior(errors.New("fake-start-error"))
			})
			It("returns an error", func() {
				err := instance.Start()
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-start-error"))
			})
		})
	})
	Describe("WaitToBeRunning", func() {
		BeforeEach(func() {
			// Queue two "pending" states followed by "running".
			fakeAgentClient.SetGetStateBehavior(bmagentclient.State{JobState: "pending"}, nil)
			fakeAgentClient.SetGetStateBehavior(bmagentclient.State{JobState: "pending"}, nil)
			fakeAgentClient.SetGetStateBehavior(bmagentclient.State{JobState: "running"}, nil)
		})
		It("waits until agent reports state as running", func() {
			err := instance.WaitToBeRunning(5, 0)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeAgentClient.GetStateCalledTimes).To(Equal(3))
		})
	})
})
|
package main
import (
"log"
"encoding/json"
"fmt"
"net/http"
)
// handler writes a fixed greeting for the root route.
func handler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "JEEELOOOU")
}
// main wires the routes and serves on :8080; ListenAndServe only returns on
// failure, which log.Fatal reports and exits on.
func main() {
	http.HandleFunc("/", handler)
	http.HandleFunc("/planet/yavin", planets)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// Planet is the JSON payload served by the /planet/yavin route; fields are
// exported so encoding/json includes them.
type Planet struct {
	Name    string
	Climate string
	Terrain string
}
// planets serves a hard-coded Yavin IV planet as JSON.
func planets(w http.ResponseWriter, r *http.Request) {
	planet := Planet{Name: "Yavin IV", Climate: "temperate, tropical", Terrain: "jungle, rainforests"}
	// Renamed from "error": shadowing the builtin error type is confusing.
	body, err := json.Marshal(planet)
	if err != nil {
		// Marshalling a plain struct cannot realistically fail; the original
		// panic is kept (net/http recovers per-request panics).
		panic(err)
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.Write(body)
}
|
package main
// main is intentionally empty; this file only hosts the findPeakElement
// solutions below.
func main() {
}
// findPeakElement returns the index of a peak (an element strictly greater
// than its neighbors) using binary search in O(log n). It assumes adjacent
// elements differ, per the LeetCode 162 contract.
func findPeakElement(nums []int) int {
	lo, hi := 0, len(nums)-1
	for lo < hi {
		mid := lo + (hi-lo)/2
		if nums[mid] >= nums[mid+1] {
			// Descending here: a peak exists at mid or to its left.
			hi = mid
		} else {
			// Ascending here: a peak exists strictly to the right.
			lo = mid + 1
		}
	}
	return lo
}
// findPeakElement2 is the linear-scan variant: check every interior element,
// then the two ends. Returns -1 only when no strict peak exists (which the
// problem's "neighbors differ" guarantee rules out).
func findPeakElement2(nums []int) int {
	n := len(nums)
	if n == 1 {
		return 0
	}
	for i := 1; i+1 < n; i++ {
		if nums[i-1] < nums[i] && nums[i+1] < nums[i] {
			return i
		}
	}
	switch {
	case nums[0] > nums[1]:
		return 0
	case nums[n-1] > nums[n-2]:
		return n - 1
	}
	return -1
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"mime/multipart"
"strconv"
"time"
)
var (
	logger Logger // package-wide logger, initialized in main
	cfg    Config // deployment settings loaded from config.json in main
)
// findServer queries the tars platform for the server identified by app and
// server and returns the first matching server's id.
func findServer(app string, server string) (int, error) {
	logger.Infof("\n【获取服务列表】")
	b := []byte("")
	// NOTE(review): tree_node_id appears to encode the application ("1"+app)
	// and server ("5"+server) tree nodes — confirm against the tars console.
	query := fmt.Sprintf(`tree_node_id=1%s.5%s`, app, server)
	res, err := HttpRequest("GET", cfg.BaseUrl + "/pages/server/api/server_list?" + query, &b, &map[string]string{
		"Content-Type": "application/json",
	})
	if err != nil {
		logger.Errorf("\t获取服务列表失败:%s", err.Error())
		return -1, err
	}
	//logger.Debugf("\t获取服务列表结果:%s", res)
	var rsp FindServerRsp
	err = json.Unmarshal([]byte(res), &rsp)
	if err != nil {
		logger.Errorf("\t获取服务列表结果格式不合法:%s", err.Error())
		return -1, err
	}
	if rsp.RetCode != 200 {
		logger.Errorf("\t%s", rsp.ErrMsg)
		return -1, errors.New(rsp.ErrMsg)
	}
	if len(rsp.Data) == 0 {
		logger.Errorf("\t获取服务列表为空")
		return -1, errors.New("获取服务列表为空")
	}
	// Use the first entry of the returned list.
	svr := rsp.Data[0]
	logger.Infof("\t应用名:%s\n\t服务名:%s\n\t节点:%s\n\t服务类型:%s\n\t启用set:%t\n\t设置状态:%s\n\t实时状态:%s\n\t发布时间:%s",
		svr.Application, svr.ServerName, svr.NodeName, svr.ServerType, svr.EnableSet, svr.SettingState, svr.PresentState, svr.PatchTime)
	return svr.Id, nil
}
// uploadFile uploads the patch package at file to the tars platform for
// app/server and returns the upload (patch package) id.
func uploadFile(app string, server string, file string, comment string) (int, error) {
	logger.Infof("\n【上传文件】")
	// The task id is derived from the current unix time with a "000" suffix.
	taskId := strconv.Itoa(int(time.Now().Unix())) + "000"
	buf, err := ioutil.ReadFile(file)
	if err != nil {
		logger.Errorf("\t找不到文件:%s", file)
		return -1, err
	}
	// Build the multipart form payload; WriteField errors are deliberately
	// ignored (bytes.Buffer writes cannot fail).
	body := new(bytes.Buffer)
	w := multipart.NewWriter(body)
	_ = w.WriteField("application", app)
	_ = w.WriteField("module_name", server)
	_ = w.WriteField("comment", comment)
	_ = w.WriteField("task_id", taskId)
	fw, _ := w.CreateFormFile("suse", file)
	_, err = fw.Write(buf)
	if err != nil {
		logger.Errorf("\t写入文件到formData失败:%s", err.Error())
		return -1, err
	}
	_ = w.Close()
	b := body.Bytes()
	contentType := w.FormDataContentType()
	res, err := HttpRequest("POST", cfg.BaseUrl + "/pages/server/api/upload_patch_package", &b, &map[string]string{
		"Content-Type": contentType,
	})
	if err != nil {
		logger.Errorf("\t上传文件到tars平台失败:%s", err.Error())
		return -1, err
	}
	//logger.Debugf("\t上传结果:%s", res)
	var rsp UploadFileRsp
	err = json.Unmarshal([]byte(res), &rsp)
	if err != nil {
		logger.Errorf("\t上传文件结果格式不合法:%s", err.Error())
		return -1, err
	}
	if rsp.RetCode != 200 {
		logger.Errorf("\t%s", rsp.ErrMsg)
		return -1, errors.New(rsp.ErrMsg)
	}
	data := rsp.Data
	logger.Infof("\t服务:%s\n\t上传包:%s",
		data.Server, data.Tgz)
	return data.Id, nil
}
// addTask creates a serial "patch_tars" release task for server svrId using
// the uploaded package uploadId and returns the new task id.
func addTask(svrId int, uploadId int) (string, error) {
	logger.Infof("\n【创建发布任务】")
	req := AddTaskReq{
		Items: []Item{{
			ServerId: strconv.Itoa(svrId),
			Command:  "patch_tars",
			Parameters: Parameters{
				BakFlag:    false,
				PatchId:    strconv.Itoa(uploadId),
				UpdateText: "",
			},
		}},
		Serial: true,
	}
	b, _ := json.Marshal(&req)
	res, err := HttpRequest("POST", cfg.BaseUrl + "/pages/server/api/add_task", &b, &map[string]string{
		"Content-Type": "application/json",
	})
	if err != nil {
		logger.Errorf("\t创建发布任务失败:%s\n", err.Error())
		return "", err
	}
	var rsp AddTaskRsp
	err = json.Unmarshal([]byte(res), &rsp)
	if err != nil {
		logger.Errorf("\t创建发布任务结果格式不合法:%s", err.Error())
		return "", err
	}
	if rsp.RetCode != 200 {
		logger.Errorf("\t%s", rsp.ErrMsg)
		return "", errors.New(rsp.ErrMsg)
	}
	logger.Debugf("\t创建发布任务成功,任务ID:%s\n", rsp.Data)
	return rsp.Data, nil
}
// checkStatus polls the tars platform for the state of release task taskId.
// The bool result means "the task reached a terminal state", not "the
// release succeeded".
//
// NOTE(review): a failed release (Status == 3) also returns (true, nil), so
// the caller cannot distinguish success from failure except by the log
// output — confirm this is intended before relying on the return value.
func checkStatus(taskId string) (bool, error) {
	logger.Infof("检查发布状态...")
	// GET carries no payload; HttpRequest still requires a body pointer.
	b := []byte("")
	query := fmt.Sprintf(`task_no=%s`, taskId)
	res, err := HttpRequest("GET", cfg.BaseUrl + "/pages/server/api/task?" + query, &b, &map[string]string{
		"Content-Type": "application/json",
	})
	if err != nil {
		logger.Errorf("\t检查发布状态失败:%s", err.Error())
		return false, err
	}
	//logger.Debugf("\t上传结果:%s", res)
	var rsp CheckStatusRsp
	err = json.Unmarshal([]byte(res), &rsp)
	if err != nil {
		logger.Errorf("\t检查发布状态结果格式不合法:%s", err.Error())
		return false, err
	}
	if rsp.RetCode != 200 {
		logger.Errorf("\t%s", rsp.ErrMsg)
		return false, errors.New(rsp.ErrMsg)
	}
	// Find our task in the returned list; per the log messages below,
	// Status 2 means success and Status 3 means failure.
	items := rsp.Data.Items
	for _, item := range items {
		if item.TaskId != taskId {
			continue
		}
		if item.Status == 2 {
			logger.Infof("\t\n发布成功!")
			return true, nil
		} else if item.Status == 3 {
			logger.Infof("\t\n发布失败!")
			return true, nil
		}
	}
	// Task not terminal yet: keep polling.
	return false, nil
}
// main reads config.json, then runs the deploy pipeline: find the server,
// upload the package, create a release task, and poll once per second until
// the task reaches a terminal state.
//
// Fixed: the old nesting returned silently when addTask failed, so that one
// failure path (unlike findServer/uploadFile) never printed 【操作失败】.
// Early returns make every failure path report consistently. The
// ticker+goroutine+channel construction is also replaced by a direct poll
// loop with identical observable behavior.
func main() {
	logger = L{}
	b, err := ioutil.ReadFile("config.json")
	if err != nil {
		logger.Errorf("读取配置文件错误:%s", err.Error())
		return
	}
	if err = json.Unmarshal(b, &cfg); err != nil {
		logger.Errorf("配置文件格式错误:%s", err.Error())
		return
	}
	svrId, err := findServer(cfg.App, cfg.Server)
	if err != nil {
		logger.Errorf("\n【操作失败】")
		return
	}
	uploadId, err := uploadFile(cfg.App, cfg.Server, cfg.Filename, cfg.Comment)
	if err != nil {
		logger.Errorf("\n【操作失败】")
		return
	}
	taskId, err := addTask(svrId, uploadId)
	if err != nil {
		logger.Errorf("\n【操作失败】")
		return
	}
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for range ticker.C {
		// Polling errors are logged inside checkStatus; keep trying.
		done, err := checkStatus(taskId)
		if err == nil && done {
			return
		}
	}
}
|
package es
import (
"syscall"
)
// EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetMatchPath(void);
// NOTE(review): the dll* procs in this file are declared here but bound
// elsewhere in the package (presumably where Everything.dll is loaded);
// calling any wrapper before that binding would dereference a nil *LazyProc.
var dllEverythingGetMatchPath *syscall.LazyProc

// EverythingGetMatchPath wraps Everything_GetMatchPath; r1 == 1 means the
// option is on. checkErr is given the constant 1 rather than r1 because a
// zero return here is a legitimate boolean false, not a failure indicator.
func EverythingGetMatchPath() (bool, error) {
	r1, _, err := dllEverythingGetMatchPath.Call()
	return r1 == 1, checkErr(1, err)
}

// EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetMatchCase(void);
var dllEverythingGetMatchCase *syscall.LazyProc

// EverythingGetMatchCase wraps Everything_GetMatchCase (BOOL result).
func EverythingGetMatchCase() (bool, error) {
	r1, _, err := dllEverythingGetMatchCase.Call()
	return r1 == 1, checkErr(1, err)
}

// EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetMatchWholeWord(void);
var dllEverythingGetMatchWholeWord *syscall.LazyProc

// EverythingGetMatchWholeWord wraps Everything_GetMatchWholeWord (BOOL result).
func EverythingGetMatchWholeWord() (bool, error) {
	r1, _, err := dllEverythingGetMatchWholeWord.Call()
	return r1 == 1, checkErr(1, err)
}

// EVERYTHINGUSERAPI BOOL EVERYTHINGAPI Everything_GetRegex(void);
var dllEverythingGetRegex *syscall.LazyProc

// EverythingGetRegex wraps Everything_GetRegex (BOOL result).
func EverythingGetRegex() (bool, error) {
	r1, _, err := dllEverythingGetRegex.Call()
	return r1 == 1, checkErr(1, err)
}
// EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetMax(void);
var dllEverythingGetMax *syscall.LazyProc

// EverythingGetMax wraps Everything_GetMax, returning its DWORD result.
func EverythingGetMax() (uint32, error) {
	r1, _, err := dllEverythingGetMax.Call()
	return uint32(r1), checkErr(1, err)
}

// EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetOffset(void);
var dllEverythingGetOffset *syscall.LazyProc

// EverythingGetOffset wraps Everything_GetOffset, returning its DWORD result.
func EverythingGetOffset() (uint32, error) {
	r1, _, err := dllEverythingGetOffset.Call()
	return uint32(r1), checkErr(1, err)
}
// EVERYTHINGUSERAPI LPCSTR EVERYTHINGAPI Everything_GetSearchA(void);
var dllEverythingGetSearchA *syscall.LazyProc

// everythingGetSearchA wraps Everything_GetSearchA: the raw return is a
// pointer to an ANSI string, converted via uintPtrAToString. Here r1 IS
// passed to checkErr, since a NULL pointer return signals failure.
func everythingGetSearchA() (string, error) {
	r1, _, err := dllEverythingGetSearchA.Call()
	if err = checkErr(r1, err); err != nil {
		return "", err
	}
	return uintPtrAToString(r1), nil
}

// EVERYTHINGUSERAPI LPCWSTR EVERYTHINGAPI Everything_GetSearchW(void);
var dllEverythingGetSearchW *syscall.LazyProc

// everythingGetSearchW wraps Everything_GetSearchW: same as the ANSI
// variant, but the pointer is to a UTF-16 string.
func everythingGetSearchW() (string, error) {
	r1, _, err := dllEverythingGetSearchW.Call()
	if err = checkErr(r1, err); err != nil {
		return "", err
	}
	return uintPtrWToString(r1), nil
}
// EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetLastError(void);
// See EverythingOk for the full list of error codes.
var dllEverythingGetLastError *syscall.LazyProc

// EverythingGetLastError fetches Everything's last error code. When the
// syscall itself succeeded (Errno 0), the DWORD return is converted to an
// EverythingErr; otherwise the raw syscall error is returned.
func EverythingGetLastError() error {
	r1, _, err := dllEverythingGetLastError.Call()
	if e, ok := err.(syscall.Errno); ok && e == 0 {
		return EverythingErr(r1) // return the concrete Everything error code
	}
	return err
}
// EVERYTHINGUSERAPI HWND EVERYTHINGAPI Everything_GetReplyWindow(void);
var dllEverythingGetReplyWindow *syscall.LazyProc

// EverythingGetReplyWindow wraps Everything_GetReplyWindow, returning the
// raw value as a window handle.
func EverythingGetReplyWindow() (syscall.Handle, error) {
	r1, _, err := dllEverythingGetReplyWindow.Call()
	return syscall.Handle(r1), checkErr(1, err)
}

// EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetReplyID(void);
var dllEverythingGetReplyID *syscall.LazyProc

// EverythingGetReplyID wraps Everything_GetReplyID (DWORD result).
func EverythingGetReplyID() (uint32, error) {
	r1, _, err := dllEverythingGetReplyID.Call()
	return uint32(r1), checkErr(1, err)
}

// EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetSort(void); // Everything 1.4.1
// See EverythingSortNameAscending for the enum of all sort types.
var dllEverythingGetSort *syscall.LazyProc

// EverythingGetSort wraps Everything_GetSort (DWORD result).
func EverythingGetSort() (uint32, error) {
	r1, _, err := dllEverythingGetSort.Call()
	return uint32(r1), checkErr(1, err)
}

// EVERYTHINGUSERAPI DWORD EVERYTHINGAPI Everything_GetRequestFlags(void); // Everything 1.4.1
// See EverythingRequestFileName for the enum of all result-data flags.
var dllEverythingGetRequestFlags *syscall.LazyProc

// EverythingGetRequestFlags wraps Everything_GetRequestFlags (DWORD result).
func EverythingGetRequestFlags() (uint32, error) {
	r1, _, err := dllEverythingGetRequestFlags.Call()
	return uint32(r1), checkErr(1, err)
}
|
package mira
import "net/http"
// Init authenticates with the given credentials and returns a ready Reddit
// client. It also starts a background goroutine (autoRefresh) that keeps
// the token updated; that refresher is internal and not exposed to callers.
func Init(c Credentials) (*Reddit, error) {
	reddit, err := Authenticate(&c)
	if err != nil {
		return nil, err
	}
	reddit.Client = &http.Client{}
	reddit.SetDefault()
	go reddit.autoRefresh()
	return reddit, nil
}
|
package controllers
import (
"database/sql"
r "github.com/dancewing/revel"
"github.com/dancewing/revel/orm"
"github.com/dancewing/yysrevel/app/models"
)
// GorpController is the base controller giving actions access to a
// per-request database transaction, driven by the Begin/Commit/Rollback
// methods below.
type GorpController struct {
	*r.Controller
	// Txn is the open transaction for the current request; nil before
	// Begin and after Commit/Rollback.
	Txn *orm.Transaction
}
// Begin opens a database transaction and stores it on the controller for
// the duration of the request. It panics when the transaction cannot be
// started.
func (c *GorpController) Begin() r.Result {
	tx, err := orm.Database().Get().Begin()
	if err != nil {
		panic(err)
	}
	c.Txn = tx
	return nil
}
// Commit commits the current transaction, if any, then clears it. A
// transaction that has already finished (sql.ErrTxDone) is not an error;
// any other failure panics.
func (c *GorpController) Commit() r.Result {
	if c.Txn == nil {
		return nil
	}
	switch err := c.Txn.Commit(); err {
	case nil, sql.ErrTxDone:
		// Committed, or already finished elsewhere — both fine.
	default:
		panic(err)
	}
	c.Txn = nil
	return nil
}
// Rollback rolls back the current transaction, if any, then clears it. A
// transaction that has already finished (sql.ErrTxDone) is not an error;
// any other failure panics.
func (c *GorpController) Rollback() r.Result {
	if c.Txn == nil {
		return nil
	}
	switch err := c.Txn.Rollback(); err {
	case nil, sql.ErrTxDone:
		// Rolled back, or already finished elsewhere — both fine.
	default:
		panic(err)
	}
	c.Txn = nil
	return nil
}
// Connected returns the signed-in user for this request, consulting the
// ViewArgs cache first and falling back to the session; nil when anonymous.
func (c *GorpController) Connected() *models.User {
	if cached := c.ViewArgs["user"]; cached != nil {
		return cached.(*models.User)
	}
	if login, ok := c.Session["user"]; ok {
		return c.getUser(login)
	}
	return nil
}
// getUser loads the user row matching the given login name, or nil when no
// such user exists. Query errors panic.
func (c *GorpController) getUser(username string) *models.User {
	rows, err := c.Txn.Select(models.User{}, `select * from User_ where login = ?`, username)
	if err != nil {
		panic(err)
	}
	if len(rows) == 0 {
		return nil
	}
	return rows[0].(*models.User)
}
|
package main
import (
"fmt"
"unicode"
"github.com/jnewmano/advent2020/input"
"github.com/jnewmano/advent2020/output"
)
// main prints the part-A answer.
func main() {
	fmt.Println(parta())
}
// parta sums, over every blank-line-separated answer group in the puzzle
// input, the number of distinct questions anyone in the group answered.
func parta() interface{} {
	groups := input.LoadSliceString("\n\n")
	counts := make([]int, 0, len(groups))
	for _, group := range groups {
		counts = append(counts, len(process(group)))
	}
	return output.Sum(counts)
}
// process returns the set of distinct non-whitespace runes in s, keyed and
// valued by the rune itself (the map doubles as a membership set).
func process(s string) map[rune]rune {
	seen := make(map[rune]rune)
	for _, r := range s {
		if !unicode.IsSpace(r) {
			seen[r] = r
		}
	}
	return seen
}
// Referenced so the output package's High helper stays available/imported
// even though this part only uses output.Sum.
var _ = output.High(nil)

// raw is sample puzzle input for use with input.SetRaw while testing.
// NOTE(review): the published day-6 sample separates groups with blank
// lines; this literal has none — confirm it before relying on it.
var raw = `abc
a
b
c
ab
ac
a
a
a
a
b`
|
package series
import "strings"
// All returns every contiguous substring of s of length n, in order of
// starting position. It returns nil when n is out of range (n < 1 or
// n > len(s)).
//
// Fixed: the old version had no lower bound on n — a negative n panicked on
// s[i:i+n], and n == 0 produced a slice of len(s)+1 empty strings.
func All(n int, s string) []string {
	if n < 1 || n > len(s) {
		return nil
	}
	if n == 1 {
		// NOTE(review): Split cuts on runes while the loop below cuts on
		// bytes; the two agree only for ASCII input — confirm inputs are
		// ASCII or drop this fast path.
		return strings.Split(s, "")
	}
	out := make([]string, 0, len(s)-n+1)
	for i := 0; i+n <= len(s); i++ {
		out = append(out, s[i:i+n])
	}
	return out
}
// UnsafeFirst returns the first n bytes of s. It performs no bounds
// checking — n > len(s) or n < 0 panics, hence the name; use First for the
// checked variant.
func UnsafeFirst(n int, s string) string {
	return s[:n]
}
// First returns the first n bytes of s and ok=true; when n is out of range
// (n < 1 or n > len(s)) it returns "" and ok=false.
func First(n int, s string) (first string, ok bool) {
	if n < 1 || n > len(s) {
		return "", false
	}
	return s[:n], true
}
|
package main
import (
"fmt"
"time"
)
// cronjob increments the package counter totalRead once per second,
// printing each tick, and returns once the counter passes 10.
//
// Fixed: time.Tick gives no way to release the underlying ticker; using
// time.NewTicker with a deferred Stop frees it when the job returns. The
// single-case select is also collapsed into a plain range over the channel.
func cronjob() {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		totalRead++
		if totalRead > 10 {
			return
		}
		fmt.Println("run 1s cronjob:", totalRead)
	}
}
|
// Package page has a function to return a list of all butler gen.Pages.
package page
import (
"path/filepath"
"github.com/jwowillo/butler/recipe"
"github.com/jwowillo/gen"
)
// List returns all butler gen.Pages: the templates rendered from the web
// directory with the recipe.Recipes rs injected, plus the static assets
// under web/asset.
//
// The web directory must contain a 'tmpl' directory holding 'base.html',
// 'index.html', 'recipes.js', and 'recipe.html'.
//
// An error is returned when any gen.Page couldn't be created.
func List(web string, rs []recipe.Recipe) ([]gen.Page, error) {
	pages, err := tmpls(web, rs)
	if err != nil {
		return nil, err
	}
	assets, err := gen.Assets(filepath.Join(web, "asset"))
	if err != nil {
		return nil, err
	}
	return append(pages, assets...), nil
}
// recipeTemplate is the struct for a recipe.Recipe which is injected into
// templates. It mirrors the recipe fields, with each ingredient expanded
// into its precomputed phrase variants.
type recipeTemplate struct {
	Path        string
	Name        string
	Description string
	Ingredients []ingredientTemplate
	Steps       []string
	Notes       []string
}
// newRecipeTemplate converts a recipe.Recipe into its template form,
// precomputing the singular/plural/fractional phrases for each ingredient.
func newRecipeTemplate(r recipe.Recipe) recipeTemplate {
	ingredients := make([]ingredientTemplate, len(r.Ingredients))
	for idx, ing := range r.Ingredients {
		ingredients[idx] = ingredientTemplate{
			Ingredient:       ing,
			SingularPhrase:   recipe.SingularPhrase(ing),
			PluralPhrase:     recipe.PluralPhrase(ing),
			FractionalPhrase: recipe.FractionalPhrase(ing),
		}
	}
	return recipeTemplate{
		Path:        r.Path,
		Name:        r.Name,
		Description: r.Description,
		Ingredients: ingredients,
		Steps:       r.Steps,
		Notes:       r.Notes,
	}
}
// ingredientTemplate is the struct for a recipe.Ingredient which is
// injected into templates: the ingredient itself plus its precomputed
// phrase variants, so templates don't call phrase helpers.
type ingredientTemplate struct {
	recipe.Ingredient
	SingularPhrase   string
	PluralPhrase     string
	FractionalPhrase string
}
// tmpls is a helper function to return a list of all the gen.Templates for
// butler (the index page, the recipes.js listing, and one page per recipe)
// or an error if any gen.Template couldn't be created.
func tmpls(web string, rs []recipe.Recipe) ([]gen.Page, error) {
	// Convert every recipe once up front; the same slice feeds the index
	// page and recipes.js, while each element feeds its own recipe page.
	rts := make([]recipeTemplate, 0, len(rs))
	for _, r := range rs {
		rts = append(rts, newRecipeTemplate(r))
	}
	// paths resolves template filenames relative to web/tmpl.
	paths := func(ps ...string) []string {
		out := make([]string, 0, len(ps))
		for _, p := range ps {
			out = append(out, filepath.Join(web, "tmpl", p))
		}
		return out
	}
	hp, err := gen.NewTemplate(
		paths("base.html", "index.html"),
		rts,
		"/index.html",
	)
	if err != nil {
		return nil, err
	}
	jsp, err := gen.NewTemplate(paths("recipes.js"), rts, "/recipes.js")
	if err != nil {
		return nil, err
	}
	rps := make([]gen.Page, 0, len(rts))
	for _, rt := range rts {
		// NOTE(review): rt.Path looks like a URL-style path (the other
		// output paths use forward slashes) — filepath.Join would use
		// backslashes on Windows; confirm path.Join isn't intended here.
		rp, err := gen.NewTemplate(
			paths("base.html", "recipe.html"),
			rt,
			filepath.Join(rt.Path, "index.html"),
		)
		if err != nil {
			return nil, err
		}
		rps = append(rps, rp)
	}
	return append(rps, hp, jsp), nil
}
|
package method_interface
import (
"errors"
"fmt"
"strconv"
"time"
)
// ErrorDemo shows the conventional error check: a function returns an
// error value and the caller compares it against nil before using the
// result.
//
// Fixed: the local variable was named "error", shadowing the builtin error
// type within this function; it is renamed to the idiomatic "err".
func ErrorDemo() {
	s, err := strconv.Atoi("abc")
	if err == nil {
		fmt.Println(s)
	} else {
		fmt.Printf("convert error, %v\n", err)
	}
}
// Devide returns i1 divided by i2; dividing by zero yields an error.
// (The misspelled name is kept — it is part of the public interface.)
func Devide(i1, i2 int64) (int64, error) {
	if i2 != 0 {
		return i1 / i2, nil
	}
	return 0, errors.New("被除数不能为0")
}
// 自定义error类型,实现了 Error() 方法
type MyError struct {
When time.Time
What string
}
func (e *MyError) Error() string {
return fmt.Sprintf("at %v, %s",
e.When, e.What)
}
// Run demonstrates returning a custom error value through the builtin
// error interface: it always fails with a *MyError stamped with the
// current time.
func Run() error {
	fmt.Println("run ...")
	return &MyError{
		When: time.Now(),
		What: "it didn't work",
	}
}
|
package users_test
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
. "cinemo.com/shoping-cart/internal/users"
mocks "cinemo.com/shoping-cart/mocks/users"
"cinemo.com/shoping-cart/pkg/pointer"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/mock"
)
// Test_signUpHandler is a table-driven test of SignUpHandler: each case
// feeds a JSON request fixture to the handler (with the user service
// mocked) and compares the HTTP status code and JSON response body against
// a golden file.
func Test_signUpHandler(t *testing.T) {
	tests := []struct {
		name                         string
		givenJSONReqFilepath         string   // request-body fixture
		givenUserServiceArgs         []interface{} // expected CreateUser args
		givenUserServiceReturnValues []interface{} // mocked CreateUser result
		expectedJSONRespFilepath     string   // golden response body
		expectedStatusCode           int
	}{
		{
			name:                 "ideal case success handler",
			givenJSONReqFilepath: "testdata/signup/success/request.json",
			givenUserServiceArgs: []interface{}{mock.Anything, "username", "password", pointer.String("firstname"), pointer.String("lastname")},
			expectedJSONRespFilepath: "testdata/signup/success/response.json",
			givenUserServiceReturnValues: []interface{}{&User{
				ID:        int64(1),
				FirstName: pointer.String("firstname"),
				LastName:  pointer.String("lastname"),
				Password:  "zxzx",
				Username:  "username",
				CreatedAt: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
				UpdatedAt: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
			}, nil},
			expectedStatusCode: http.StatusCreated,
		},
		{
			name:                 "ideal case success handler without first/lastname",
			givenJSONReqFilepath: "testdata/signup/success/request_without_first_lastname.json",
			givenUserServiceArgs: []interface{}{mock.Anything, "username", "password", mock.Anything, mock.Anything},
			expectedJSONRespFilepath: "testdata/signup/success/response_without_first_lastname.json",
			givenUserServiceReturnValues: []interface{}{&User{
				ID:        int64(1),
				Password:  "zxzx",
				Username:  "username",
				CreatedAt: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
				UpdatedAt: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
			}, nil},
			expectedStatusCode: http.StatusCreated,
		},
		// The three failure cases below never reach the user service, so
		// their mock args/returns are empty.
		{
			name:                 "invalid request",
			givenJSONReqFilepath: "testdata/signup/failure/invalid_request",
			givenUserServiceArgs: []interface{}{},
			expectedJSONRespFilepath: "testdata/signup/failure/invalid_request_response.json",
			givenUserServiceReturnValues: []interface{}{},
			expectedStatusCode: http.StatusBadRequest,
		},
		{
			name:                 "invalid request",
			givenJSONReqFilepath: "testdata/signup/failure/request_without_username.json",
			givenUserServiceArgs: []interface{}{},
			expectedJSONRespFilepath: "testdata/signup/failure/response_of_request_without_username.json",
			givenUserServiceReturnValues: []interface{}{},
			expectedStatusCode: http.StatusBadRequest,
		},
		{
			name:                 "invalid request",
			givenJSONReqFilepath: "testdata/signup/failure/request_without_password.json",
			givenUserServiceArgs: []interface{}{},
			expectedJSONRespFilepath: "testdata/signup/failure/response_of_request_without_password.json",
			givenUserServiceReturnValues: []interface{}{},
			expectedStatusCode: http.StatusBadRequest,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			userService := new(mocks.Service)
			userService.On("CreateUser", tt.givenUserServiceArgs...).Return(tt.givenUserServiceReturnValues...)
			// Load test fixtures
			input, err := ioutil.ReadFile(tt.givenJSONReqFilepath)
			if err != nil {
				t.Fatalf("Cannot read %v", tt.givenJSONReqFilepath)
			}
			expected, err := ioutil.ReadFile(tt.expectedJSONRespFilepath)
			if err != nil {
				t.Fatalf("Cannot read %v", tt.expectedJSONRespFilepath)
			}
			// NOTE(review): the URL path says "singup" (typo); harmless here
			// since the handler is invoked directly rather than routed.
			r := httptest.NewRequest(http.MethodPost, "/v1/api/users/singup", bytes.NewBuffer(input))
			w := httptest.NewRecorder()
			// when
			SignUpHandler(userService)(w, r)
			// then
			// Unmarshal both actual and expected bodies into interface{} so
			// the comparison ignores key order and whitespace.
			resp := w.Result()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				t.Fatalf("cannot read response: +%v", resp.Body)
			}
			var result interface{}
			err = json.Unmarshal(body, &result)
			if err != nil {
				t.Fatalf("cannot unmarshal response: %v", string(body))
			}
			var expectedResult interface{}
			err = json.Unmarshal(expected, &expectedResult)
			if err != nil {
				t.Fatalf("cannot unmarshal response: %v", string(expected))
			}
			// Assert:
			if !cmp.Equal(tt.expectedStatusCode, resp.StatusCode) {
				t.Errorf("status code mismatch diff: %v", cmp.Diff(tt.expectedStatusCode, resp.StatusCode))
			}
			if !cmp.Equal(expectedResult, result) {
				t.Errorf("expected response diff: %v %v", result, cmp.Diff(expectedResult, result))
			}
		})
	}
}
|
package main
import (
"fmt"
"net/http"
"persons.com/api/infrastructure/server"
)
// main starts the HTTP server in a goroutine and blocks until it fails.
//
// Fixed: the old code received the server's error from the channel and
// silently discarded it; the failure reason is now printed before exit.
func main() {
	errs := make(chan error, 1)
	go func() {
		fmt.Println("Listening on port :5000")
		// ListenAndServe only returns on failure.
		errs <- http.ListenAndServe(server.HttpPort(), server.StartRouter())
	}()
	fmt.Println("server error:", <-errs)
}
//app flow: Domain -> Service -> useCases -> Repository -> Serializers(json, messagePack, grpc, soap, etc) -> Handlers(controllers) -> Transporter(http, websockets, GraphQl etc.)
|
package main
// getPivot returns the index of the element to use as the quicksort pivot.
//
// Fixed: the old version returned slice[0] — an element VALUE — which the
// caller then used as an INDEX into the slice, panicking whenever the first
// element was negative or >= len(slice). A pivot choice must be a valid
// index; the first position is used here.
func getPivot(slice []int) int {
	return 0
}
// quickSort sorts slice in place using Lomuto partitioning, with getPivot
// interpreted as the index of the initial pivot element.
//
// Fixed: the recursion reuses the caller's pivot index on shorter
// subslices, and callers historically passed element VALUES here, so the
// index is now validated and clamped to 0 whenever it falls outside the
// current (sub)slice instead of panicking.
func quickSort(slice []int, getPivot int) {
	if len(slice) < 2 {
		return
	}
	pivot := getPivot
	if pivot < 0 || pivot >= len(slice) {
		pivot = 0
	}
	right := len(slice) - 1
	// Move the pivot element to the end, then partition around it.
	slice[pivot], slice[right] = slice[right], slice[pivot]
	left := 0
	for i := range slice {
		if slice[i] < slice[right] {
			slice[left], slice[i] = slice[i], slice[left]
			left++
		}
	}
	// Place the pivot in its final position and recurse on both halves.
	slice[left], slice[right] = slice[right], slice[left]
	quickSort(slice[:left], getPivot)
	quickSort(slice[left+1:], getPivot)
}
// main sorts a sample slice in place (no output is produced).
func main() {
	data := []int{1, 2, 4, -5, 3, 4, 2, 13, -6, 5, 3, -4, 2}
	quickSort(data, getPivot(data))
}
|
package models
//轮播图
type Carousel struct {
Id uint `json:"id" gorm:"primaryKey;not null;autoIncrement;comment:'主键'"`
Pid uint `json:"pid" gorm:"bigint(20);not null;comment:'商品id'" `
ImgUrl string `json:"img_url" gorm:"type:varchar(200);not null;comment:'图片地址'"`
IsPlay bool `json:"is_play" gorm:"type:tinyint(4);default:0;commit:'0不播放,1播放'" `
}
// GetCarouselList returns every carousel row whose is_play flag is set.
func GetCarouselList() ([]Carousel, error) {
	var list []Carousel
	err := Db.Find(&list, "is_play = true").Error
	return list, err
}
|
package dp
// EmployeeSummaryStatus enumerates the states of an employee summary.
type EmployeeSummaryStatus string

const (
	// EmployeeSummaryStatusEmpty marks a summary with no content.
	EmployeeSummaryStatusEmpty EmployeeSummaryStatus = "empty"
)
|
package main
import (
"fmt"
"strconv"
"github.com/freignat91/mlearning/api"
"github.com/spf13/cobra"
)
// PropagateCmd is the cobra subcommand "propagate value1 value2 ...": it
// pushes the given values to the network's input layer via the CLI's
// propagate method, aborting on any error.
var PropagateCmd = &cobra.Command{
	Use:   "propagate",
	Short: "push value to input layer value1, value2, ...",
	Run: func(cmd *cobra.Command, args []string) {
		if err := mlCli.propagate(cmd, args); err != nil {
			mlCli.Fatal("Error: %v\n", err)
		}
	},
}
// init registers the propagate subcommand under the network command.
func init() {
	NetworkCmd.AddCommand(PropagateCmd)
}
// propagate parses the command-line arguments as float64 values, pushes
// them into the network's input layer through the API, and prints the
// resulting outputs.
//
// Fixed: strconv.ParseFloat errors were silently ignored, feeding 0 into
// the network for any non-numeric argument; they are now returned.
func (m *mlCLI) propagate(cmd *cobra.Command, args []string) error {
	if len(args) < 1 {
		m.Fatal("at lest one argument is mandatory\n")
	}
	values := make([]float64, 0, len(args))
	for _, arg := range args {
		value, err := strconv.ParseFloat(arg, 64)
		if err != nil {
			return fmt.Errorf("invalid input value %q: %v", arg, err)
		}
		values = append(values, value)
	}
	api := mlapi.New(m.server)
	outs, err := api.Propagate(values)
	if err != nil {
		return err
	}
	fmt.Printf("Outs: %v\n", outs)
	return nil
}
|
// ˅
package main
// ˄
// IDisplay is a read-only view of a fixed text grid: its column and row
// dimensions plus the text of each row. (The ˅/˄ markers are anchors for
// the code generator that produced this file and must be preserved.)
type IDisplay interface {
	GetColumns() int
	GetRows() int
	GetLineText(row int) string
	// ˅
	// ˄
}
// ˅
// ˄
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.