text stringlengths 11 4.05M |
|---|
package patch
// Operation 对资源的操作类型
// 使用 JsonPatchType 进行描述
// 参考: https://jsonpatch.com/
type Operation string
// JsonPatch 中支持的操作
const (
Add Operation = "add"
Remove Operation = "remove"
Replace Operation = "replace"
Copy Operation = "copy"
Move Operation = "move"
Test Operation = "test"
)
// Patch 表示了对资源的操作信息
type Patch struct {
Op Operation `json:"op,inline"`
Path string `json:"path,inline"`
Value interface{} `json:"value"`
}
|
package utils
//
// rand.go
// Copyright (C) 2020 light <light@1870499383@qq.com>
//
// Distributed under terms of the MIT license.
//
import (
	"math/rand"
	"sync"
	"time"
	"unsafe"
)
// letters is the alphabet used for random string generation (a-z, A-Z, 0-9).
var letters = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")

// size caches len(letters) for seed.Int31n.
var size = int32(len(letters))

// seed is a dedicated PRNG seeded at startup. A *rand.Rand is NOT safe for
// concurrent use (unlike the top-level math/rand functions), so every access
// must hold seedMu.
var seed = rand.New(rand.NewSource(time.Now().UnixNano()))

// seedMu serializes access to seed; RandSeq and Rand may be called from
// multiple goroutines.
var seedMu sync.Mutex

// RandSeq produce random string seq
func RandSeq(n int) string {
	b := make([]byte, n)
	seedMu.Lock()
	for i := range b {
		b[i] = letters[seed.Int31n(size)]
	}
	seedMu.Unlock()
	// b is never referenced again, so the zero-copy conversion is safe.
	return *(*string)(unsafe.Pointer(&b))
}

// Rand returns n random bytes (math/rand quality — not cryptographically strong).
func Rand(n int) []byte {
	b := make([]byte, n)
	seedMu.Lock()
	seed.Read(b) // rand.Rand.Read is documented to never return an error
	seedMu.Unlock()
	return b
}
|
package ravendb
// Note: IndexQueryWithParameters is part of IndexQuery in index_query.go
|
package kus
import (
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"log"
"mhsykongzhiqi/moxings"
)
// Charuyinpinmimajiu inserts the given record and reports success.
func Charuyinpinmimajiu(moxing *moxings.Yinpinmimajius) bool {
	result := Jichucaozuo().Create(moxing)
	if err := result.Error; err != nil {
		log.Println("Yinpinmimajius----cr.Error---", err)
		return false
	}
	return true
}
// Chaxunyigeyinpinmimajiu queries a single record matching moxing.
// Returns nil when the record does not exist or the query fails.
func Chaxunyigeyinpinmimajiu(moxing moxings.Yinpinmimajius) *moxings.Yinpinmimajius {
	find := Jichucaozuo().Find(&moxings.Yinpinmimajius{}, moxing)
	if find.Error != nil {
		if find.Error == gorm.ErrRecordNotFound {
			// expected miss: no matching record
			log.Println("Chaxunyigeyinpinmimajiu--find.Error---", find.Error)
			return nil
		}
		// Bug fix: other database errors previously fell through and returned
		// a zero-valued struct as if it were a real record. Fail explicitly.
		log.Println("Chaxunyigeyinpinmimajiu--find.Error---", find.Error)
		return nil
	}
	// find.Value is the *moxings.Yinpinmimajius passed to Find above,
	// so this assertion cannot fail.
	ret := find.Value.(*moxings.Yinpinmimajius)
	return ret
}
|
package main
import (
"assist/db"
assist_db "assist/db"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"sync"
"time"
gorilla_context "github.com/gorilla/context"
"github.com/gorilla/mux"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// AuthenticatedLevel is a bitmask describing the caller's relationship to
// the user/squad targeted by a request. Levels combine with "|".
type AuthenticatedLevel uint8

// Authorization levels, combinable as a bitmask.
const (
	myself AuthenticatedLevel = 1 << iota // acting on the caller's own account
	squadMember                           // caller is a regular member of the squad
	squadAdmin                            // caller is an admin of the squad
	squadOwner                            // caller owns the squad
	systemAdmin                           // caller is a system-wide admin
)
// checkAuthorization resolves the effective user id ("me" is replaced by the
// current session user) and computes the caller's authorization level.
// Squad/self levels are masked by requiredLevel, so only the levels the
// caller asked for can be granted (systemAdmin is granted unmasked).
// It also marks the request as auth-checked in the gorilla context.
func (app *App) checkAuthorization(r *http.Request, userId string, squadId string, requiredLevel AuthenticatedLevel) (_ string, level AuthenticatedLevel) {
	currentUserId := app.sd.getCurrentUserID(r)
	if app.dev {
		// dev-only timing instrumentation
		defer TimeTrack("checkAuthorization "+r.URL.Path+" for user "+currentUserId, time.Now())
	}
	sd := app.sd.getCurrentUserData(r)
	if sd.Status == db.Admin {
		level = systemAdmin
	}
	if userId == "me" {
		userId = currentUserId
		// Users pending approval cannot act even on their own account.
		if sd.Status != db.PendingApprove {
			level = level | (myself & requiredLevel)
		}
	}
	if squadId != "" {
		// Membership lookup errors are treated as "no squad-level rights".
		status, err := app.db.GetSquadMemberStatus(r.Context(), currentUserId, squadId)
		if err == nil {
			switch status {
			case assist_db.Member:
				level = level | (squadMember & requiredLevel)
			case assist_db.Admin:
				level = level | (squadAdmin & requiredLevel)
			case assist_db.Owner:
				level = level | (squadOwner & requiredLevel)
			}
		}
	}
	gorilla_context.Set(r, "AuthChecked", true)
	return userId, level
}
// method handlers
// methodCreateSquad creates a new squad named after the request body's Name
// field, owned by the current user.
func (app *App) methodCreateSquad(w http.ResponseWriter, r *http.Request) error {
	// Expected body: {"Name": "<squad name>"}.
	var body struct{ Name string }
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}
	squadId := body.Name

	// Only authenticated (non-pending) users may create squads.
	userId, authLevel := app.checkAuthorization(r, "me", "", myself)
	if authLevel == 0 {
		err := fmt.Errorf("Current user %v is not authorized to create squads", userId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}

	if err := app.db.CreateSquad(r.Context(), squadId, userId); err != nil {
		st, ok := status.FromError(err)
		err = fmt.Errorf("Failed to create squad %v: %w", squadId, err)
		// Duplicate squad names map to 409, everything else to 500.
		if ok && st.Code() == codes.AlreadyExists {
			http.Error(w, err.Error(), http.StatusConflict)
		} else {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	return nil
}
// methodGetUserSquads lists the squads of the given user, optionally
// filtered by the "status" query parameter.
func (app *App) methodGetUserSquads(w http.ResponseWriter, r *http.Request) error {
	ctx := r.Context()
	userId := mux.Vars(r)["userId"]
	statusFilter := r.URL.Query().Get("status")

	// authorization check: users may list their own squads; listing another
	// user's squads requires system-admin rights.
	requiredLevel := myself
	if userId != "me" {
		requiredLevel = systemAdmin
	}
	userId, authLevel := app.checkAuthorization(r, userId, "", requiredLevel)
	if authLevel == 0 {
		err := fmt.Errorf("Current user is not authorized to get squads for user %v", userId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}

	squads, err := app.db.GetUserSquadsMap(ctx, userId, statusFilter, authLevel&systemAdmin != 0)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(squads); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	return nil
}
// methodGetSquads lists squads the current user is NOT a member of.
func (app *App) methodGetSquads(w http.ResponseWriter, r *http.Request) error {
	ctx := r.Context()
	// authorization check
	userId, authLevel := app.checkAuthorization(r, "me", "", myself)
	if authLevel == 0 {
		// operation is not authorized, return error
		err := fmt.Errorf("Current user %v is not authorized to get squads", userId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}
	other_squads, err := app.db.GetOtherSquads(ctx, userId)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	err = json.NewEncoder(w).Encode(other_squads)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	// Consistency fix: err is provably nil here; return nil explicitly like
	// the sibling handlers instead of a misleading `return err`.
	return nil
}
// methodDeleteSquad deletes a squad. Only its owner (or a system admin)
// may do so.
func (app *App) methodDeleteSquad(w http.ResponseWriter, r *http.Request) error {
	ctx := r.Context()
	squadId := mux.Vars(r)["id"]

	// authorization check
	if _, authLevel := app.checkAuthorization(r, "", squadId, squadOwner); authLevel == 0 {
		err := fmt.Errorf("Current user is not authorized to delete squad " + squadId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}

	if err := app.db.DeleteSquad(ctx, squadId); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	return nil
}
// methodGetSquad returns squad details (info, owner data, admin list) for
// squad members/admins/owners. Squad info + owner data and the admin list
// are fetched concurrently.
func (app *App) methodGetSquad(w http.ResponseWriter, r *http.Request) error {
	params := mux.Vars(r)
	squadId := params["id"]
	_, authLevel := app.checkAuthorization(r, "", squadId, squadMember|squadOwner|squadAdmin)
	if authLevel == 0 {
		err := fmt.Errorf("Current user is not authenticated to get squad " + squadId + " details")
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}
	// errs slots: [0] squad info, [1] admin list, [2] owner data.
	// Each slot is written by exactly one goroutine, so no locking is needed.
	errs := make([]error, 3)
	var ret struct {
		*db.SquadInfo
		OwnerInfo *db.UserData              `json:"ownerInfo"`
		Admins    []*db.SquadUserInfoRecord `json:"admins"`
	}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		// Owner data depends on SquadInfo.Owner, so it is fetched sequentially
		// within this goroutine.
		ret.SquadInfo, errs[0] = app.db.GetSquad(r.Context(), squadId)
		if errs[0] == nil {
			ret.OwnerInfo, errs[2] = app.db.GetUserData(r.Context(), ret.SquadInfo.Owner)
		}
		wg.Done()
	}()
	go func() {
		filter := map[string]string{"Status": "Admin"}
		ret.Admins, errs[1] = app.db.GetSquadMembers(r.Context(), squadId, nil, &filter)
		wg.Done()
	}()
	wg.Wait()
	// Any failed fetch aborts the response with 500.
	for _, err := range errs {
		if err != nil {
			err = fmt.Errorf("Failed to retrieve squad %v info: %w", squadId, err)
			log.Println(err.Error())
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return err
		}
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	err := json.NewEncoder(w).Encode(ret)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	return err
}
// methodGetSquadMembers lists squad members for squad admins/owners,
// optionally filtered by query parameters and a "from" timestamp.
func (app *App) methodGetSquadMembers(w http.ResponseWriter, r *http.Request) (err error) {
	ctx := r.Context()
	squadId := mux.Vars(r)["id"]
	query := r.URL.Query()

	// Optional RFC3339 lower bound; the zero time is passed when absent.
	var timeFrom time.Time
	if from := query.Get("from"); from != "" {
		timeFrom, err = time.Parse(time.RFC3339, from)
		if err != nil {
			err = fmt.Errorf("Failed to convert from to a time struct: %w", err)
			log.Println(err.Error())
			http.Error(w, err.Error(), http.StatusBadRequest)
			return err
		}
	}

	filter := map[string]string{
		"Keys":   query.Get("keys"),
		"Status": query.Get("status"),
		"Tag":    query.Get("tag"),
		"Notes":  query.Get("notes"),
	}

	// Only squad admins/owners (or system admins) may list members.
	if _, authLevel := app.checkAuthorization(r, "", squadId, squadAdmin|squadOwner); authLevel == 0 {
		err := fmt.Errorf("Current user is not authenticated to get squad " + squadId + " details")
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}

	squadMembers, err := app.db.GetSquadMembers(ctx, squadId, &timeFrom, &filter)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	err = json.NewEncoder(w).Encode(squadMembers)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	return err
}
// methodAddMemberToSquad adds a user to a squad. Squad/system admins add
// members directly; a user adding themself enters the PendingApprove state
// and squad admins get notified asynchronously.
func (app *App) methodAddMemberToSquad(w http.ResponseWriter, r *http.Request) error {
	params := mux.Vars(r)
	ctx := r.Context()
	squadId := params["squadId"]
	userId := params["userId"]
	var memberStatus assist_db.MemberStatusType
	userId, authLevel := app.checkAuthorization(r, userId, squadId, myself|squadAdmin|squadOwner)
	if authLevel&(squadOwner|squadAdmin|systemAdmin) != 0 {
		// admins add members directly
		memberStatus = assist_db.Member
	} else if authLevel&myself != 0 {
		// self-join requires admin approval
		memberStatus = assist_db.PendingApprove
	} else {
		// Fix: error message previously read "to to add".
		err := fmt.Errorf("Current user is not authorized to add user " + userId + " to squad " + squadId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}
	squadInfo, err := app.db.AddMemberToSquad(ctx, userId, squadId, memberStatus)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	if memberStatus == assist_db.PendingApprove {
		// Fix: capture the display name before spawning the goroutine — the
		// *http.Request must not be used after the handler returns.
		displayName := app.sd.getCurrentUserData(r).DisplayName
		go func() {
			squadAdmins, err := app.db.GetSquadMemberIds(context.Background(), squadId, []int{int(assist_db.Admin), int(assist_db.Owner)}, "")
			if err != nil {
				// Fix: was log.Println, which printed the "%v" verb literally.
				log.Printf("Failed to get list of squad "+squadId+" admins, will not be able to create notifications: %v", err)
			}
			app.ntfs.createNotification(squadAdmins, "Approve New Member", "User "+displayName+" wants to join "+squadId)
		}()
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	err = json.NewEncoder(w).Encode(squadInfo)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	return nil
}
// methodCreateReplicant creates a replicant (virtual member) inside a squad.
// Only squad admins/owners (or system admins) may do so.
func (app *App) methodCreateReplicant(w http.ResponseWriter, r *http.Request) error {
	params := mux.Vars(r)
	ctx := r.Context()
	squadId := params["squadId"]
	_, authLevel := app.checkAuthorization(r, "", squadId, squadAdmin|squadOwner)
	if authLevel == 0 {
		// Fix: error message previously read "to to add".
		err := fmt.Errorf("Current user is not authorized to add replicant to squad " + squadId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}
	var replicantInfo assist_db.UserInfo
	err := json.NewDecoder(r.Body).Decode(&replicantInfo)
	if err != nil {
		err = fmt.Errorf("Failed to decode replicant data from the HTTP request: %w", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}
	replicantId, err := app.db.CreateReplicant(ctx, &replicantInfo, squadId)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	err = json.NewEncoder(w).Encode(struct{ ReplicantId string }{replicantId})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}
	return nil
}
// methodUpdateSquadMember updates a member's status or notes in a squad.
// Only squad admins/owners (or system admins) may do so.
func (app *App) methodUpdateSquadMember(w http.ResponseWriter, r *http.Request) error {
	params := mux.Vars(r)
	ctx := r.Context()
	squadId := params["squadId"]
	userId := params["userId"]

	// The body carries either a status change or a notes update; when both
	// are present, status wins.
	var payload struct {
		Status *assist_db.MemberStatusType `json:"status"`
		Notes  *map[string]string          `json:"notes"`
	}
	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return err
	}

	// authorization check
	userId, authLevel := app.checkAuthorization(r, userId, squadId, squadAdmin|squadOwner)
	if authLevel == 0 {
		err := fmt.Errorf("Current user is not authorized to change user " + userId + " status in squad " + squadId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}

	var err error
	switch {
	case payload.Status != nil:
		err = app.db.SetSquadMemberStatus(ctx, userId, squadId, *payload.Status)
	case payload.Notes != nil:
		err = app.db.SetSquadMemberNotes(ctx, userId, squadId, payload.Notes)
	}
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	return nil
}
// methodDeleteMemberFromSquad removes a member from a squad. The member
// themself, a squad admin/owner, or a system admin may do so.
func (app *App) methodDeleteMemberFromSquad(w http.ResponseWriter, r *http.Request) error {
	params := mux.Vars(r)
	ctx := r.Context()
	squadId := params["squadId"]
	userId := params["userId"]
	// authorization check
	userId, authLevel := app.checkAuthorization(r, userId, squadId, myself|squadOwner|squadAdmin)
	if authLevel == 0 {
		// operation is not authorized, return error
		err := fmt.Errorf("Current user is not authorized to remove user " + userId + " from squad " + squadId)
		log.Println(err.Error())
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return err
	}
	err := app.db.DeleteMemberFromSquad(ctx, userId, squadId)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		// Bug fix: the missing return here previously fell through and wrote
		// a 200 OK header after the error response, and reported success to
		// the caller.
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	return nil
}
|
package main
import (
"encoding/json"
"io"
"net/http"
"net/url"
"path"
"github.com/pkg/errors"
"go.uber.org/zap"
)
// Colors models the JSON response of the color API: an array of color
// entries under "colors", each carrying a single "value" string
// (presumably a hex color code — verify against the API docs).
type Colors struct {
	Colors []struct {
		Value string `json:"value"`
	} `json:"colors"`
}
func decodeBody(body io.Reader, out interface{}) error {
decoder := json.NewDecoder(body)
return decoder.Decode(out)
}
// NoopClient is an API client for the noops challenge endpoint.
type NoopClient struct {
	URL            *url.URL     // base endpoint URL; request paths are joined onto it
	maxRetryNumber uint64       // configured retry budget (not yet used by any visible method)
	HTTPClient     *http.Client // transport used for all requests
	DefaultHeader  http.Header  // headers applied to every request
	authHeader     string       // value sent as the Authorization header
}
// NewNoopClient is constructor. It validates and parses endpointURL,
// stores the retry budget and secret key, and prepares default headers
// (Content-Type, and User-Agent when provided).
func NewNoopClient(endpointURL, secretKey, userAgent string, maxRetry uint64, httpClient *http.Client) (*NoopClient, error) {
	if len(endpointURL) == 0 {
		return nil, errors.New("invalid url")
	}
	parsedURL, err := url.ParseRequestURI(endpointURL)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse url: %s", endpointURL)
	}
	// Robustness fix: a nil httpClient previously caused a nil-pointer panic
	// later in Get(); fall back to the default client.
	if httpClient == nil {
		httpClient = http.DefaultClient
	}
	client := &NoopClient{
		URL:            parsedURL,
		maxRetryNumber: maxRetry,
		HTTPClient:     httpClient,
		DefaultHeader:  make(http.Header),
	}
	client.authHeader = secretKey //TODO
	if userAgent != "" {
		client.DefaultHeader.Set("User-Agent", userAgent)
	}
	client.DefaultHeader.Set("Content-Type", "application/json; charset=utf-8")
	return client, nil
}
// RequestOptions is custom option for each request.
type RequestOptions struct {
	Params  map[string]string // URL query parameters
	Headers map[string]string // extra headers added on top of the client defaults
	Body    io.Reader         // request body (nil for body-less methods)
}
// rawRequest builds a generic *http.Request: it joins subPath onto the base
// URL, encodes ro.Params as the query string, and applies default plus
// per-request headers.
func (c *NoopClient) rawRequest(method, subPath string, ro *RequestOptions) (*http.Request, error) {
	if method == "" {
		return nil, errors.New("missing requestMethod")
	}
	if ro == nil {
		return nil, errors.New("missing RequestOptions")
	}
	// Append subPath and params to the URL (copy so the base URL is untouched).
	u := *c.URL
	u.Path = path.Join(c.URL.Path, subPath)
	var params = make(url.Values)
	for k, v := range ro.Params {
		params.Add(k, v)
	}
	u.RawQuery = params.Encode()
	request, err := http.NewRequest(method, u.String(), ro.Body)
	if err != nil {
		return nil, err
	}
	// Set default headers. Bug fix: assigning v directly aliased the
	// DefaultHeader value slices, so a later Header.Add on the request could
	// mutate the client's shared defaults; clone each slice instead.
	for k, v := range c.DefaultHeader {
		request.Header[k] = append([]string(nil), v...)
	}
	// Per-request headers are added on top.
	for k, v := range ro.Headers {
		request.Header.Add(k, v)
	}
	return request, nil
}
// Get fetches the "hexbot" endpoint and decodes the response into Colors.
func (c *NoopClient) Get() (Colors, error) {
	var colors Colors
	req, err := c.rawRequest("GET", "hexbot", &RequestOptions{
		Body: nil,
		Headers: map[string]string{
			"Authorization": c.authHeader,
		},
	})
	// Bug fix: this error was previously discarded, and a nil req would have
	// been passed to Do.
	if err != nil {
		return colors, err
	}
	resp, err := c.HTTPClient.Do(req)
	// Bug fix: resp is nil on transport error, so Close must only be deferred
	// after the error check (the old code panicked here).
	if err != nil {
		return colors, err
	}
	defer resp.Body.Close()
	err = decodeBody(resp.Body, &colors)
	return colors, err
}
// main fetches a random color from the noops challenge API and logs it.
func main() {
	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	endpointURL := "https://api.noopschallenge.com"
	secretKey := "secret"
	client, err := NewNoopClient(endpointURL, secretKey, "", 7, &http.Client{})
	// Fix: errors were previously discarded with `_ = err`.
	if err != nil {
		logger.Fatal("failed to create client", zap.Error(err))
	}
	colors, err := client.Get()
	if err != nil {
		logger.Fatal("failed to get colors", zap.Error(err))
	}
	logger.Info("get colors", zap.Reflect("colors", colors))
}
|
package main
import (
"fmt"
"sort"
"strings"
"github.com/liblxn/lxnc/internal/cldr"
)
// locale generates the Locale type, its constructor, and the locale tag
// parser for the target package from CLDR lookup tables.
type locale struct {
	packageName       string                      // name of the package being generated
	tags              *tagLookupVar               // lang/script/region tag lookups
	parentTags        *parentTagLookupVar         // explicit child -> parent locale mapping
	regionContainment *regionContainmentLookupVar // region -> containing macro regions
}
// newLocale bundles the lookup variables into a locale generator.
func newLocale(packageName string, tags *tagLookupVar, parentTags *parentTagLookupVar, regionContainment *regionContainmentLookupVar) *locale {
	l := locale{
		packageName:       packageName,
		tags:              tags,
		parentTags:        parentTags,
		regionContainment: regionContainment,
	}
	return &l
}
// imports lists the packages required by the generated locale code.
func (l *locale) imports() []string {
	deps := []string{"github.com/liblxn/lxnc/internal/errors"}
	return deps
}
// generate emits Go source for the target package into p: the Locale type,
// its constructor, Subtags/String/parent helpers, and the locale tag parser.
// All backtick strings below are emitted verbatim into the generated file,
// so their content must not be reformatted.
func (l *locale) generate(p *printer) {
	root := cldr.Identity{Language: "und"}
	newLocale := "NewLocale"
	if strings.ToLower(l.packageName) == "locale" {
		// avoid the stutter "locale.NewLocale" inside package "locale"
		newLocale = "New"
	}
	p.Println(`const root Locale = `, l.tags.tagID(root))
	p.Println()
	p.Println(`// Locale represents a reference to the data of a CLDR locale.`)
	p.Println(`type Locale tagID`)
	p.Println()
	// constructor: parse the tag, resolve subtag ids, fall back to region containment
	p.Println(`// `, newLocale, ` looks up a locale from the given tag (e.g. "en-US"). If the tag is`)
	p.Println(`// malformed or cannot be found in the CLDR specification, an error will`)
	p.Println(`// be returned.`)
	p.Println(`func `, newLocale, `(tag string) (Locale, error) {`)
	p.Println(` var p localeTagParser`)
	p.Println(` if err := p.parse(tag); err != nil {`)
	p.Println(` return 0, err`)
	p.Println(` }`)
	p.Println()
	p.Println(` lang := langTags.langID(p.lang)`)
	p.Println(` script := scriptTags.scriptID(p.script)`)
	p.Println(` region := regionTags.regionID(p.region)`)
	p.Println(` switch {`)
	p.Println(` case lang == 0:`)
	p.Println(` return 0, errors.Newf("unsupported language: %s", p.lang)`)
	p.Println(` case len(p.script) != 0 && script == 0:`)
	p.Println(` return 0, errors.Newf("unsupported script: %s", p.script)`)
	p.Println(` }`)
	p.Println()
	p.Println(` var tagID tagID`)
	p.Println(` if len(p.region) == 0 || region != 0 {`)
	p.Println(` tagID = localeTags.tagID(lang, script, region)`)
	p.Println(` }`)
	p.Println(` if tagID == 0 && len(p.region) != 0 {`)
	p.Println(` var parents [2]regionID`)
	p.Println(` nparents := regionContainment.containmentIDs(p.region, parents[:])`)
	p.Println(` if nparents == 0 && region == 0 {`)
	p.Println(` return 0, errors.Newf("unsupported region: %s", p.region)`)
	p.Println(` }`)
	p.Println()
	p.Println(` for i := 0; i < nparents; i++ {`)
	p.Println(` if tagID = localeTags.tagID(lang, script, parents[i]); tagID != 0 {`)
	p.Println(` break`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println()
	p.Println(` if tagID == 0 {`)
	p.Println(` return 0, errors.Newf("locale not found: %s", tag)`)
	p.Println(` }`)
	p.Println(` return Locale(tagID), nil`)
	p.Println(`}`)
	p.Println()
	// Subtags accessor
	p.Println(`// Subtags returns the language, script, and region subtags of the locale. If one`)
	p.Println(`// of the subtags are not specified, an empty string will be returned for this subtag.`)
	p.Println(`func (l Locale) Subtags() (lang string, script string, region string) {`)
	p.Println(` langID, scriptID, regionID := l.tagIDs()`)
	p.Println(` if langID != 0 {`)
	p.Println(` lang = `, l.tags.langs.name, `.lang(langID)`)
	p.Println(` } else {`)
	p.Println(` lang = "und"`)
	p.Println(` }`)
	p.Println(` if scriptID != 0 {`)
	p.Println(` script = `, l.tags.scripts.name, `.script(scriptID)`)
	p.Println(` }`)
	p.Println(` if regionID != 0 {`)
	p.Println(` region = `, l.tags.regions.name, `.region(regionID)`)
	p.Println(` }`)
	p.Println(` return`)
	p.Println(`}`)
	p.Println()
	// String: "lang[-script][-region]"
	p.Println(`// String returns the string represenation of the locale.`)
	p.Println(`func (l Locale) String() string {`)
	p.Println(` const sep = '-'`)
	p.Println(` var buf [12]byte`)
	p.Println()
	p.Println(` lang, script, region := l.Subtags()`)
	p.Println(` n := copy(buf[:], lang)`)
	p.Println(` if script != "" {`)
	p.Println(` buf[n] = sep`)
	p.Println(` n++`)
	p.Println(` n += copy(buf[n:], script)`)
	p.Println(` }`)
	p.Println(` if region != "" {`)
	p.Println(` buf[n] = sep`)
	p.Println(` n++`)
	p.Println(` n += copy(buf[n:], region)`)
	p.Println(` }`)
	p.Println(` return string(buf[:n])`)
	p.Println(`}`)
	p.Println()
	// internal id accessor + parent lookup (explicit mapping, then truncation)
	p.Println(`func (l Locale) tagIDs() (langID, scriptID, regionID) {`)
	p.Println(` tag := `, l.tags.name, `.tag(tagID(l))`)
	p.Println(` return tag.langID(), tag.scriptID(), tag.regionID()`)
	p.Println(`}`)
	p.Println()
	p.Println(`func (l Locale) parent() Locale {`)
	p.Println(` if parentID := `, l.parentTags.name, `.parentID(tagID(l)); parentID != 0 {`)
	p.Println(` return Locale(parentID)`)
	p.Println(` }`)
	p.Println()
	p.Println(` // truncate locale`)
	p.Println(` langID, scriptID, regionID := l.tagIDs()`)
	p.Println(` if regionID != 0 {`)
	p.Println(` regionID = 0`)
	p.Println(` if tid := `, l.tags.name, `.tagID(langID, scriptID, regionID); tid != 0 {`)
	p.Println(` return Locale(tid)`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(` if scriptID != 0 {`)
	p.Println(` scriptID = 0`)
	p.Println(` if tid := `, l.tags.name, `.tagID(langID, scriptID, regionID); tid != 0 {`)
	p.Println(` return Locale(tid)`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(` return root`)
	p.Println(`}`)
	// locale tag parser
	p.Println()
	p.Println(`type localeTagParser struct {`)
	p.Println(` s string`)
	p.Println(` tok string`)
	p.Println(` idx int`)
	p.Println(` buf [10]byte`)
	p.Println()
	p.Println(` lang []byte`)
	p.Println(` script []byte`)
	p.Println(` region []byte`)
	p.Println(`}`)
	p.Println()
	p.Println(`func (p *localeTagParser) parse(tag string) error {`)
	p.Println(` if tag == "" {`)
	p.Println(` return errors.New("empty locale tag")`)
	p.Println(` }`)
	p.Println()
	p.Println(` p.s = tag`)
	p.Println(` p.idx = 0`)
	p.Println(` p.next()`)
	p.Println()
	p.Println(` p.lang = p.parseLang()`)
	p.Println(` if len(p.lang) == 0 {`)
	p.Println(` if len(p.tok) == 0 {`)
	p.Println(` return errors.Newf("malformed locale tag: %s", tag)`)
	p.Println(` }`)
	p.Println(` return errors.Newf("invalid language subtag: %s", p.tok)`)
	p.Println(` }`)
	p.Println(` p.script = p.parseScript()`)
	p.Println(` p.region = p.parseRegion()`)
	p.Println()
	p.Println(` if len(p.tok) != 0 {`)
	p.Println(` return errors.Newf("unsupported locale suffix: %s", p.s[p.idx:])`)
	p.Println(` }`)
	p.Println(` return nil`)
	p.Println(`}`)
	p.Println()
	p.Println(`func (p *localeTagParser) parseLang() []byte {`)
	p.Println(` var lang []byte`)
	p.Println(` switch len(p.tok) {`)
	p.Println(` case 2: // alpha{2}`)
	p.Println(` lang = p.buf[:2]`)
	p.Println(` lang[0] = p.tok[0] | 0x20 // lowercase`)
	p.Println(` lang[1] = p.tok[1] | 0x20 // lowercase`)
	p.Println(` case 3: // alpha{3}`)
	p.Println(` lang = p.buf[:3]`)
	p.Println(` lang[0] = p.tok[0] | 0x20 // lowercase`)
	p.Println(` lang[1] = p.tok[1] | 0x20 // lowercase`)
	p.Println(` lang[2] = p.tok[2] | 0x20 // lowercase`)
	p.Println(` default:`)
	p.Println(` return nil`)
	p.Println(` }`)
	p.Println(` p.next()`)
	p.Println(` return lang`)
	p.Println(`}`)
	p.Println()
	p.Println(`func (p *localeTagParser) parseScript() []byte {`)
	p.Println(` if len(p.tok) != 4 {`)
	p.Println(` return nil`)
	p.Println(` }`)
	p.Println(` script := p.buf[3:7]`)
	p.Println(` script[0] = p.tok[0] & 0xdf // uppercase`)
	p.Println(` script[1] = p.tok[1] | 0x20 // lowercase`)
	p.Println(` script[2] = p.tok[2] | 0x20 // lowercase`)
	p.Println(` script[3] = p.tok[3] | 0x20 // lowercase`)
	p.Println(` p.next()`)
	p.Println(` return script`)
	p.Println(`}`)
	p.Println()
	p.Println(`func (p *localeTagParser) parseRegion() []byte {`)
	p.Println(` var region []byte`)
	p.Println(` switch len(p.tok) {`)
	p.Println(` case 2: // alpha{2}`)
	p.Println(` region = p.buf[7:9]`)
	p.Println(` region[0] = p.tok[0] & 0xdf // uppercase`)
	p.Println(` region[1] = p.tok[1] & 0xdf // uppercase`)
	p.Println(` case 3: // digit{3}`)
	p.Println(` region = p.buf[7:10]`)
	p.Println(` region[0] = p.tok[0]`)
	p.Println(` region[1] = p.tok[1]`)
	p.Println(` region[2] = p.tok[2]`)
	p.Println(` default:`)
	p.Println(` return nil`)
	p.Println(` }`)
	p.Println(` p.next()`)
	p.Println(` return region`)
	p.Println(`}`)
	p.Println()
	p.Println(`func (p *localeTagParser) next() {`)
	p.Println(` start := p.idx`)
	p.Println(` for p.idx < len(p.s) {`)
	p.Println(` if c := p.s[p.idx]; c == '-' || c == '_' {`)
	p.Println(` p.tok = p.s[start:p.idx]`)
	p.Println(` p.idx++`)
	p.Println(` return`)
	p.Println(` }`)
	p.Println(` p.idx++`)
	p.Println(` }`)
	p.Println(` p.tok = p.s[start:p.idx]`)
	p.Println(`}`)
}
// testImports lists the packages required by the generated locale tests.
func (l *locale) testImports() []string {
	imports := []string{"strings"}
	return imports
}
// generateTest emits the Go test source for the generated locale code:
// constructor lookups (including case/underscore variants), region
// containment fallbacks, invalid-tag errors, subtag accessors, and parent
// resolution. Backtick strings are emitted verbatim into the test file.
func (l *locale) generateTest(p *printer) {
	newLocale := "NewLocale"
	if strings.ToLower(l.packageName) == "locale" {
		newLocale = "New"
	}
	// newTagID renders the numeric tag id for an identity as a decimal literal.
	newTagID := func(id cldr.Identity) string {
		return fmt.Sprintf("%d", l.tags.tagID(id))
	}
	// TestNewLocale: every known tag plus its underscore/lower/upper variants
	// must resolve to the same locale id.
	p.Println(`func Test`, newLocale, `(t *testing.T) {`)
	p.Println(` expected := map[string]Locale{ // tag => locale`)
	l.tags.iterate(func(id cldr.Identity) {
		tagID := newTagID(id)
		key := id.String()
		underscoreKey := strings.Replace(key, "-", "_", -1)
		lowerKey := strings.ToLower(key)
		upperKey := strings.ToUpper(key)
		p.Print(` "`, key, `": `, tagID)
		if underscoreKey != key {
			p.Print(`, "`, underscoreKey, `": `, tagID)
		}
		if lowerKey != key {
			p.Print(`, "`, lowerKey, `": `, tagID)
		}
		if upperKey != key {
			p.Print(`, "`, upperKey, `": `, tagID)
		}
		p.Println(`,`)
	})
	p.Println(` }`)
	p.Println()
	p.Println(` for tag, expectedLoc := range expected {`)
	p.Println(` loc, err := `, newLocale, `(tag)`)
	p.Println(` switch {`)
	p.Println(` case err != nil:`)
	p.Println(` t.Errorf("unexpected error for %s: %v", tag, err)`)
	p.Println(` case loc != expectedLoc:`)
	p.Println(` t.Errorf("unexpected locale for %s: %s", tag, loc.String())`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(`}`)
	p.Println()
	// TestNewLocaleWithRegionContainments: child regions of numeric macro
	// regions must resolve to their containing locale.
	p.Println(`func Test`, newLocale, `WithRegionContainments(t *testing.T) {`)
	p.Println(` expected := map[string]Locale{ // tag => locale`)
	parentIDs := make(map[string][]cldr.Identity) // truncated parent region tag => parent identities
	l.tags.iterate(func(id cldr.Identity) {
		// numeric (digit{3}) territories denote macro regions
		if len(id.Territory) == 3 && '0' <= id.Territory[0] && id.Territory[0] <= '9' {
			tag := id.Truncate().String()
			parentIDs[tag] = append(parentIDs[tag], id)
		}
	})
	parentTags := make([]string, 0, len(parentIDs))
	for tag := range parentIDs {
		parentTags = append(parentTags, tag)
	}
	sort.Strings(parentTags)
	localeContainments := make([]_localeContainment, 0, 2*len(parentIDs))
	for _, tag := range parentTags {
		pids := parentIDs[tag]
		containments := make([]_localeContainment, len(pids))
		for i, pid := range pids {
			children := l.regionContainment.childrenOf(pid.Territory)
			// filter out existing tags
			c := make([]string, 0, len(children))
			for _, child := range children {
				id := pid
				id.Territory = child
				if !l.tags.containsTag(id) {
					c = append(c, child)
				}
			}
			sort.Strings(c)
			containments[i] = _localeContainment{
				id:           pid,
				childRegions: c,
			}
		}
		sort.Sort(_localeContainmentsByChildRegionCount(containments))
		// remove duplicate child regions
		for i := 1; i < len(containments); i++ {
			existingChildren := make(map[string]struct{})
			for _, containment := range containments[:i] {
				for _, child := range containment.childRegions {
					existingChildren[child] = struct{}{}
				}
			}
			children := make([]string, 0, len(containments[i].childRegions))
			id := containments[i].id
			for _, child := range containments[i].childRegions {
				id.Territory = child
				if _, exists := existingChildren[child]; !exists {
					children = append(children, child)
				}
			}
			containments[i].childRegions = children
		}
		localeContainments = append(localeContainments, containments...)
	}
	for _, containment := range localeContainments {
		id := containment.id
		tagID := newTagID(id)
		id.Territory = containment.childRegions[0]
		p.Print(` "`, id.String(), `": `, tagID)
		for k := 1; k < len(containment.childRegions); k++ {
			id.Territory = containment.childRegions[k]
			p.Print(`, "`, id.String(), `": `, tagID)
		}
		p.Println(`,`)
	}
	p.Println(` }`)
	p.Println()
	p.Println(` for tag, expectedLoc := range expected {`)
	p.Println(` loc, err := `, newLocale, `(tag)`)
	p.Println(` switch {`)
	p.Println(` case err != nil:`)
	p.Println(` t.Errorf("unexpected error for %s: %v", tag, err)`)
	p.Println(` case loc != expectedLoc:`)
	p.Println(` t.Errorf("unexpected locale for %s: %s", tag, loc.String())`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(`}`)
	p.Println()
	// TestNewLocaleWithInvalidTag: each malformed tag must produce the
	// matching error prefix.
	p.Println(`func Test`, newLocale, `WithInvalidTag(t *testing.T) {`)
	p.Println(` expectedErrors := map[string]string{ // tag => error prefix`)
	p.Println(` "": "empty locale tag",`)
	p.Println(` "-DE": "malformed locale tag",`)
	p.Println(` "overlong-DE": "invalid language subtag",`)
	p.Println(` "de-DE-suffix": "unsupported locale suffix",`)
	p.Println(` "ZZ": "unsupported language",`)
	p.Println(` "en-4444-US": "unsupported script",`)
	p.Println(` "de-zzz": "unsupported region",`)
	p.Println(` "de-001": "locale not found",`)
	p.Println(` }`)
	p.Println()
	p.Println(` for tag, errorPrefix := range expectedErrors {`)
	p.Println(` _, err := `, newLocale, `(tag)`)
	p.Println(` switch {`)
	p.Println(` case err == nil:`)
	p.Println(` t.Errorf("expected error for %s, got none", tag)`)
	p.Println(` case !strings.HasPrefix(err.Error(), errorPrefix):`)
	p.Println(` t.Errorf("unexpected error message for %s: %s", tag, err.Error())`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(`}`)
	p.Println()
	// TestLocaleSubtags: the accessors must round-trip each known identity.
	p.Println(`func TestLocaleSubtags(t *testing.T) {`)
	p.Println(` expected := map[Locale][4]string{ // tag id => (lang, script, region, locale string)`)
	l.tags.iterate(func(id cldr.Identity) {
		tagID := fmt.Sprintf("%#0[2]*[1]x", l.tags.tagID(id), tagIDBits/4)
		p.Println(` `, tagID, `: {"`, id.Language, `", "`, id.Script, `", "`, id.Territory, `", "`, id.String(), `"},`)
	})
	p.Println(` }`)
	p.Println()
	p.Println(` for loc, subtags := range expected {`)
	p.Println(` lang, script, region := loc.Subtags()`)
	p.Println(` switch {`)
	p.Println(` case lang != subtags[0]:`)
	p.Println(` t.Errorf("unexpected language for %s: %s", subtags[3], lang)`)
	p.Println(` case script != subtags[1]:`)
	p.Println(` t.Errorf("unexpected script for %s: %s", subtags[3], script)`)
	p.Println(` case region != subtags[2]:`)
	p.Println(` t.Errorf("unexpected region for %s: %s", subtags[3], region)`)
	p.Println(` case loc.String() != subtags[3]:`)
	p.Println(` t.Errorf("unexpected region for %s : %s", subtags[3], loc.String())`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(`}`)
	p.Println()
	// TestLocaleParents: explicit parent mappings, emitted perLine per row.
	p.Println(`func TestLocaleParents(t *testing.T) {`)
	p.Println(` expected := map[Locale]Locale{ // child => parent`)
	var parents [][2]cldr.Identity
	l.parentTags.iterate(func(child, parent cldr.Identity) {
		parents = append(parents, [2]cldr.Identity{child, parent})
	})
	const perLine = lineLength / 6
	for i := 0; i < len(parents); i += perLine {
		n := i + perLine
		if n > len(parents) {
			n = len(parents)
		}
		p.Print(` `, newTagID(parents[i][0]), `: `, newTagID(parents[i][1]))
		for k := i + 1; k < n; k++ {
			p.Print(`, `, newTagID(parents[k][0]), `: `, newTagID(parents[k][1]))
		}
		p.Println(`,`)
	}
	p.Println(` }`)
	p.Println()
	p.Println(` for child, parent := range expected {`)
	p.Println(` if p := child.parent(); p != parent {`)
	p.Println(` t.Errorf("unexpected parent for %s: %s (expected %s)", child.String(), p, parent)`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(`}`)
	p.Println()
	// TestLocaleParentsWithTruncation: identities without an explicit parent
	// must fall back to their truncated form. The parents slice is reused.
	p.Println(`func TestLocaleParentsWithTruncation(t *testing.T) {`)
	p.Println(` expected := map[Locale]Locale{ // original locale => truncated locale`)
	parents = parents[:0]
	l.tags.iterate(func(id cldr.Identity) {
		if l.parentTags.containsParent(id) {
			return
		}
		truncated := normalizeIdentity(id.Truncate())
		if truncated != id {
			parents = append(parents, [2]cldr.Identity{id, truncated})
		}
	})
	for i := 0; i < len(parents); i += perLine {
		n := i + perLine
		if n > len(parents) {
			n = len(parents)
		}
		p.Print(` `, newTagID(parents[i][0]), `: `, newTagID(parents[i][1]))
		for k := i + 1; k < n; k++ {
			p.Print(`, `, newTagID(parents[k][0]), `: `, newTagID(parents[k][1]))
		}
		p.Println(`,`)
	}
	p.Println(` }`)
	p.Println()
	p.Println(` for loc, truncated := range expected {`)
	p.Println(` if p := loc.parent(); p != truncated {`)
	p.Println(` t.Errorf("unexpected parent for %s: %s (expected %s)", loc.String(), p, truncated)`)
	p.Println(` }`)
	p.Println(` }`)
	p.Println(`}`)
}
// _localeContainment pairs a locale identity with the list of region codes
// it contains (used below to sort containments by child-region count).
type _localeContainment struct {
	id cldr.Identity
	childRegions []string
}
// _localeContainmentsByChildRegionCount implements sort.Interface, ordering
// containments by how many child regions each entry has (fewest first).
type _localeContainmentsByChildRegionCount []_localeContainment
// Len reports the number of containment entries.
func (s _localeContainmentsByChildRegionCount) Len() int { return len(s) }
// Less orders entries so those with fewer child regions sort first.
func (s _localeContainmentsByChildRegionCount) Less(i, j int) bool {
	return len(s[i].childRegions) < len(s[j].childRegions)
}
// Swap exchanges the entries at positions i and j.
func (s _localeContainmentsByChildRegionCount) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
package lambdaevent
import (
"encoding/json"
"fmt"
"strings"
)
/*
This file contains functions and types to create data structures from lambda
function input received from lambda. You need to use the associated template
to map values into the structure
*/
// lambdaEvent holds the flattened event attributes: nested JSON objects are
// flattened into dot-separated, lower-cased keys mapping to raw string values.
type lambdaEvent struct {
	m map[string]string
}
// Decode takes the raw JSON supplied to the lambda function via node.js and
// returns a *lambdaEvent whose GetValue/GetValueBool methods extract
// individual attributes. Nested objects are flattened into dot-separated,
// lower-cased keys. A non-nil *lambdaEvent is returned even on error so
// callers can safely call its methods.
//
// Fix: the error no longer ends with a newline (vet: error strings should
// not be capitalized or end in punctuation/newline) and wraps the cause with
// %w so callers can use errors.Is/As.
func Decode(cmd string) (*lambdaEvent, error) {
	le := &lambdaEvent{m: make(map[string]string)}
	var event map[string]json.RawMessage
	if err := json.Unmarshal([]byte(cmd), &event); err != nil {
		return le, fmt.Errorf("unable to find event data in input: %w", err)
	}
	for k, v := range event {
		if isJSON(v) {
			// Descend into nested objects, building dotted keys.
			copyFromJSON(le, k, v)
		} else {
			le.m[strings.ToLower(k)] = string(v)
		}
	}
	return le, nil
}
// copyFromJSON recursively copies elements from a nested JSON object into
// the event map, building dot-separated keys rooted at pk.
func copyFromJSON(le *lambdaEvent, pk string, j json.RawMessage) {
	var nested map[string]json.RawMessage
	// j only reaches here after isJSON reported an object, so a decode
	// failure simply leaves nested empty and the loop is a no-op.
	_ = json.Unmarshal(j, &nested)
	for key, raw := range nested {
		full := pk + "." + key
		if isJSON(raw) {
			copyFromJSON(le, full, raw)
		} else {
			le.m[strings.ToLower(full)] = string(raw)
		}
	}
}
// isJSON reports whether s decodes as a JSON object. Arrays and scalars do
// not count, mirroring the flattening logic that only descends into objects.
func isJSON(s json.RawMessage) bool {
	var obj map[string]interface{}
	err := json.Unmarshal(s, &obj)
	return err == nil
}
// GetValue returns the named event attribute, or "" when it is not present.
func (le *lambdaEvent) GetValue(a string) string {
	v, _ := le.GetValueBool(a)
	return v
}
// GetValueBool returns the named event attribute and true, or "" and false
// when the attribute cannot be found. The lookup is case-insensitive,
// surrounding double quotes are stripped, and a nil receiver is tolerated.
func (le *lambdaEvent) GetValueBool(a string) (string, bool) {
	if le == nil {
		// Guard against method calls on a nil *lambdaEvent.
		return "", false
	}
	for key, val := range le.m {
		if !strings.EqualFold(a, key) {
			continue
		}
		return strings.Trim(val, "\""), true
	}
	return "", false
}
// ListAttributes prints every known attribute key/value pair; debugging aid.
func (le *lambdaEvent) ListAttributes() {
	for key, val := range le.m {
		fmt.Printf("k: %s v: %s\n", key, val)
	}
}
// GetJSON returns all flattened attributes as tab-indented JSON.
func (le *lambdaEvent) GetJSON() ([]byte, error) {
	return json.MarshalIndent(le.m, "", "\t")
}
// GetKeys returns the name of every available attribute.
//
// Fix: the previous implementation used make([]string, len(le.m)) and then
// appended, producing len(le.m) leading empty strings before the real keys.
// Allocate with zero length and the map size as capacity instead.
func (le *lambdaEvent) GetKeys() []string {
	r := make([]string, 0, len(le.m))
	for k := range le.m {
		r = append(r, k)
	}
	return r
}
/*
*/
|
package builder
import (
. "openreplay/backend/pkg/messages"
)
// CLICK_RELATION_TIME is the maximum delay between a mouse click and a DOM
// reaction before the click is considered "dead" (same unit as the message
// timestamps, presumably milliseconds — confirm against the producer).
const CLICK_RELATION_TIME = 1400
// deadClickDetector tracks the last meaningful mouse click and raises a
// "dead_click" issue when no DOM reaction follows within CLICK_RELATION_TIME.
type deadClickDetector struct {
	lastMouseClick *MouseClick // pending click awaiting a DOM reaction
	lastTimestamp uint64 // timestamp of that pending click
	lastMessageID uint64 // message id of that pending click
	inputIDSet map[uint64]bool // node ids registered as input targets
}
// HandleReaction is invoked when a DOM reaction (or a new click) arrives. If
// a click is pending and more than CLICK_RELATION_TIME elapsed without a
// reaction, it returns a "dead_click" IssueEvent, otherwise nil. All pending
// state is reset either way.
func (d *deadClickDetector) HandleReaction(timestamp uint64) *IssueEvent {
	var issue *IssueEvent
	if pending := d.lastMouseClick; pending != nil && timestamp > d.lastTimestamp+CLICK_RELATION_TIME {
		issue = &IssueEvent{
			Type:          "dead_click",
			ContextString: pending.Label,
			Timestamp:     d.lastTimestamp,
			MessageID:     d.lastMessageID,
		}
	}
	// Clear all pending click state.
	d.inputIDSet = nil
	d.lastMouseClick = nil
	d.lastTimestamp = 0
	d.lastMessageID = 0
	return issue
}
// HandleMessage feeds one session message into the detector. Input-target
// registrations are recorded; a new document clears them; DOM mutations count
// as reactions; a labelled MouseClick first flushes any pending click
// (possibly yielding a dead-click issue) and is then recorded as the new
// pending click unless it targeted a registered input element.
//
// Fix: the input-ID check previously ran AFTER HandleReaction, which sets
// d.inputIDSet to nil — so clicks on inputs were never actually ignored.
// Capture the membership test before the reset.
func (d *deadClickDetector) HandleMessage(msg Message, messageID uint64, timestamp uint64) *IssueEvent {
	var i *IssueEvent
	switch m := msg.(type) {
	case *SetInputTarget:
		if d.inputIDSet == nil {
			d.inputIDSet = make(map[uint64]bool)
		}
		d.inputIDSet[m.ID] = true
	case *CreateDocument:
		// New document: previously registered inputs no longer exist.
		d.inputIDSet = nil
	case *MouseClick:
		if m.Label == "" {
			return nil
		}
		// Must be read before HandleReaction, which clears inputIDSet.
		isInput := d.inputIDSet[m.ID]
		i = d.HandleReaction(timestamp)
		if isInput { // ignore clicks on input elements
			return i
		}
		d.lastMouseClick = m
		d.lastTimestamp = timestamp
		d.lastMessageID = messageID
	case *SetNodeAttribute,
		*RemoveNodeAttribute,
		*CreateElementNode,
		*CreateTextNode,
		*MoveNode,
		*RemoveNode,
		*SetCSSData,
		*CSSInsertRule,
		*CSSDeleteRule:
		// Any DOM mutation counts as a reaction to the pending click.
		i = d.HandleReaction(timestamp)
	}
	return i
}
|
package model
import (
"github.com/caos/zitadel/internal/model"
"time"
)
// ProjectGrantMemberView is the read-model projection of a project grant
// member: the granted user's identity and display data plus its roles.
type ProjectGrantMemberView struct {
	UserID string
	GrantID string
	ProjectID string
	UserName string
	Email string
	FirstName string
	LastName string
	DisplayName string
	Roles []string
	CreationDate time.Time
	ChangeDate time.Time
	Sequence uint64
}
// ProjectGrantMemberSearchRequest describes paging (Offset/Limit), sorting
// and filter queries for listing project grant members.
type ProjectGrantMemberSearchRequest struct {
	Offset uint64
	Limit uint64
	SortingColumn ProjectGrantMemberSearchKey
	Asc bool
	Queries []*ProjectGrantMemberSearchQuery
}
// ProjectGrantMemberSearchKey identifies the column a search query filters
// or sorts on.
type ProjectGrantMemberSearchKey int32

// Search keys; ProjectGrantMemberSearchKeyUnspecified means no column chosen.
const (
	ProjectGrantMemberSearchKeyUnspecified ProjectGrantMemberSearchKey = iota
	ProjectGrantMemberSearchKeyUserName
	ProjectGrantMemberSearchKeyEmail
	ProjectGrantMemberSearchKeyFirstName
	ProjectGrantMemberSearchKeyLastName
	ProjectGrantMemberSearchKeyGrantID
	ProjectGrantMemberSearchKeyUserID
)
// ProjectGrantMemberSearchQuery is a single filter: compare the column Key
// with Value using the given search Method.
type ProjectGrantMemberSearchQuery struct {
	Key ProjectGrantMemberSearchKey
	Method model.SearchMethod
	Value interface{}
}
// ProjectGrantMemberSearchResponse carries one result page plus the total
// count and the projection's sequence/timestamp at read time.
type ProjectGrantMemberSearchResponse struct {
	Offset uint64
	Limit uint64
	TotalResult uint64
	Result []*ProjectGrantMemberView
	Sequence uint64
	Timestamp time.Time
}
// EnsureLimit caps the request's Limit at the given maximum, which also
// serves as the default when no limit was set.
func (r *ProjectGrantMemberSearchRequest) EnsureLimit(limit uint64) {
	switch {
	case r.Limit == 0, r.Limit > limit:
		r.Limit = limit
	}
}
|
package main
import (
"fmt"
"io"
"net"
"os"
//"bufio"
"bytes"
//"ioutil"
)
// main runs a tiny TCP forwarder: it accepts connections on the given local
// port and pipes each one to remote_host:remote_port.
func main() {
	fmt.Println(os.Args)
	if len(os.Args) != 4 {
		fmt.Println("prog listen_port remote_host, remote_port")
		os.Exit(3)
	}
	listenPort, rmtHost, rmtPort := os.Args[1], os.Args[2], os.Args[3]
	fmt.Println("localhost:" + listenPort + " -> " + rmtHost + ":" + rmtPort)
	ln, err := net.Listen("tcp", ":"+listenPort)
	if err != nil {
		panic(err)
	}
	// Accept loop: each session is proxied on its own goroutine.
	for {
		conn, acceptErr := ln.Accept()
		if acceptErr != nil {
			panic(acceptErr)
		}
		go handleRequest(conn, rmtHost, rmtPort)
	}
}
// handleRequest dials the remote endpoint and wires the two connections
// together in both directions.
//
// Fix: a failed dial previously panicked, killing the entire proxy process
// and leaking the accepted connection. Now the client connection is closed
// and the error reported, leaving other sessions untouched.
func handleRequest(conn net.Conn, rmt_host string, rmt_port string) {
	proxy, err := net.Dial("tcp", rmt_host+":"+rmt_port)
	if err != nil {
		fmt.Println("dial", rmt_host+":"+rmt_port, "failed:", err)
		conn.Close()
		return
	}
	go copyIO(conn, proxy) // local to remote
	go copyIO(proxy, conn) // remote to local
}
func copyIO(src, dest net.Conn) {
defer src.Close()
defer dest.Close()
var b bytes.Buffer
_ = io.Writer(&b)
io.Copy(src, io.TeeReader(dest, &b))
fmt.Println(b.String())
} |
package states
import (
"bytes"
"io"
"matrixchain/common/serialization"
"github.com/zhaohaijun/blockchain-crypto/keypair"
)
// BookkeeperState records the current and the next set of bookkeeper public
// keys on top of the common StateBase data.
type BookkeeperState struct {
	StateBase
	CurrBookkeeper []keypair.PublicKey
	NextBookkeeper []keypair.PublicKey
}
// Serialize writes the state to w: the embedded StateBase, then each key set
// as a uint32 count followed by the var-length serialized public keys.
//
// Fix: errors from StateBase.Serialize and WriteUint32 were silently
// discarded; they are now propagated like the WriteVarBytes errors.
func (this *BookkeeperState) Serialize(w io.Writer) error {
	if err := this.StateBase.Serialize(w); err != nil {
		return err
	}
	if err := serialization.WriteUint32(w, uint32(len(this.CurrBookkeeper))); err != nil {
		return err
	}
	for _, v := range this.CurrBookkeeper {
		if err := serialization.WriteVarBytes(w, keypair.SerializePublicKey(v)); err != nil {
			return err
		}
	}
	if err := serialization.WriteUint32(w, uint32(len(this.NextBookkeeper))); err != nil {
		return err
	}
	for _, v := range this.NextBookkeeper {
		if err := serialization.WriteVarBytes(w, keypair.SerializePublicKey(v)); err != nil {
			return err
		}
	}
	return nil
}
// Deserialize reads the state from r in the same layout Serialize produces.
//
// Fix: errors from keypair.DeserializePublicKey were assigned but never
// checked, so a corrupt key silently produced a bogus entry; they are now
// propagated.
func (this *BookkeeperState) Deserialize(r io.Reader) error {
	err := this.StateBase.Deserialize(r)
	if err != nil {
		return err
	}
	n, err := serialization.ReadUint32(r)
	if err != nil {
		return err
	}
	for i := 0; i < int(n); i++ {
		buf, err := serialization.ReadVarBytes(r)
		if err != nil {
			return err
		}
		key, err := keypair.DeserializePublicKey(buf)
		if err != nil {
			return err
		}
		this.CurrBookkeeper = append(this.CurrBookkeeper, key)
	}
	n, err = serialization.ReadUint32(r)
	if err != nil {
		return err
	}
	for i := 0; i < int(n); i++ {
		buf, err := serialization.ReadVarBytes(r)
		if err != nil {
			return err
		}
		key, err := keypair.DeserializePublicKey(buf)
		if err != nil {
			return err
		}
		this.NextBookkeeper = append(this.NextBookkeeper, key)
	}
	return nil
}
// ToArray serializes the state into a freshly allocated byte slice.
//
// Receiver renamed from v to this for consistency with the other methods of
// this type; the Serialize error is deliberately ignored because writes to a
// bytes.Buffer do not fail.
func (this *BookkeeperState) ToArray() []byte {
	b := new(bytes.Buffer)
	_ = this.Serialize(b)
	return b.Bytes()
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/gameontext/a8-room/pkg/gameon"
)
// exits maps each compass direction to the description shown to players.
// Fix: "along it edges" -> "along its edges" in the user-facing text.
var exits = map[string]string{
	"N": "An old wooden door with a large arrow carved on its center",
	"S": "A heavy metal door with signs of rust",
	"W": "A gray, plain looking door",
	"E": "A door surrounded by a mysterious glow along its edges",
}
// room implements the Game On! room endpoints; chat content passes through
// the profanity checker before being broadcast.
type room struct {
	profanityChecker ProfanityChecker
}
// newRoom builds a room with a freshly constructed profanity checker.
func newRoom() *room {
	r := &room{}
	r.profanityChecker = newProfanityChecker()
	return r
}
// hello handles the Game On! "hello" callback: it sends the joining player
// the room's location description and broadcasts a welcome event.
func (r *room) hello(resp http.ResponseWriter, req *http.Request) {
	if req.Method != "POST" {
		resp.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	var hello gameon.Hello
	if err := json.NewDecoder(req.Body).Decode(&hello); err != nil || hello.UserID == "" {
		resp.WriteHeader(http.StatusBadRequest)
		return
	}
	// Location description goes only to the joining player.
	loc := gameon.Location{
		Type:        "location",
		Name:        "Chatter",
		FullName:    "A chat room",
		Description: "a darkly lit room, there are people here, some are walking around, some are standing in groups",
		Exits:       exits,
		Commands:    map[string]string{},
		Inventory:   []string{},
	}
	location := gameon.Message{
		Direction: "player",
		Recipient: hello.UserID,
		Payload:   jsonMarshal(loc),
	}
	// The welcome event is broadcast; the joining player sees a different text.
	welcome := gameon.Message{
		Direction: "player",
		Recipient: "*",
		Payload: jsonMarshal(gameon.Event{
			Type: "event",
			Content: map[string]string{
				hello.UserID: "Welcome!",
				"*":          fmt.Sprintf("%s has just entered the room", hello.Username),
			},
		}),
	}
	writeResponseMessages(resp, location, welcome)
}
// goodbye handles the "goodbye" callback by broadcasting a farewell event.
func (r *room) goodbye(resp http.ResponseWriter, req *http.Request) {
	if req.Method != "POST" {
		resp.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	var goodbye gameon.Goodbye
	if err := json.NewDecoder(req.Body).Decode(&goodbye); err != nil || goodbye.UserID == "" {
		resp.WriteHeader(http.StatusBadRequest)
		return
	}
	content := map[string]string{
		goodbye.UserID: "Farewell!",
		"*":            fmt.Sprintf("%s has left the room", goodbye.Username),
	}
	farewell := gameon.Message{
		Direction: "player",
		Recipient: "*",
		Payload:   jsonMarshal(gameon.Event{Type: "event", Content: content}),
	}
	writeResponseMessages(resp, farewell)
}
// room handles in-room commands, dispatching slash commands and plain chat.
func (r *room) room(resp http.ResponseWriter, req *http.Request) {
	if req.Method != "POST" {
		resp.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	var command gameon.RoomCommand
	if err := json.NewDecoder(req.Body).Decode(&command); err != nil || command.UserID == "" || command.Content == "" {
		resp.WriteHeader(http.StatusBadRequest)
		return
	}
	if strings.HasPrefix(command.Content, "/") {
		r.handleSlash(command, resp) // slash command
		return
	}
	r.handleChat(command, resp) // plain chat message
}
// handleSlash executes a "/command" typed in the room. "/go DIR" moves the
// player through a known exit; "/examine", "/inventory" and "/look" return
// canned descriptions; anything else is politely refused. All responses are
// addressed only to the issuing player.
func (r *room) handleSlash(command gameon.RoomCommand, resp http.ResponseWriter) {
	words := strings.Fields(command.Content)
	commandName := strings.ToLower(words[0])
	var eventContent string
	switch commandName {
	case "/go":
		if len(words) < 2 {
			eventContent = "Go where?"
			break
		}
		exitID := strings.ToUpper(words[1])
		if _, ok := exits[exitID]; !ok {
			eventContent = "You probably don't wanna go there..."
			break
		}
		// A valid direction produces a playerLocation message instead of a
		// plain event, so respond and return directly.
		location := gameon.Message{
			Direction: "playerLocation",
			Recipient: command.UserID,
			Payload: jsonMarshal(gameon.PlayerLocation{
				Type:    "exit",
				Content: "You frantically run towards the exit",
				ExitID:  exitID,
			}),
		}
		writeResponseMessages(resp, location)
		return
	case "/examine":
		eventContent = "Shouldn't you be mingling?"
	case "/inventory":
		eventContent = "There is nothing here"
	case "/look":
		eventContent = "It's just a room"
	default:
		// Strip the leading "/" when echoing the unknown command back.
		eventContent = fmt.Sprintf("Don't know how to %s", commandName[1:])
	}
	event := gameon.Message{
		Direction: "player",
		Recipient: command.UserID,
		Payload: jsonMarshal(gameon.Event{
			Type: "event",
			Content: map[string]string{
				command.UserID: eventContent,
			},
		}),
	}
	writeResponseMessages(resp, event)
}
// handleChat relays a chat message to the whole room, unless the profanity
// checker objects, in which case only the sender is scolded.
func (r *room) handleChat(command gameon.RoomCommand, resp http.ResponseWriter) {
	if r.profanityChecker.Check(command.Content) {
		scold := gameon.Message{
			Direction: "player",
			Recipient: command.UserID,
			Payload: jsonMarshal(gameon.Event{
				Type: "event",
				Content: map[string]string{
					command.UserID: "Pardon your french!",
				},
			}),
		}
		writeResponseMessages(resp, scold)
		return
	}
	chat := gameon.Message{
		Direction: "player",
		Recipient: "*",
		Payload: jsonMarshal(gameon.Chat{
			Type:     "chat",
			Username: command.Username,
			Content:  command.Content,
		}),
	}
	writeResponseMessages(resp, chat)
}
// writeResponseMessages serializes the messages into a MessageCollection and
// writes it as the JSON HTTP response.
func writeResponseMessages(resp http.ResponseWriter, messages ...gameon.Message) {
	// Renamed local from "bytes" to avoid shadowing the stdlib package name.
	body := jsonMarshal(gameon.MessageCollection{Messages: messages})
	resp.Header().Set("Content-Type", "application/json")
	resp.WriteHeader(http.StatusOK)
	resp.Write(body)
}
// jsonMarshal marshals obj to JSON, returning nil on (unexpected) failure;
// the callers only serialize plain structs and maps which cannot fail.
func jsonMarshal(obj interface{}) []byte {
	data, err := json.Marshal(obj)
	if err != nil {
		return nil
	}
	return data
}
|
package log
import (
log "github.com/sirupsen/logrus"
)
// logPath would hold the log file location; the file-output code below is
// disabled, so it is presently unused.
var logPath string

// init configures the package's logrus logger: it attaches a hook that
// records the source file/line of each entry and a rotating-file hook
// (newLfsHook). File redirection and level filtering are kept below as
// commented-out reference code.
func init() {
	//pwd, _ := os.Getwd()
	//path := filepath.Dir(pwd)
	//logPath = filepath.Join(path, "log.txt")
	//fmt.Println(logPath)
	//f, err := os.OpenFile("log.txt", os.O_WRONLY|os.O_CREATE|os.O_APPEND, os.ModePerm)
	//if err != nil{
	//	return
	//}
	//outPut := io.Writer(f)
	// Set the log format to JSON (disabled)
	//log.SetFormatter(&log.TextFormatter{})
	// Send log output to stdout instead of the default stderr;
	// any io.Writer may be used as the destination (disabled)
	//log.SetOutput(outPut)
	// Hook that records the line number and code location of each entry
	log.AddHook(lineHook{Field:"source"})
	// NOTE(review): the arguments (5, 24) look like rotation settings
	// (count / hours) — confirm against newLfsHook's definition.
	log.AddHook(newLfsHook(5, 24))
	// Restrict logging to warn level and above (disabled)
	//log.SetLevel(log.WarnLevel)
}
|
package sliding_window
import "testing"
// TestMaxProfit checks maxProfit against table-driven cases.
func TestMaxProfit(t *testing.T) {
	cases := []struct {
		prices []int
		want   int
	}{
		{prices: []int{7, 1, 5, 3, 6, 4}, want: 5},
	}
	for _, tc := range cases {
		if got := maxProfit(tc.prices); got != tc.want {
			t.Errorf("wanted %v, got %v", tc.want, got)
		}
	}
}
|
package lib
import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
)
// Handler serves CDN requests of the form /bucket[/v.VERSION[.min]]/path,
// resolving them against the local or remote buckets configured in Conf.
type Handler struct {
	Conf Config
}
// ServeHTTP parses the request path into bucket / version / filename,
// validates the bucket and deny list, and serves the file either from the
// local filesystem or by proxying the remote origin.
func (slf Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cdn-Source","gocdn")
	path := r.URL.Path
	// Parse the path:
	//   /bucket/version/filename
	//   /hashmap/v.1/test/1.png     -- plain file
	//   /hashmap/v.1.min/test/1.png -- minified variant
	// NOTE(review): these regexes are compiled on every request and the
	// ungreedy `(/.*?)` tail groups look like they may capture less than the
	// full remainder — confirm the intended capture before changing them.
	reg := regexp.MustCompile(`(?U)^/([^/]+)(/.*?)`)
	all := reg.FindAllStringSubmatch(path, -1)
	if len(all) < 1 {
		slf.halt(w, r, 404)
		return
	}
	// bucket name
	bucketname := all[0][1]
	// make sure the bucket exists
	_, ok := slf.Conf.Buckets[bucketname]
	if !ok {
		slf.halt(w, r, 404)
		return
	}
	var bucket Bucket = slf.Conf.Buckets[bucketname]
	// remainder of the path, possibly starting with a version segment
	ver_path := all[0][2]
	filename := ver_path
	// extract the version ("default" when absent); a ".min" suffix in the
	// version marks the minified variant
	reg = regexp.MustCompile(`(?U)^/v\.([^/]+)(/.*?)`)
	all = reg.FindAllStringSubmatch(ver_path, -1)
	ver := "default"
	min := false
	if len(all) > 0 {
		ver = all[0][1]
		reg = regexp.MustCompile(`\.min`)
		min = reg.MatchString(ver)
		filename = all[0][2]
	} else {
	}
	// file extension (empty when the name has none)
	reg = regexp.MustCompile(`(?U)\.([^.]+)$`)
	all = reg.FindAllStringSubmatch(filename, -1)
	ext := ""
	if len(all) > 0 {
		ext = all[0][1]
	} else {
	}
	// assemble the file descriptor used by local/remote/checkDeny
	bf := BucketFile{
		Bucket{
			bucket.Name,
			bucket.IsLocal,
			bucket.Root,
			bucket.Deny,
		},
		filename,
		ver,
		min,
		ext,
		getMime(ext, slf.Conf),
	}
	if slf.checkDeny(bf) {
		slf.halt(w, r, 403)
		return
	}
	if bucket.IsLocal {
		slf.local(bf, w, r)
	} else {
		slf.remote(bf, w, r)
	}
	return
}
// local serves bf from the local filesystem under the bucket root.
//
// Fixes:
//   - Content-Type/Content-Length were set AFTER WriteHeader, so they never
//     reached the client; headers are now set before writing the status.
//   - the read loop wrote the full 1024-byte scratch buffer on every
//     iteration, padding short reads with stale bytes and corrupting the
//     payload; stream with io.Copy instead.
//   - the file handle is closed via defer on every path.
func (slf Handler) local(bf BucketFile, w http.ResponseWriter, r *http.Request) {
	filename := bf.Root + bf.Filename
	// Make sure the file exists and is not a directory.
	stat, err := os.Stat(filename)
	if err != nil {
		slf.halt(w, r, 400)
		return
	}
	if stat.IsDir() {
		slf.halt(w, r, 403)
		return
	}
	file, err := os.Open(filename)
	if err != nil {
		slf.halt(w, r, 403)
		return
	}
	defer file.Close()
	w.Header().Set("Content-type", bf.Mime)
	w.Header().Set("Content-length", strconv.FormatInt(stat.Size(), 10))
	w.WriteHeader(200)
	io.Copy(w, file)
}
// remote proxies bf from its remote origin (bucket root is a URL prefix).
//
// Fixes:
//   - headers were set AFTER WriteHeader and therefore dropped; set them
//     before writing the status.
//   - Content-Length came from resp.ContentLength, which may be -1; use the
//     actual body length.
//   - the response body was leaked when the origin returned a non-200;
//     close it via defer on every path.
func (slf Handler) remote(bf BucketFile, w http.ResponseWriter, r *http.Request) {
	// Build the origin URL.
	url := bf.Root + bf.Filename
	resp, err := http.Get(url)
	if err != nil {
		slf.halt(w, r, 400)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		slf.halt(w, r, resp.StatusCode)
		return
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		slf.halt(w, r, 403)
		return
	}
	w.Header().Set("Content-type", bf.Mime)
	w.Header().Set("Content-length", strconv.Itoa(len(body)))
	w.WriteHeader(200)
	w.Write(body)
}
// checkDeny reports whether the file's extension appears in the global or
// bucket-level comma-separated deny list.
func (slf Handler) checkDeny(bf BucketFile) bool {
	const sep = ","
	denyList := sep + slf.Conf.Deny + sep + bf.Deny + sep
	return strings.Contains(denyList, sep+bf.Ext+sep)
}
// httpStatusLines maps a status code (as a string) to the status line shown
// in the error page body. Hoisted to package level so the map is built once
// instead of on every halt call.
var httpStatusLines = map[string]string{
	// 1xx informational
	"100": "100 Continue",
	"101": "101 Switching Protocols",
	"102": "102 Processing",
	// 2xx success
	"200": "200 OK",
	"201": "201 Created",
	"202": "202 Accepted",
	"203": "203 Non-Authoritative Information",
	"204": "204 No Content",
	"205": "205 Reset Content",
	"206": "206 Partial Content",
	"207": "207 Multi-Status",
	// 3xx redirection
	"300": "300 Multiple Choices",
	"301": "301 Moved Permanently",
	"302": "302 Move temporarily",
	"303": "303 See Other",
	"304": "304 Not Modified",
	"305": "305 Use Proxy",
	"306": "306 Switch Proxy",
	"307": "307 Temporary Redirect",
	// 4xx client errors
	"400": "400 Bad Request",
	"401": "401 Unauthorized",
	"402": "402 Payment Required",
	"403": "403 Forbidden",
	"404": "404 Not Found",
	"405": "405 Method Not Allowed",
	"406": "406 Not Acceptable",
	"407": "407 Proxy Authentication Required",
	"408": "408 Request Timeout",
	"409": "409 Conflict",
	"410": "410 Gone",
	"411": "411 Length Required",
	"412": "412 Precondition Failed",
	"413": "413 Request Entity Too Large",
	"414": "414 Request-URI Too Long",
	"415": "415 Unsupported Media Type",
	"416": "416 Requested Range Not Satisfiable",
	"417": "417 Expectation Failed",
	"421": "421 too many connections",
	"422": "422 Unprocessable Entity",
	"423": "423 Locked",
	"424": "424 Failed Dependency",
	"425": "425 Unordered Collection",
	"426": "426 Upgrade Required",
	"449": "449 Retry With",
	"451": "451 Unavailable For Legal Reasons",
	// 5xx/6xx server errors
	"500": "500 Internal Server Error",
	"501": "501 Not Implemented",
	"502": "502 Bad Gateway",
	"503": "503 Service Unavailable",
	"504": "504 Gateway Timeout",
	"505": "505 HTTP Version Not Supported",
	"506": "506 Variant Also Negotiates",
	"507": "507 Insufficient Storage",
	"509": "509 Bandwidth Limit Exceeded",
	"510": "510 Not Extended",
	"600": "600 Unparseable Response Headers",
}

// halt writes the given status code and a minimal HTML body describing it;
// unknown codes fall back to the 404 text.
//
// Fix: the status-line map was rebuilt on every call; it now lives in the
// package-level httpStatusLines variable above.
func (slf Handler) halt(w http.ResponseWriter, r *http.Request, code int) {
	code_status, ok := httpStatusLines[strconv.Itoa(code)]
	if !ok {
		code_status = httpStatusLines["404"]
	}
	w.WriteHeader(code)
	fmt.Fprintln(w, "<h1>"+code_status+" </h1>")
}
|
package main
import "fmt"
// main reads a row count from stdin and prints a triangle pattern: row i
// carries i "o " markers followed by a "* " terminator.
func main() {
	var length int
	fmt.Scanln(&length)
	for i := 0; i < length; i++ {
		for j := 0; j < i; j++ {
			fmt.Print("o ")
		}
		fmt.Println("* ")
	}
}
package utils
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/PuerkitoBio/goquery"
)
// Match is one upcoming match: the two team names and the countdown text
// scraped from Liquipedia.
type Match struct {
	LeftTeam string `json:"LeftTeam"`
	RightTeam string `json:"RightTeam"`
	TimeStamp string `json:"TimeStamp"`
}
// getDataFromFile loads the cached match map from ./matches.json. It returns
// an error when the file is missing or older than 24 hours so the caller can
// re-scrape.
//
// Fixes: the file handle was never closed (descriptor leak) and a malformed
// cache file caused a panic; both are handled now — a bad cache simply
// triggers a re-scrape via the error return.
func getDataFromFile() (map[string][]Match, error) {
	tourneyMap := make(map[string][]Match)
	f, fErr := os.Open("./matches.json")
	if fErr != nil {
		return tourneyMap, errors.New("file does not exist")
	}
	defer f.Close()
	fi, statErr := f.Stat()
	if statErr != nil {
		return tourneyMap, statErr
	}
	t1 := time.Now()
	Log.Debug(fi.ModTime())
	Log.Debug(t1.Sub(fi.ModTime()).Hours())
	if t1.Sub(fi.ModTime()).Hours() > 24.0 {
		return tourneyMap, errors.New("file is too old")
	}
	byteValue, readErr := ioutil.ReadAll(f)
	if readErr != nil {
		return tourneyMap, readErr
	}
	if em := json.Unmarshal(byteValue, &tourneyMap); em != nil {
		return tourneyMap, em
	}
	return tourneyMap, nil
}
// writeToFile persists the scraped match map to ./matches.json, the cache
// read back by getDataFromFile.
//
// Fix: the file was closed twice (an explicit Close plus a deferred one) and
// the Write error was ignored; keep a single explicit Close and check the
// write result.
func writeToFile(data map[string][]Match) {
	jsonData, err := json.Marshal(data)
	if err != nil {
		panic(err)
	}
	jsonFile, err := os.Create("./matches.json")
	if err != nil {
		panic(err)
	}
	if _, err := jsonFile.Write(jsonData); err != nil {
		jsonFile.Close()
		panic(err)
	}
	jsonFile.Close()
	Log.Info("JSON data written to ", jsonFile.Name())
}
// GetDotaMatches returns upcoming Dota 2 matches grouped by tournament. It
// first tries the local 24-hour cache (matches.json); on a cache miss it
// scrapes Liquipedia's upcoming-matches page and rewrites the cache.
//
// NOTE(review): log.Fatal on a network/parse failure terminates the whole
// process; consider returning an error instead.
func GetDotaMatches() map[string][]Match {
	data, err := getDataFromFile()
	if err == nil {
		Log.Info("Loading data from local file")
		return data
	}
	Log.Info(err)
	tourneyMap := make(map[string][]Match)
	// Make HTTP request
	response, err := http.Get("https://liquipedia.net/dota2/Liquipedia:Upcoming_and_ongoing_matches")
	if err != nil {
		log.Fatal(err)
	}
	defer response.Body.Close()
	// Create a goquery document from the HTTP response
	document, err := goquery.NewDocumentFromReader(response.Body)
	if err != nil {
		log.Fatal("Error loading HTTP response body. ", err)
	}
	// Each .infobox_matches_content block holds one match; the last <a> in
	// the block is assumed to link to the tournament page.
	document.Find(".infobox_matches_content").Each(func(i int, s *goquery.Selection) {
		leftTeam := strings.TrimSpace(s.Find(".team-left").Text())
		rightTeam := strings.TrimSpace(s.Find(".team-right").Text())
		timeStamp := strings.TrimSpace(s.Find(".timer-object-countdown-only").Text())
		tourney := strings.TrimSpace(s.Find("a").Last().Text())
		tourneyMap[tourney] = append(tourneyMap[tourney], Match{
			LeftTeam: leftTeam,
			RightTeam: rightTeam,
			TimeStamp: timeStamp,
		})
	})
	writeToFile(tourneyMap)
	return tourneyMap
}
// GetFormatedMatches renders the tournament map as a human-readable listing.
// Map iteration order is random, so tournament order varies run to run.
//
// Fix: repeated string concatenation in a nested loop is quadratic; build
// the output with strings.Builder instead.
func GetFormatedMatches(tourneyMap map[string][]Match) string {
	var b strings.Builder
	b.WriteString("Upcoming Dota Matches \n")
	for tourney, matches := range tourneyMap {
		fmt.Fprintf(&b, "%s \n", tourney)
		for _, match := range matches {
			fmt.Fprintf(&b, "  %s vs %s | at %s \n", match.LeftTeam, match.RightTeam, match.TimeStamp)
		}
	}
	return b.String()
}
// ChunkString splits s into consecutive chunks of at most chunkSize runes.
// The split is rune-based so multi-byte characters are never cut in half.
// An empty string — or a non-positive chunkSize — yields the original string
// as a single chunk.
//
// Fix: a chunkSize <= 0 previously caused an infinite loop.
func ChunkString(s string, chunkSize int) []string {
	runes := []rune(s)
	if len(runes) == 0 || chunkSize <= 0 {
		return []string{s}
	}
	chunks := make([]string, 0, (len(runes)+chunkSize-1)/chunkSize)
	for i := 0; i < len(runes); i += chunkSize {
		end := i + chunkSize
		if end > len(runes) {
			end = len(runes)
		}
		chunks = append(chunks, string(runes[i:end]))
	}
	return chunks
}
|
package httpserver
import (
"github.com/gin-gonic/gin"
"xj_web_server/httpserver/activity"
"xj_web_server/httpserver/agent"
"xj_web_server/httpserver/exchange"
"xj_web_server/httpserver/game"
"xj_web_server/httpserver/handle"
"xj_web_server/httpserver/index"
"xj_web_server/httpserver/news"
"xj_web_server/httpserver/rank"
"xj_web_server/httpserver/servermiddleware"
"xj_web_server/httpserver/sign"
"xj_web_server/httpserver/user"
"xj_web_server/httpserver/wss"
"xj_web_server/module/apigw"
)
// initRoutes registers all HTTP routes on the gin engine: health/version
// endpoints, the v1 API group (auth-gated user/agent/sign/wheel/rank/exchange
// subgroups), the v2 group, and the microservice API-gateway passthroughs.
//
// Fix: the two gateway routes lacked a leading "/" unlike every other route;
// gin normalizes both forms to the same path, so this is a consistency-only
// change. Comments translated to English.
func initRoutes(router *gin.Engine) {
	router.GET("/version", handle.Version)
	router.GET("/ping", handle.Ping)
	router.GET("/v1/check/time", handle.CheckTime)
	// WebSocket endpoints
	router.GET("/v1/wss", wss.Wss)
	router.GET("/v1/client", wss.Client)
	router.POST("/v1/game/version/set", game.SetGameVersion)
	v1 := router.Group("/v1", servermiddleware.Base())
	{
		// get login server address
		v1.POST("/host", index.GetHost)
		// get hall (lobby) server address
		v1.POST("/hall/host", index.GetHallHost)
		// register
		v1.POST("/registered", user.Registered)
		// get verification code
		v1.POST("/getcode", user.GetCode)
		// forgot password
		v1.POST("/forgot/pwd", user.ForgotPwd)
		// user info (auth required)
		userPath := v1.Group("/user", servermiddleware.BaseAuth())
		{
			userPath.POST("/msg", user.GetUserMsg)
			// match records
			userPath.POST("/record/list", user.RecordList)
			// update user info
			userPath.POST("/update/information", user.UpdateInformation)
			// relief (benefit) rule list
			userPath.POST("/benefits", user.BaseEnsures)
			// claim relief benefit
			userPath.POST("/benefits/receive", user.TakeBaseEnsure)
			// bind bank card
			userPath.POST("/binding/bankcard", user.BindingBankcard)
			// bind phone number
			userPath.POST("/binding/mobile", user.BindingMobile)
		}
		// agent endpoints (auth required)
		agentPath := v1.Group("/agent", servermiddleware.BaseAuth())
		{
			// my promotions
			agentPath.POST("/team/mypromote", agent.MyPromote)
			// direct subordinates
			agentPath.POST("/team/dire", agent.TeamDire)
			// rebate details
			agentPath.POST("/team/level", agent.GetAgentRotyaltyLevel)
			// rebate configuration
			agentPath.POST("/team/config", agent.GetAgentRotyaltyConfig)
			// team performance
			agentPath.POST("/team/achievement", agent.TeamAchievement)
			// performance sources
			agentPath.POST("/team/achievement/form", agent.TeamAchievementForm)
			//agentPath.POST("/team/promote", agent.TeamPromote)
			// claim commission
			agentPath.POST("/team/agent/take", agent.TeamTakeAgentRoyalty)
			// commission records
			agentPath.POST("/team/agent/take/record", agent.TeamTakeAgentRecord)
			// first-level agents
			agentPath.POST("/team/one", agent.One)
			//agentPath.POST("/team/record", agent.TeamList)
			//agentPath.POST("/daily/knots", agent.DailyKnots)
			//agentPath.POST("/user/info", agent.UserGameInfo)
			//agentPath.POST("/diamond/logs", agent.DiamondChangeLog)
		}
		// launch-page data
		v1.POST("/index", index.Star)
		// sign-in endpoints (auth required)
		signPath := v1.Group("/sign", servermiddleware.BaseAuth())
		{
			// sign in
			signPath.POST("/receive", sign.SigIn)
			// sign-in list
			signPath.POST("/list", sign.SigList)
		}
		wheelPath := v1.Group("/wheel", servermiddleware.BaseAuth())
		{
			// prize-wheel rule list
			wheelPath.POST("/rules", sign.BigWheelRules)
			// prize-wheel draw
			wheelPath.POST("/turntable", sign.BigWheelTurntable)
			// prize-wheel records
			wheelPath.POST("/record", sign.BigWheelRecord)
		}
		rankPath := v1.Group("/rank", servermiddleware.BaseAuth())
		{
			// leaderboard
			rankPath.POST("/list", rank.RanksList)
		}
		exchangePath := v1.Group("/exchange", servermiddleware.BaseAuth())
		{
			// exchange configuration
			exchangePath.POST("/config", exchange.Config)
			// diamond exchange
			exchangePath.POST("/diamond", exchange.DiamondExchange)
			// diamond exchange records
			exchangePath.POST("/diamond/record", exchange.DiamondExchangeRecord)
		}
		// announcement list
		v1.POST("/news/info", news.NewsInfo)
		// prize marquee
		v1.POST("/prize/info", news.RecordPrizeInfo)
		// activities
		v1.POST("/activitylist", activity.ActivityList)
		v1.POST("/game/version/list", game.GetAllGameVersionV2)
	}
	v2 := router.Group("/v2", servermiddleware.Base())
	{
		v2.POST("/game/version/list", game.GetAllGameVersionV2)
	}
	// microservice API gateway
	router.POST("/v1/user/signup", apigw.DoSignUpHandler)
	// get server list
	router.POST("/v1/public/gethost", apigw.GetHostHandler)
}
|
package utils
import "github.com/pkg/errors"
// AppError pairs an application status code with a wrapped underlying error.
type AppError struct {
	Code int
	Err error
}
// Error implements the error interface by delegating to the wrapped error.
func (e *AppError) Error() string { return e.Err.Error() }
// GetAppError wraps err with errMsg and pairs it with an application code.
func GetAppError(err error, errMsg string, code int) *AppError {
	wrapped := errors.Wrap(err, errMsg)
	return &AppError{Code: code, Err: wrapped}
}
|
package news
import (
"github.com/yogaagungk/newsupdate/model"
)
// Service is the contract for news use-cases: persisting a news item and
// fetching a page of items.
type Service interface {
	// Save persists data and returns the stored item.
	Save(data *model.News) (model.News, error)
	// FetchAll returns the news items on the given page.
	FetchAll(page int) ([]model.News, error)
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package prometheusscrape
import (
"testing"
apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common"
apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1"
"github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1"
"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature/fake"
"github.com/DataDog/datadog-operator/controllers/datadogagent/feature/test"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
func Test_prometheusScrapeFeature_Configure(t *testing.T) {
yamlConfigs := `
-
autodiscovery:
kubernetes_annotations:
exclude:
custom_exclude_label: 'true'
include:
custom_include_label: 'true'
kubernetes_container_names:
- my-app
configurations:
- send_distribution_buckets: true
timeout: 5`
jsonConfigs := `[{"autodiscovery":{"kubernetes_annotations":{"exclude":{"custom_exclude_label":"true"},"include":{"custom_include_label":"true"}},"kubernetes_container_names":["my-app"]},"configurations":[{"send_distribution_buckets":true}],"timeout":5}]`
// v1alpha1
ddav1PrometheusScrapeDisabled := v1alpha1.DatadogAgent{
Spec: v1alpha1.DatadogAgentSpec{
Features: v1alpha1.DatadogFeatures{
PrometheusScrape: &v1alpha1.PrometheusScrapeConfig{
Enabled: apiutils.NewBoolPointer(false),
},
},
},
}
ddav1PrometheusScrapeEnabled := ddav1PrometheusScrapeDisabled.DeepCopy()
{
ddav1PrometheusScrapeEnabled.Spec.Features.PrometheusScrape.Enabled = apiutils.NewBoolPointer(true)
}
ddav1PrometheusScrapeServiceEndpoints := ddav1PrometheusScrapeEnabled.DeepCopy()
{
ddav1PrometheusScrapeServiceEndpoints.Spec.Features.PrometheusScrape.ServiceEndpoints = apiutils.NewBoolPointer(true)
}
ddav1PrometheusScrapeAdditionalConfigs := ddav1PrometheusScrapeEnabled.DeepCopy()
{
ddav1PrometheusScrapeAdditionalConfigs.Spec.Features.PrometheusScrape.AdditionalConfigs = apiutils.NewStringPointer(yamlConfigs)
}
// v2alpha1
ddav2PrometheusScrapeDisabled := v2alpha1.DatadogAgent{
Spec: v2alpha1.DatadogAgentSpec{
Features: &v2alpha1.DatadogFeatures{
PrometheusScrape: &v2alpha1.PrometheusScrapeFeatureConfig{
Enabled: apiutils.NewBoolPointer(false),
},
},
},
}
ddav2PrometheusScrapeEnabled := ddav2PrometheusScrapeDisabled.DeepCopy()
{
ddav2PrometheusScrapeEnabled.Spec.Features.PrometheusScrape.Enabled = apiutils.NewBoolPointer(true)
}
ddav2PrometheusScrapeServiceEndpoints := ddav2PrometheusScrapeEnabled.DeepCopy()
{
ddav2PrometheusScrapeServiceEndpoints.Spec.Features.PrometheusScrape.EnableServiceEndpoints = apiutils.NewBoolPointer(true)
}
ddav2PrometheusScrapeAdditionalConfigs := ddav2PrometheusScrapeEnabled.DeepCopy()
{
ddav2PrometheusScrapeAdditionalConfigs.Spec.Features.PrometheusScrape.AdditionalConfigs = apiutils.NewStringPointer(yamlConfigs)
}
ddav2PrometheusScrapeWithVersion := ddav2PrometheusScrapeEnabled.DeepCopy()
ddav2PrometheusScrapeWithVersion.Spec.Features.PrometheusScrape.Version = apiutils.NewIntPointer(1)
tests := test.FeatureTestSuite{
///////////////////////////
// v1alpha1.DatadogAgent //
///////////////////////////
{
Name: "v1alpha1 Prometheus scrape not enabled",
DDAv1: ddav1PrometheusScrapeDisabled.DeepCopy(),
WantConfigure: false,
},
{
Name: "v1alpha1 Prometheus scrape enabled",
DDAv1: ddav1PrometheusScrapeEnabled,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
{
Name: "v1alpha1 Prometheus scrape service endpoints enabled",
DDAv1: ddav1PrometheusScrapeServiceEndpoints,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "true",
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "true",
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
{
Name: "v1alpha1 Prometheus scrape additional configs",
DDAv1: ddav1PrometheusScrapeAdditionalConfigs,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
{
Name: apicommon.DDPrometheusScrapeChecks,
Value: jsonConfigs,
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
{
Name: apicommon.DDPrometheusScrapeChecks,
Value: jsonConfigs,
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
// ///////////////////////////
// // v2alpha1.DatadogAgent //
// ///////////////////////////
{
Name: "v2alpha1 Prometheus scrape not enabled",
DDAv2: ddav2PrometheusScrapeDisabled.DeepCopy(),
WantConfigure: false,
},
{
Name: "v2alpha1 Prometheus scrape enabled",
DDAv2: ddav2PrometheusScrapeEnabled,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
{
Name: "v2alpha1 Prometheus scrape service endpoints enabled",
DDAv2: ddav2PrometheusScrapeServiceEndpoints,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "true",
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "true",
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
{
Name: "v2alpha1 Prometheus scrape additional configs",
DDAv2: ddav2PrometheusScrapeAdditionalConfigs,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
{
Name: apicommon.DDPrometheusScrapeChecks,
Value: jsonConfigs,
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
{
Name: apicommon.DDPrometheusScrapeChecks,
Value: jsonConfigs,
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
{
Name: "v2alpha1 version specified",
DDAv2: ddav2PrometheusScrapeWithVersion,
WantConfigure: true,
Agent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
wantEnvVars := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
{
Name: apicommon.DDPrometheusScrapeVersion,
Value: "1",
},
}
coreAgentEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.CoreAgentContainerName]
assert.True(t, apiutils.IsEqualStruct(coreAgentEnvVars, wantEnvVars), "Core Agent envvars \ndiff = %s", cmp.Diff(coreAgentEnvVars, wantEnvVars))
},
),
ClusterAgent: test.NewDefaultComponentTest().WithWantFunc(
func(t testing.TB, mgrInterface feature.PodTemplateManagers) {
mgr := mgrInterface.(*fake.PodTemplateManagers)
dcaEnvVars := mgr.EnvVarMgr.EnvVarsByC[apicommonv1.ClusterAgentContainerName]
want := []*corev1.EnvVar{
{
Name: apicommon.DDPrometheusScrapeEnabled,
Value: "true",
},
{
Name: apicommon.DDPrometheusScrapeServiceEndpoints,
Value: "false",
},
{
Name: apicommon.DDPrometheusScrapeVersion,
Value: "1",
},
}
assert.True(t, apiutils.IsEqualStruct(dcaEnvVars, want), "DCA envvars \ndiff = %s", cmp.Diff(dcaEnvVars, want))
},
),
},
}
tests.Run(t, buildPrometheusScrapeFeature)
}
|
package convert
import (
"fmt"
"strconv"
)
// ToString converts a handful of common scalar types (int, int64,
// string, float64, float32) to their string representation.
// Any other type — including nil — yields the empty string.
func ToString(str interface{}) string {
	// Bind the asserted value in the type switch instead of re-asserting
	// inside each case (gocritic: typeSwitchVar).
	switch v := str.(type) {
	case int:
		return strconv.Itoa(v)
	case int64:
		// Direct conversion; equivalent to fmt.Sprintf("%v", v) but cheaper.
		return strconv.FormatInt(v, 10)
	case string:
		return v
	case float64:
		return fmt.Sprintf("%v", v)
	case float32:
		return fmt.Sprintf("%v", v)
	default:
		return ""
	}
}
|
package follower
import (
"fmt"
userModel "go_simpleweibo/app/models/user"
"go_simpleweibo/database"
)
// Followers returns the users that follow userID (the fan list).
// A limit of 0 disables pagination and returns all rows.
func Followers(userID, offset, limit int) (followers []*userModel.User, err error) {
	followers = make([]*userModel.User, 0)
	joinSQL := fmt.Sprintf("inner join %s on users.id = followers.follower_id", tableName)
	// Build the base query once; apply pagination only when requested
	// (avoids the duplicated query and the else-after-return).
	query := database.DB.Model(&userModel.User{}).Joins(joinSQL).Where("followers.user_id = ?", userID)
	if limit != 0 {
		query = query.Offset(offset).Limit(limit)
	}
	d := query.Order("id").Find(&followers)
	return followers, d.Error
}
// Followings returns the users that userID follows.
// A limit of 0 disables pagination and returns all rows.
func Followings(userID, offset, limit int) (followers []*userModel.User, err error) {
	followers = make([]*userModel.User, 0)
	joinSQL := fmt.Sprintf("inner join %s on users.id = followers.user_id", tableName)
	// Build the base query once; apply pagination only when requested
	// (avoids the duplicated query and the else-after-return).
	query := database.DB.Model(&userModel.User{}).Joins(joinSQL).Where("followers.follower_id = ?", userID)
	if limit != 0 {
		query = query.Offset(offset).Limit(limit)
	}
	d := query.Order("id").Find(&followers)
	return followers, d.Error
}
// FollowingsIDList returns the IDs of every user that userID follows.
// Errors from the underlying query are ignored; an empty list is returned.
func FollowingsIDList(userID int) (followerIDList []uint) {
	users, _ := Followings(userID, 0, 0)
	followerIDList = make([]uint, 0, len(users))
	for _, u := range users {
		followerIDList = append(followerIDList, u.ID)
	}
	return followerIDList
}
// FollowingsCount returns how many users userID follows.
func FollowingsCount(userID int) (count int, err error) {
	join := fmt.Sprintf("inner join %s on users.id = followers.user_id", tableName)
	err = database.DB.Model(&userModel.User{}).
		Joins(join).
		Where("followers.follower_id = ?", userID).
		Count(&count).Error
	return count, err
}
// FollowersCount returns how many users follow userID.
func FollowersCount(userID int) (count int, err error) {
	join := fmt.Sprintf("inner join %s on users.id = followers.follower_id", tableName)
	err = database.DB.Model(&userModel.User{}).
		Joins(join).
		Where("followers.user_id = ?", userID).
		Count(&count).Error
	return count, err
}
// IsFollowing reports whether currentUserID already follows userID.
// Note: this loads the full following-ID list to check one relation.
func IsFollowing(currentUserID, userID int) bool {
	target := uint(userID)
	for _, followingID := range FollowingsIDList(currentUserID) {
		if followingID == target {
			return true
		}
	}
	return false
}
|
package cmd
import (
"fmt"
"io/ioutil"
"os"
log "github.com/Sirupsen/logrus"
qre "github.com/skip2/go-qrcode"
"github.com/spf13/cobra"
qri "github.com/tuotoo/qrcode"
)
var dockerfile, qrfile string
// init wires the sub-commands onto the root command and registers their
// flags. Flag defaults come from the QR_DOCKERFILE / QR_QRFILE
// environment variables. Note that qrbuild and qrimport both bind the
// shared qrfile variable; cobra executes only one command per
// invocation, so the sharing is safe in practice.
func init() {
	dockerqr.AddCommand(qrbuild)
	dockerqr.AddCommand(qrimport)
	qrbuild.Flags().StringVarP(&dockerfile, "dockerfile", "d", os.Getenv("QR_DOCKERFILE"), "Path to a dockerfile")
	qrbuild.Flags().StringVarP(&qrfile, "qrfile", "q", os.Getenv("QR_QRFILE"), "Path to a QR image")
	qrimport.Flags().StringVarP(&qrfile, "qrfile", "q", os.Getenv("QR_QRFILE"), "Path to a QR image")
}
// dockerqr is the root cobra command; qrbuild and qrimport are attached
// to it in init().
var dockerqr = &cobra.Command{
	Use:   "dockerqr",
	Short: "Builds and exports dockerfiles to/from QR files",
}
// Execute runs the root command, printing any error and exiting with
// status 1 on failure.
func Execute() {
	err := dockerqr.Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
// qrbuild encodes the contents of a Dockerfile into a QR code image. //TODO
var qrbuild = &cobra.Command{
	Use:   "qrbuild",
	Short: "Creates a QR image from a Dockerfile ",
	Run: func(cmd *cobra.Command, args []string) {
		if dockerfile == "" {
			log.Info("No Dockerfile specified, looking in current directory for ./dockerfile")
			dockerfile = "./dockerfile"
		}
		raw, err := ioutil.ReadFile(dockerfile)
		if err != nil {
			log.Fatalf("%v\n", err)
		}
		if qrfile == "" {
			log.Debugf("No output QR file specified, using default")
			qrfile = "dockerfile.png"
		}
		// Encode with the highest error-correction level into a 256px PNG.
		if err = qre.WriteFile(string(raw), qre.Highest, 256, qrfile); err != nil {
			log.Fatalf("%v\n", err)
		}
	},
}
// qrimport decodes a QR code image and prints its embedded content
// (the Dockerfile text) to stdout.
var qrimport = &cobra.Command{
	Use:   "qrimport",
	Short: "Creates a QR image from a Dockerfile ",
	Run: func(cmd *cobra.Command, args []string) {
		if qrfile == "" {
			// Default to default QR code image
			qrfile = "dockerfile.png"
		}
		img, err := os.Open(qrfile)
		if err != nil {
			log.Fatalf("%v\n", err)
		}
		defer img.Close()
		matrix, err := qri.Decode(img)
		if err != nil {
			log.Fatalf("%v\n", err)
		}
		fmt.Printf("%s\n", matrix.Content)
	},
}
|
package wkbcommon
import (
"io"
"github.com/paulmach/orb"
)
// readCollection decodes a WKB geometry collection from r.
// The declared element count is capped at MaxMultiAlloc for the initial
// allocation so malformed input cannot force a huge up-front make();
// the slice still grows to the real count via append.
func readCollection(r io.Reader, order byteOrder, buf []byte) (orb.Collection, error) {
	count, err := readUint32(r, order, buf[:4])
	if err != nil {
		return nil, err
	}
	capHint := count
	if capHint > MaxMultiAlloc {
		capHint = MaxMultiAlloc
	}
	coll := make(orb.Collection, 0, capHint)
	dec := NewDecoder(r)
	for i := 0; i < int(count); i++ {
		g, _, err := dec.Decode()
		if err != nil {
			return nil, err
		}
		coll = append(coll, g)
	}
	return coll, nil
}
// writeCollection encodes c as a WKB geometry collection: the type
// prefix (with length and optional SRID) followed by each member.
func (e *Encoder) writeCollection(c orb.Collection, srid int) error {
	if err := e.writeTypePrefix(geometryCollectionType, len(c), srid); err != nil {
		return err
	}
	for _, member := range c {
		if err := e.Encode(member, 0); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import "fmt"
// main demonstrates struct embedding: truck and sedan both embed
// vehicle and therefore inherit its doors/color fields.
func main() {
	type vehicle struct {
		doors string
		color string
	}
	type truck struct {
		vehicle
		fourWheel bool
	}
	type sedan struct {
		vehicle
		luxury bool
	}

	pickup := truck{
		vehicle:   vehicle{doors: "conventional", color: "red"},
		fourWheel: true,
	}
	coupe := sedan{
		vehicle: vehicle{doors: "butterfly", color: "black"},
		luxury:  true,
	}

	divider := "####################################################################################"
	fmt.Println(divider)
	fmt.Println()
	fmt.Printf("TRUCK: %v\n", pickup)
	fmt.Println()
	fmt.Printf("SEDAN: %v\n", coupe)
	fmt.Println()
	fmt.Println(pickup.fourWheel)
	fmt.Println(coupe.luxury)
	fmt.Println(divider)
}
|
package eventchannel
import (
"bytes"
"compress/gzip"
"io"
"math"
"sync"
"testing"
"time"
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/assert"
)
var largeBufferSize = int64(math.MaxInt64)
var largeEventCount = int64(math.MaxInt64)
var maxTime = 2 * time.Hour
func readGz(encoded []byte) string {
gr, _ := gzip.NewReader(bytes.NewReader(encoded))
defer gr.Close()
decoded, _ := io.ReadAll(gr)
return string(decoded)
}
// newSender returns a Sender that forwards every payload to dataSent.
// A mutex serializes concurrent senders so payloads arrive one at a time.
func newSender(dataSent chan []byte) Sender {
	var mu sync.Mutex
	return func(payload []byte) error {
		mu.Lock()
		defer mu.Unlock()
		dataSent <- payload
		return nil
	}
}
// readChanOrTimeout receives one value from c, failing the test if
// nothing arrives within 200ms. On success it returns the value and
// false; on timeout it returns nil plus the result of assert.Fail.
func readChanOrTimeout(t *testing.T, c <-chan []byte, msgAndArgs ...interface{}) ([]byte, bool) {
	t.Helper()
	timeout := time.After(200 * time.Millisecond)
	select {
	case got := <-c:
		return got, false
	case <-timeout:
		return nil, assert.Fail(t, "Should receive an event, but did NOT", msgAndArgs...)
	}
}
// TestEventChannelIsBufferFull exercises both fullness conditions of the
// channel buffer: the maximum event count and the maximum byte size.
func TestEventChannelIsBufferFull(t *testing.T) {
	send := func([]byte) error { return nil } // events are discarded; only fullness is under test
	clockMock := clock.NewMock()
	maxBufferSize := int64(15)
	maxEventCount := int64(3)
	eventChannel := NewEventChannel(send, clockMock, maxBufferSize, maxEventCount, maxTime)
	defer eventChannel.Close()
	eventChannel.buffer([]byte("one"))
	eventChannel.buffer([]byte("two"))
	assert.False(t, eventChannel.isBufferFull()) // not yet full by either max buffer size or max event count
	eventChannel.buffer([]byte("three"))
	assert.True(t, eventChannel.isBufferFull()) // full by event count (3)
	eventChannel.reset()
	assert.False(t, eventChannel.isBufferFull()) // was just reset, should not be full
	eventChannel.buffer([]byte("larger-than-15-characters"))
	assert.True(t, eventChannel.isBufferFull()) // full by max buffer size
}
// TestEventChannelReset verifies that reset() clears the byte buffer and
// zeroes both buffering metrics (event count and buffer size).
func TestEventChannelReset(t *testing.T) {
	send := func([]byte) error { return nil }
	clockMock := clock.NewMock()
	eventChannel := NewEventChannel(send, clockMock, largeBufferSize, largeEventCount, maxTime)
	defer eventChannel.Close()
	// Freshly constructed channel starts empty.
	assert.Zero(t, eventChannel.metrics.eventCount)
	assert.Zero(t, eventChannel.metrics.bufferSize)
	eventChannel.buffer([]byte("one"))
	assert.NotZero(t, eventChannel.metrics.eventCount)
	assert.NotZero(t, eventChannel.metrics.bufferSize)
	eventChannel.reset()
	// After reset both the backing buffer and the metrics are zeroed.
	assert.Zero(t, eventChannel.buff.Len())
	assert.Zero(t, eventChannel.metrics.eventCount)
	assert.Zero(t, eventChannel.metrics.bufferSize)
}
func TestEventChannelFlush(t *testing.T) {
dataSent := make(chan []byte)
send := newSender(dataSent)
clockMock := clock.NewMock()
eventChannel := NewEventChannel(send, clockMock, largeBufferSize, largeEventCount, maxTime)
defer eventChannel.Close()
eventChannel.buffer([]byte("one"))
eventChannel.buffer([]byte("two"))
eventChannel.buffer([]byte("three"))
eventChannel.flush()
data, _ := readChanOrTimeout(t, dataSent)
assert.Equal(t, "onetwothree", readGz(data))
}
func TestEventChannelClose(t *testing.T) {
dataSent := make(chan []byte)
send := newSender(dataSent)
clockMock := clock.NewMock()
eventChannel := NewEventChannel(send, clockMock, largeBufferSize, largeEventCount, maxTime)
eventChannel.buffer([]byte("one"))
eventChannel.buffer([]byte("two"))
eventChannel.buffer([]byte("three"))
eventChannel.Close()
data, _ := readChanOrTimeout(t, dataSent)
assert.Equal(t, "onetwothree", readGz(data))
}
// TestEventChannelPush verifies that pushed events are flushed to the
// sender once the periodic timer fires, driven here by the mocked clock.
func TestEventChannelPush(t *testing.T) {
	dataSent := make(chan []byte)
	send := newSender(dataSent)
	clockMock := clock.NewMock()
	eventChannel := NewEventChannel(send, clockMock, largeBufferSize, largeEventCount, 1*time.Second)
	defer eventChannel.Close()
	eventChannel.Push([]byte("1"))
	eventChannel.Push([]byte("2"))
	eventChannel.Push([]byte("3"))
	clockMock.Add(1 * time.Second) // trigger event timer
	// Push order is asynchronous, so only the set of bytes is asserted.
	data, _ := readChanOrTimeout(t, dataSent)
	assert.ElementsMatch(t, []byte{'1', '2', '3'}, []byte(readGz(data)))
}
|
package parser
import (
"fmt"
"github.com/bingo-lang/bingo/ast"
)
// parseExpression parses an expression with the given minimum operator
// precedence (precedence-climbing style). A prefix form is parsed first
// based on the current token (integer, boolean, identifier, grouped, or
// unary); binary operators are then folded in while their precedence
// exceeds the caller's.
func (p *Parser) parseExpression(precedence Precedence) (expression ast.Expression, err error) {
	switch {
	case p.tokenIsInteger():
		expression, err = p.parseExpressionInteger()
	case p.tokenIsBoolean():
		expression, err = p.parseExpressionBoolean()
	case p.tokenIsIdentifier():
		expression, err = p.parseExpressionIdentifier()
	case p.tokenIsLParen():
		expression, err = p.parseExpressionGrouped()
	case p.tokenIsUnaryOperator():
		expression, err = p.parseExpressionUnary()
	default:
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		err = fmt.Errorf("invalid token %q", p.token.Value)
	}
	if err != nil {
		return
	}
	// TODO(tugorez): Create a util function to better validate this precedence validation.
	for pr := p.precedence(); pr > precedence && err == nil; pr = p.precedence() {
		expression, err = p.parseExpressionBinary(expression, pr)
	}
	return
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"reflect"
"testing"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/debug"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// TestGetDebugger checks that NewDebugger returns a working debugger
// only for the debug run mode and a NoopDebugger for every other mode.
func TestGetDebugger(t *testing.T) {
	tests := []struct {
		description string
		runMode     config.RunMode
		isNoop      bool // expect a debug.NoopDebugger for this mode
	}{
		{
			description: "unspecified run mode defaults to disabled",
			isNoop:      true,
		},
		{
			description: "run mode set to debug",
			runMode:     config.RunModes.Debug,
		},
		{
			description: "run mode set to dev",
			runMode:     config.RunModes.Dev,
			isNoop:      true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			d := NewDebugger(test.runMode, nil, nil, "")
			// The debugger is a noop iff its dynamic (dereferenced) type
			// equals debug.NoopDebugger.
			t.CheckDeepEqual(test.isNoop, reflect.Indirect(reflect.ValueOf(d)).Type() == reflect.TypeOf(debug.NoopDebugger{}))
		})
	}
}
|
package main
import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)
// main reads three lines from stdin: two ints a and b, a string s, and
// two space-separated words. It prints the first word when a+b equals
// len(s) and the second word otherwise.
//
// Bug fix: the original mixed fmt.Scanf (reading os.Stdin directly)
// with a separate bufio.Reader, so the first ReadString('\n') consumed
// only the newline left behind by Scanf and s was effectively always
// empty. All input now flows through one buffered scanner.
func main() {
	in := bufio.NewScanner(os.Stdin)

	in.Scan()
	nums := strings.Fields(in.Text())
	a, _ := strconv.Atoi(nums[0])
	b, _ := strconv.Atoi(nums[1])

	in.Scan()
	s := strings.TrimSpace(in.Text())

	in.Scan()
	tokens := strings.Fields(in.Text())
	w1, w2 := tokens[0], tokens[1]

	if a+b == len(s) {
		fmt.Println(w1)
	} else {
		fmt.Println(w2)
	}
}
|
package controllers
import (
"github.com/pkg/errors"
kube_core "k8s.io/api/core/v1"
kube_ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/kumahq/kuma/pkg/core/resources/manager"
defaults_mesh "github.com/kumahq/kuma/pkg/defaults/mesh"
mesh_k8s "github.com/kumahq/kuma/pkg/plugins/resources/k8s/native/api/v1alpha1"
)
// MeshDefaultsReconciler creates default resources for created Mesh
type MeshDefaultsReconciler struct {
	// ResourceManager is used to create the default resources for a new Mesh.
	ResourceManager manager.ResourceManager
}
// Reconcile ensures the default resources exist for the Mesh named in req.
func (r *MeshDefaultsReconciler) Reconcile(req kube_ctrl.Request) (kube_ctrl.Result, error) {
	err := defaults_mesh.EnsureDefaultMeshResources(r.ResourceManager, req.Name)
	if err != nil {
		return kube_ctrl.Result{}, errors.Wrap(err, "could not create default mesh resources")
	}
	return kube_ctrl.Result{}, nil
}
// SetupWithManager registers the required API schemes and wires this
// reconciler to watch Mesh objects (Create events only, per onlyCreate).
func (r *MeshDefaultsReconciler) SetupWithManager(mgr kube_ctrl.Manager) error {
	scheme := mgr.GetScheme()
	if err := kube_core.AddToScheme(scheme); err != nil {
		return errors.Wrapf(err, "could not add %q to scheme", kube_core.SchemeGroupVersion)
	}
	if err := mesh_k8s.AddToScheme(scheme); err != nil {
		return errors.Wrapf(err, "could not add %q to scheme", mesh_k8s.GroupVersion)
	}
	return kube_ctrl.NewControllerManagedBy(mgr).
		For(&mesh_k8s.Mesh{}, builder.WithPredicates(onlyCreate)).
		Complete(r)
}
// we only want to react on Create events. User may want to delete default resources, we don't want to add them again when they update the Mesh
var onlyCreate = predicate.Funcs{
	CreateFunc: func(event event.CreateEvent) bool {
		return true
	},
	// Delete/Update/Generic events are deliberately ignored so user
	// modifications (including removals) of default resources stick.
	DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
		return false
	},
	UpdateFunc: func(updateEvent event.UpdateEvent) bool {
		return false
	},
	GenericFunc: func(genericEvent event.GenericEvent) bool {
		return false
	},
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import "sync"
// Broadcaster forwards all messages to all supplied handlers.
// Broadcaster implements the Handler interface.
type Broadcaster struct {
	m sync.RWMutex // guards l
	l []Handler    // registered handlers, in registration order
}
// Broadcast forwards all messages sent to Broadcast to all supplied handlers.
// Additional handlers can be added with Listen.
func Broadcast(handlers ...Handler) *Broadcaster {
	broadcaster := &Broadcaster{}
	for _, handler := range handlers {
		broadcaster.Listen(handler)
	}
	return broadcaster
}
// Listen adds h to the list of handlers that are informed of each log
// message passed to Handle. The returned function unregisters h.
func (b *Broadcaster) Listen(h Handler) (unlisten func()) {
	b.m.Lock()
	defer b.m.Unlock()
	b.l = append(b.l, h)
	return func() { b.remove(h) }
}
// Handle broadcasts the message to every listening handler.
// A read lock suffices: the handler list is only read here.
func (b *Broadcaster) Handle(m *Message) {
	b.m.RLock()
	defer b.m.RUnlock()
	for _, handler := range b.l {
		handler.Handle(m)
	}
}
// Count returns the number of registered handlers.
func (b *Broadcaster) Count() int {
	b.m.RLock()
	n := len(b.l)
	b.m.RUnlock()
	return n
}
// Close calls Close on all the listening handlers and removes them from
// the broadcaster.
func (b *Broadcaster) Close() {
	b.m.Lock()
	defer b.m.Unlock()
	for _, handler := range b.l {
		handler.Close()
	}
	// Replace with a fresh empty (non-nil) list, matching the original.
	b.l = []Handler{}
}
// remove unregisters the first occurrence of h, preserving the order of
// the remaining handlers. It is a no-op if h is not registered.
func (b *Broadcaster) remove(h Handler) {
	b.m.Lock()
	defer b.m.Unlock()
	for i, t := range b.l {
		if h == t {
			copy(b.l[i:], b.l[i+1:])
			// Nil the vacated tail slot so the slice does not keep the
			// removed handler reachable (the original leaked this reference).
			b.l[len(b.l)-1] = nil
			b.l = b.l[:len(b.l)-1]
			return
		}
	}
}
|
package controller
import (
"fmt"
"strconv"
"github.com/achimonchi/belajar_restapi_mux/src/modules/profile/model"
"github.com/achimonchi/belajar_restapi_mux/src/modules/profile/repository"
)
// GetAll returns every book from the repository.
func GetAll(repo repository.BookRepository) (model.Books, error) {
	all, err := repo.FindAll()
	if err != nil {
		return nil, err
	}
	return all, nil
}
// GetByID looks up a single book by its string id.
// An unparsable id is reported as an error instead of being silently
// treated as id 0 (the original ignored the strconv.Atoi error).
func GetByID(id string, repo repository.BookRepository) (*model.Book, error) {
	idInt, err := strconv.Atoi(id)
	if err != nil {
		return nil, fmt.Errorf("invalid book id %q: %w", id, err)
	}
	book, err := repo.FindByID(idInt)
	if err != nil {
		return nil, err
	}
	return book, nil
}
// SaveBook persists p through the repository, logging on success.
func SaveBook(p *model.Book, repo repository.BookRepository) error {
	if err := repo.Save(p); err != nil {
		return err
	}
	fmt.Println("Save Success !")
	return nil
}
// Update writes the modified book b back through the repository,
// logging the outcome either way.
func Update(b *model.Book, repo repository.BookRepository) error {
	if err := repo.Update(b.ID, b); err != nil {
		fmt.Println(err)
		return err
	}
	fmt.Println("Update success !")
	return nil
}
// Delete removes the book with the given string id.
// An unparsable id is reported as an error instead of silently deleting
// id 0 (the original ignored the strconv.Atoi error).
func Delete(id string, repo repository.BookRepository) error {
	ID, err := strconv.Atoi(id)
	if err != nil {
		return fmt.Errorf("invalid book id %q: %w", id, err)
	}
	if err := repo.Delete(ID); err != nil {
		return err
	}
	fmt.Print("Delete Success !")
	return nil
}
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
*/
package dfc_test
import (
"sync"
"sync/atomic"
"testing"
"time"
"runtime/debug"
"github.com/NVIDIA/dfcpub/dfc"
"github.com/NVIDIA/dfcpub/pkg/client"
)
// repFile is a unit of GET work: one object name plus the number of
// remaining GET repetitions to issue for it.
type repFile struct {
	repetitions int    // remaining GET repetitions for this file
	filename    string // object name within the test bucket
}
// metadata carries the shared state for the parallel GET/re-register tests.
type metadata struct {
	t                   *testing.T
	smap                dfc.Smap      // latest cluster-map snapshot
	delay               time.Duration // pause before the GET storm begins
	semaphore           chan struct{} // token bucket bounding concurrent GETs
	controlCh           chan struct{} // signals other tasks to start mid-test
	repFilenameCh       chan repFile  // work queue of (filename, remaining reps)
	wg                  *sync.WaitGroup
	bucket              string
	targetDirectURL     string // direct URL of the target that is re-registered
	sid                 string // ID of the unregistered/re-registered target
	otherTasksToTrigger int
	origNumTargets      int
	num                 int // number of objects PUT into the bucket
	numGetsEachFile     int
	numGetErrsBefore    uint64 // GET errors before T3; updated atomically
	numGetErrsAfter     uint64 // GET errors at/after T3; updated atomically
	getsCompleted       uint64 // finished GETs; updated atomically
	reregistered        uint64 // swapped 0->1 at T3; read atomically
}
// Intended for a deployment with multiple targets
// 1. Unregister target T
// 2. Create local bucket
// 3. PUT large amount of objects into the local bucket
// 4. GET the objects while simultaneously re-registering the target T
func TestGetAndReRegisterInParallel(t *testing.T) {
	const (
		num       = 20000
		filesize  = uint64(1024)
		seed      = int64(111)
		maxErrPct = 5 // max tolerated GET error rate, percent
	)
	var (
		err error
		m   = metadata{
			t:               t,
			delay:           18 * time.Second,
			num:             num,
			numGetsEachFile: 5,
			repFilenameCh:   make(chan repFile, num),
			semaphore:       make(chan struct{}, 10), // 10 concurrent GET requests at a time
			wg:              &sync.WaitGroup{},
			bucket:          TestLocalBucketName,
		}
		// With the current design, there exists a brief period of time
		// during which GET errors can occur - see the timeline comment below
		filenameCh = make(chan string, m.num)
		errch      = make(chan error, m.num)
		sgl        *dfc.SGLIO
	)
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// Step 1. Unregister one target and verify the cluster shrank by one.
	m.smap, err = client.GetClusterMap(proxyurl)
	checkFatal(err, t)
	m.origNumTargets = len(m.smap.Tmap)
	if m.origNumTargets < 2 {
		t.Fatalf("Must have 2 or more targets in the cluster, have only %d", m.origNumTargets)
	}
	// Pick an arbitrary target (map iteration order is random).
	for m.sid = range m.smap.Tmap {
		break
	}
	m.targetDirectURL = m.smap.Tmap[m.sid].DirectURL
	err = client.UnregisterTarget(proxyurl, m.sid)
	checkFatal(err, t)
	n := len(getClusterMap(t).Tmap)
	if n != m.origNumTargets-1 {
		t.Fatalf("%d targets expected after unregister, actually %d targets", m.origNumTargets-1, n)
	}
	tlogf("Unregistered target %s: the cluster now has %d targets\n", m.sid, n)
	// Step 2. Create (and defer-destroy) the local bucket under test.
	err = client.CreateLocalBucket(proxyurl, m.bucket)
	checkFatal(err, t)
	defer func() {
		err = client.DestroyLocalBucket(proxyurl, m.bucket)
		checkFatal(err, t)
	}()
	if usingSG {
		sgl = dfc.NewSGLIO(filesize)
		defer sgl.Free()
	}
	// Step 3. Seed the bucket, then queue each object for repeated GETs.
	tlogf("Putting %d files into bucket %s...\n", num, m.bucket)
	putRandomFiles(0, seed, filesize, num, m.bucket, t, nil, errch, filenameCh, SmokeDir, SmokeStr, "", true, sgl)
	selectErr(errch, "put", t, false)
	close(filenameCh)
	for f := range filenameCh {
		m.repFilenameCh <- repFile{repetitions: m.numGetsEachFile, filename: f}
	}
	// Step 4. GET storm and target re-registration run concurrently.
	m.wg.Add(num*m.numGetsEachFile + 1)
	go doReregisterTarget(&m)
	doGetsInParallel(&m)
	m.wg.Wait()
	// ===================================================================
	// the timeline (denoted as well in the doReregisterTarget() function) looks as follows:
	// 	- T1: client executes ReRegister
	// 	- T2: the cluster map gets updated
	// 	- T3: re-registered target gets the updated local bucket map
	// all the while GETs are running, and the "before" and "after" counters are almost
	// exactly "separated" by the time T3 ("almost" because of the Sleep in doGetsInParallel())
	// ===================================================================
	resultsBeforeAfter(&m, num, maxErrPct)
}
// All of the above PLUS proxy failover/failback sequence in parallel
// Namely:
// 1. Unregister a target
// 2. Create a local bucket
// 3. Crash the primary proxy and PUT in parallel
// 4. Failback to the original primary proxy, re-register target, and GET in parallel
func TestProxyFailbackAndReRegisterInParallel(t *testing.T) {
	const (
		num                 = 20000
		otherTasksToTrigger = 1
		filesize            = uint64(1024)
		seed                = int64(111)
		maxErrPct           = 5 // max tolerated GET error rate, percent
	)
	var (
		err error
		m   = metadata{
			t:                   t,
			delay:               15 * time.Second,
			otherTasksToTrigger: otherTasksToTrigger,
			num:                 num,
			numGetsEachFile:     5,
			repFilenameCh:       make(chan repFile, num),
			semaphore:           make(chan struct{}, 10), // 10 concurrent GET requests at a time
			controlCh:           make(chan struct{}, otherTasksToTrigger-1),
			wg:                  &sync.WaitGroup{},
			bucket:              TestLocalBucketName,
		}
		// Currently, a small percentage of GET errors can be reasonably expected as a result of this test.
		// With the current design of dfc, there is exists a brief period in which the cluster map is synced to
		// all nodes in the cluster during re-registering. During this period, errors can occur.
		filenameCh = make(chan string, m.num)
		errch      = make(chan error, m.num)
		sgl        *dfc.SGLIO
	)
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// Step 1. Unregister one target; this test also needs >=3 proxies
	// so a primary crash still leaves a quorum.
	m.smap, err = client.GetClusterMap(proxyurl)
	checkFatal(err, t)
	m.origNumTargets = len(m.smap.Tmap)
	if m.origNumTargets < 2 {
		t.Fatalf("Must have 2 or more targets in the cluster, have only %d", m.origNumTargets)
	}
	if len(m.smap.Pmap) < 3 {
		t.Fatalf("Must have 3 or more proxies/gateways in the cluster, have only %d", len(m.smap.Pmap))
	}
	// Pick an arbitrary target (map iteration order is random).
	for m.sid = range m.smap.Tmap {
		break
	}
	m.targetDirectURL = m.smap.Tmap[m.sid].DirectURL
	err = client.UnregisterTarget(proxyurl, m.sid)
	checkFatal(err, t)
	n := len(getClusterMap(t).Tmap)
	if n != m.origNumTargets-1 {
		t.Fatalf("%d targets expected after unregister, actually %d targets", m.origNumTargets-1, n)
	}
	tlogf("Unregistered target %s: the cluster now has %d targets\n", m.sid, n)
	// Step 2. Create (and defer-destroy) the local bucket under test.
	m.bucket = TestLocalBucketName
	err = client.CreateLocalBucket(proxyurl, m.bucket)
	checkFatal(err, t)
	defer func() {
		err = client.DestroyLocalBucket(proxyurl, m.bucket)
		checkFatal(err, t)
	}()
	if usingSG {
		sgl = dfc.NewSGLIO(filesize)
		defer sgl.Free()
	}
	// Step 3. Crash the primary proxy while the PUTs are in flight.
	m.wg.Add(1)
	go func() {
		primaryCrash(t)
		m.wg.Done()
	}()
	// PUT phase is timed to ensure it doesn't finish before primaryCrash() begins
	time.Sleep(5 * time.Second)
	tlogf("Putting %d files into bucket %s...\n", num, m.bucket)
	putRandomFiles(0, seed, filesize, num, m.bucket, t, nil, errch, filenameCh, SmokeDir, SmokeStr, "", true, sgl)
	selectErr(errch, "put", t, false)
	close(filenameCh)
	for f := range filenameCh {
		m.repFilenameCh <- repFile{repetitions: m.numGetsEachFile, filename: f}
	}
	m.wg.Wait()
	// Step 4. GET storm + target re-registration + primary failback,
	// all concurrently; failback is triggered halfway via controlCh.
	m.wg.Add(m.num*m.numGetsEachFile + 2)
	go doReregisterTarget(&m)
	go func() {
		<-m.controlCh
		primarySetToOriginal(t)
		m.wg.Done()
	}()
	doGetsInParallel(&m)
	m.wg.Wait()
	resultsBeforeAfter(&m, num, maxErrPct)
}
// resultsBeforeAfter checks the GET error rates observed before and
// after time T3 (see the T1/2/3 timeline in TestGetAndReRegisterInParallel)
// and fails the test when either exceeds maxErrPct percent.
func resultsBeforeAfter(m *metadata, num, maxErrPct int) {
	tlogf("Errors before and after time=T3 (re-registered target gets the updated local bucket map): %d and %d, respectively\n",
		m.numGetErrsBefore, m.numGetErrsAfter)
	totalGets := num * m.numGetsEachFile
	pctBefore := int(m.numGetErrsBefore) * 100 / totalGets
	pctAfter := int(m.numGetErrsAfter) * 100 / totalGets
	if pctBefore > maxErrPct || pctAfter > maxErrPct {
		m.t.Fatalf("Error rates before %d%% or after %d%% T3 exceed the max %d%%\n", pctBefore, pctAfter, maxErrPct)
	}
}
// doGetsInParallel issues num*numGetsEachFile GETs, at most 10 at a time
// (bounded by m.semaphore). Errors are counted separately before and
// after the re-registration point T3 (m.reregistered flag). Halfway
// through, it signals m.controlCh so other tasks can start in parallel.
func doGetsInParallel(m *metadata) {
	// Fill the semaphore with 10 tokens; each worker takes one and
	// returns it when done.
	for i := 0; i < 10; i++ {
		m.semaphore <- struct{}{}
	}
	tlogf("Getting each of the %d files %d times from bucket %s...\n", m.num, m.numGetsEachFile, m.bucket)
	// GET is timed so a large portion of requests will happen both before and after the target is re-registered
	time.Sleep(m.delay)
	for i := 0; i < m.num*m.numGetsEachFile; i++ {
		go func() {
			<-m.semaphore
			defer func() {
				m.semaphore <- struct{}{}
				m.wg.Done()
			}()
			// Take a work item; re-queue it while repetitions remain so
			// every file is fetched numGetsEachFile times in total.
			repFile := <-m.repFilenameCh
			if repFile.repetitions > 0 {
				repFile.repetitions -= 1
				m.repFilenameCh <- repFile
			}
			_, _, err := client.Get(proxyurl, m.bucket, SmokeStr+"/"+repFile.filename, nil, nil, false, false)
			if err != nil {
				// Attribute the error to before or after T3 based on the
				// atomically-set reregistered flag.
				r := atomic.LoadUint64(&(m.reregistered))
				if r == 1 {
					atomic.AddUint64(&(m.numGetErrsAfter), 1)
				} else {
					atomic.AddUint64(&(m.numGetErrsBefore), 1)
				}
			}
			g := atomic.AddUint64(&(m.getsCompleted), 1)
			if g%5000 == 0 {
				tlogf(" %d/%d GET requests completed...\n", g, m.num*m.numGetsEachFile)
			}
			// Tell other tasks they can begin to do work in parallel
			if int(g) == m.num*m.numGetsEachFile/2 { // only true for one goroutine
				for i := 0; i < m.otherTasksToTrigger; i++ {
					m.controlCh <- struct{}{}
				}
			}
		}()
	}
}
// doReregisterTarget re-registers target m.sid (T1), then polls until
// the cluster map includes it again (T2) and until the target has
// received the updated local bucket map (T3), at which point it
// atomically flips m.reregistered so GET errors are attributed correctly.
func doReregisterTarget(m *metadata) {
	const (
		timeout12 = time.Second * 10
		sleeptime = time.Millisecond * 10
		loopcnt   = int(timeout12 / sleeptime)
	)
	defer m.wg.Done()
	// T1
	err := client.RegisterTarget(m.sid, m.smap)
	checkFatal(err, m.t)
	for i := 0; i < loopcnt; i++ {
		time.Sleep(sleeptime)
		if len(m.smap.Tmap) < m.origNumTargets {
			// Still waiting for the cluster map to grow back.
			m.smap = getClusterMap(m.t)
			// T2
			if len(m.smap.Tmap) == m.origNumTargets {
				tlogf("T2: re-registered target %s\n", m.sid)
			}
		} else {
			// Cluster map is complete; now wait for the target to learn
			// about the test bucket.
			lbNames, err := client.GetLocalBucketNames(m.targetDirectURL)
			checkFatal(err, m.t)
			// T3
			if stringInSlice(m.bucket, lbNames.Local) {
				s := atomic.CompareAndSwapUint64(&m.reregistered, 0, 1)
				if !s {
					m.t.Errorf("reregistered should have swapped from 0 to 1. Actual reregistered = %d\n", m.reregistered)
				}
				tlogf("T3: re-registered target %s got updated with the new local bucket map\n", m.sid)
				break
			}
		}
	}
}
// checkFatal aborts the test on a non-nil error, dumping the goroutine
// stack first so the failure site is visible in the log.
func checkFatal(err error, t *testing.T) {
	if err == nil {
		return
	}
	debug.PrintStack()
	t.Fatal(err)
}
|
package workspaces
import (
"path/filepath"
"github.com/jenkins-x/jx-helpers/v3/pkg/yamls"
"github.com/jenkins-x/octant-jx/pkg/common/files"
)
// Octants is the collection of configured octant instances, loaded from and
// saved to an octants.yaml file.
type Octants struct {
	Octants  []Octant // all known octant entries
	fileName string   // path of the backing octants.yaml file
}

// Octant describes one octant instance configuration.
type Octant struct {
	Name           string `json:"name"`       // workspace name this entry belongs to
	Dir            string `json:"dir"`        // working directory
	KubeConfigPath string `json:"kubeConfig"` // path to the kubeconfig used
	Port           int    `json:"port"`       // port octant listens on
	Admin          bool   `json:"admin"`      // whether this is the admin/default instance
}
// NewOctants loads the octant configuration from the octants.yaml file under
// the JX ops home directory and returns the populated collection.
func NewOctants() (*Octants, error) {
	answer := &Octants{
		fileName: filepath.Join(files.JXOPSHomeDir(), "octants.yaml"),
	}
	if err := yamls.LoadFile(answer.fileName, &answer.Octants); err != nil {
		return nil, err
	}
	return answer, nil
}
// Get returns the Octant entry matching the workspace's name, or a zero entry
// if none exists. The result always carries the workspace name, is marked
// admin for the default workspace, and inherits the workspace's port when the
// stored entry has none.
func (o *Octants) Get(workspace *Workspace) Octant {
	name := workspace.Name
	var answer Octant
	for i := range o.Octants {
		if o.Octants[i].Name == name {
			answer = o.Octants[i]
			break
		}
	}
	answer.Name = name
	if workspace.Default {
		answer.Admin = true
	}
	if answer.Port <= 0 && workspace.Port > 0 {
		answer.Port = workspace.Port
	}
	return answer
}
// Set stores the given entry, replacing any existing entry with the same
// name. It reports true when a new entry was appended rather than replaced.
func (o *Octants) Set(values Octant) bool {
	for i := range o.Octants {
		if o.Octants[i].Name == values.Name {
			o.Octants[i] = values
			return false
		}
	}
	o.Octants = append(o.Octants, values)
	return true
}
// Save persists the current entries to the default octants.yaml file.
// NOTE(review): this ignores o.fileName and re-derives the path inside
// SaveOctants — the two are currently always equal, but confirm.
func (o *Octants) Save() error {
	return SaveOctants(o.Octants)
}
// LoadOctants reads the list of octant entries from the octants.yaml file
// under the JX ops home directory. The returned slice is non-nil even when
// the file is empty.
func LoadOctants() ([]Octant, error) {
	answer := []Octant{}
	configFile := filepath.Join(files.JXOPSHomeDir(), "octants.yaml")
	err := yamls.LoadFile(configFile, &answer)
	return answer, err
}
// SaveOctants writes the given octant entries to the octants.yaml file under
// the JX ops home directory.
func SaveOctants(octants []Octant) error {
	configFile := filepath.Join(files.JXOPSHomeDir(), "octants.yaml")
	return yamls.SaveFile(octants, configFile)
}
|
package main
import (
"fmt"
)
// greetings invokes callback with the given name — a minimal demonstration
// of passing a function as an argument.
func greetings(name string, callback func(string)) {
	callback(name)
}
// main demonstrates callback-style function arguments: a greeting printer
// and a slice filter driven by a predicate.
func main() {
	greetings("Bryan", func(s string) {
		fmt.Println("Greetings!", s)
	})
	filtered := filter([]int{1, 2, 3, 4, 5}, func(n int) bool {
		return n != 5 // change this for testing!
	})
	fmt.Println(filtered)
}
// filter returns the elements of numbers for which callback reports true,
// preserving input order. The result is a freshly allocated, never-nil slice.
func filter(numbers []int, callback func(int) bool) []int {
	// Pre-size to the maximum possible length to avoid repeated growth copies.
	filtered := make([]int, 0, len(numbers))
	for _, v := range numbers {
		if callback(v) {
			filtered = append(filtered, v)
		}
	}
	return filtered
}
// callback: passing a func as an argument
// not really idiomatic ..
|
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lifecycle
import "encoding/xml"
// AbortIncompleteMultipartUpload is unsupported, but some clients want to see
// the policy when setting it.
type AbortIncompleteMultipartUpload struct {
XMLName xml.Name `xml:"AbortIncompleteMultipartUpload"`
DaysAfterInitiation int `xml:"DaysAfterInitiation,omitempty"`
}
// MarshalXML so Pure Storage is happy.
func (a AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if a.DaysAfterInitiation == 0 {
a.DaysAfterInitiation = 1
}
type abortIncompleteMultipartUpload AbortIncompleteMultipartUpload
return e.EncodeElement(abortIncompleteMultipartUpload(a), start)
}
|
package util
import (
"crypto/rand"
"fmt"
"io"
"github.com/gorilla/securecookie"
"github.com/gorilla/sessions"
)
// Store is the secure cookie store used for sessions. Keys are regenerated on
// every process start, so sessions do not survive restarts.
// GenerateRandomKey already returns []byte, so no conversion is needed.
// NOTE(review): GenerateRandomKey returns nil if the system RNG fails, which
// NewCookieStore would silently accept — confirm that is acceptable.
var Store = sessions.NewCookieStore(
	securecookie.GenerateRandomKey(64), // signing key
	securecookie.GenerateRandomKey(32)) // encryption key
// init hardens the session cookies: HttpOnly blocks script access, and
// MaxAge caps every session's lifetime.
func init() {
	Store.Options.HttpOnly = true
	Store.MaxAge(3600 * 24) // max age is 24 hours of log tailing
}
// GenerateSecureKey returns a fresh 32-byte random key, hex-encoded to a
// 64-character string, for use in CSRF tokens.
// It panics if the system's secure random source fails: the original code
// ignored the io.ReadFull error and could silently return an all-zero key,
// defeating the token's purpose.
func GenerateSecureKey() string {
	// Inspired from gorilla/securecookie
	k := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, k); err != nil {
		panic("util: cannot read from crypto/rand: " + err.Error())
	}
	return fmt.Sprintf("%x", k)
}
|
// Given a non-empty array of digits representing a non-negative integer, increment one to the integer.
//
// The digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.
//
// You may assume the integer does not contain any leading zero, except the number 0 itself.
//
//
// Example 1:
//
//
// Input: digits = [1,2,3]
// Output: [1,2,4]
// Explanation: The array represents the integer 123.
//
//
// Example 2:
//
//
// Input: digits = [4,3,2,1]
// Output: [4,3,2,2]
// Explanation: The array represents the integer 4321.
//
//
// Example 3:
//
//
// Input: digits = [0]
// Output: [1]
//
//
//
// Constraints:
//
//
// 1 <= digits.length <= 100
// 0 <= digits[i] <= 9
//
//
// plusOne increments the non-negative integer represented by digits (most
// significant digit first) by one and returns the result. The input slice is
// modified in place; a new slice is allocated only when the digit count grows
// (e.g. [9,9] -> [1,0,0]). An empty input is returned unchanged.
func plusOne(digits []int) []int {
	if len(digits) == 0 {
		return digits
	}
	for i := len(digits) - 1; i >= 0; i-- {
		if digits[i] < 9 {
			// No carry past this digit — stop early instead of scanning
			// the remaining (unchanged) prefix.
			digits[i]++
			return digits
		}
		digits[i] = 0
	}
	// Every digit was 9: the number grows by one digit.
	return append([]int{1}, digits...)
}
|
package repository
import (
"editorApi/commons"
"editorApi/init/mgdb"
"editorApi/requests"
"editorApi/responses"
"editorApi/tools/helpers"
"github.com/gin-gonic/gin"
uuid "github.com/satori/go.uuid"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo/options"
"time"
)
// tb_content_reports is the MongoDB collection name for content reports.
const tb_content_reports = "content_reports"

// ContentReports is a MongoDB document describing a user-submitted feedback
// report about course content.
type ContentReports struct {
	ID          string    `json:"id" bson:"_id"`                   // document ID
	DataVersion int64     `json:"dataVersion" bson:"data_version"` // data version
	UserId      string    `json:"userId" bson:"user_id"`           // user ID
	Uuid        string    `json:"uuid" bson:"uuid"`                // uuid
	Code        string    `json:"code" bson:"code"`                // course code
	Tags        string    `json:"tags" bson:"tags"`                // tags
	Agent       string    `json:"agent" bson:"agent"`              // agent
	DataArea    string    `json:"dataArea" bson:"data_area"`       //
	ParentUuids []string  `json:"parentUuids" bson:"parent_uuids"` // all parent node uuids
	LangCode    string    `json:"langCode" bson:"lang_code"`       // owning language
	Desc        string    `json:"desc" bson:"desc"`                // description
	Img         string    `json:"img" bson:"img"`                  // image URL
	CreatedTime time.Time `json:"createdTime" bson:"created_time"` // creation time
	Status      int       `json:"status" bson:"status"`            // status: 1 handled; 0 unhandled; default 0
}
// Create inserts a new course content feedback document, generating a fresh
// uuid server-side, and returns the inserted document ID.
// NOTE(review): created_time and status are not written here, so new
// documents are stored without those fields — confirm this is intended.
func (m *ContentReports) Create(ctx *gin.Context, params requests.ContentReportsCreateRequests) (inserted_id interface{}, err error) {
	db := mgdb.MongoClient.Database(mgdb.DbContent).Collection(tb_content_reports)
	var add = bson.M{
		"data_version": params.DataVersion,
		"user_id":      params.UserId,
		"uuid":         uuid.NewV4().String(),
		"code":         params.Code,
		"tags":         params.Tags,
		"agent":        params.Agent,
		"data_area":    params.DataArea,
		"parent_uuids": params.ParentUuids,
		"lang_code":    params.LangCode,
		"desc":         params.Desc,
		"img":          params.Img,
	}
	insertResult, err := db.InsertOne(ctx, add)
	if err != nil {
		return
	}
	inserted_id = insertResult.InsertedID
	return
}
// Find returns all course content feedback documents matching the optional
// ID, user id and uuid filters; empty parameters are skipped.
func (m *ContentReports) Find(ctx *gin.Context, params requests.ContentReportsFindRequests) (result []responses.ContentReportsResponses, err error) {
	db := mgdb.MongoClient.Database(mgdb.DbContent).Collection(tb_content_reports)
	// Filter conditions: only non-empty params are applied.
	var filter = bson.D{}
	if !helpers.Empty(params.ID) {
		// NOTE(review): a malformed hex ID is silently ignored and matches
		// the zero ObjectID — confirm acceptable.
		id, _ := primitive.ObjectIDFromHex(params.ID)
		filter = append(filter, bson.E{"_id", id})
	}
	if !helpers.Empty(params.UserId) {
		filter = append(filter, bson.E{"user_id", params.UserId})
	}
	if !helpers.Empty(params.Uuid) {
		filter = append(filter, bson.E{"uuid", params.Uuid})
	}
	cursor, err := db.Find(ctx, filter)
	if err != nil {
		return
	}
	err = cursor.All(ctx, &result)
	return
}
// FindOne returns a single course content feedback document matching the
// optional code and uuid filters; empty parameters are skipped.
func (m *ContentReports) FindOne(ctx *gin.Context, params requests.ContentReportsFindOneRequests) (result responses.ContentReportsResponses, err error) {
	db := mgdb.MongoClient.Database(mgdb.DbContent).Collection(tb_content_reports)
	// Filter conditions: only non-empty params are applied.
	var filter = bson.D{}
	if !helpers.Empty(params.Code) {
		filter = append(filter, bson.E{"code", params.Code})
	}
	if !helpers.Empty(params.Uuid) {
		filter = append(filter, bson.E{"uuid", params.Uuid})
	}
	singleResult := db.FindOne(ctx, filter)
	// Bug fix: the original checked `err` here before it was ever assigned
	// (dead code). Surface lookup errors via SingleResult.Err instead.
	if err = singleResult.Err(); err != nil {
		return
	}
	err = singleResult.Decode(&result)
	return
}
// List returns a page of course content feedback documents matching the
// optional filters, with paging and optional single-field sorting.
func (m *ContentReports) List(ctx *gin.Context, params requests.ContentReportsListRequests) (result []responses.ContentReportsResponses, err error) {
	db := mgdb.MongoClient.Database(mgdb.DbContent).Collection(tb_content_reports)
	// Filter conditions: only non-empty params are applied.
	var filter = bson.D{}
	if !helpers.Empty(params.ID) {
		id, _ := primitive.ObjectIDFromHex(params.ID)
		filter = append(filter, bson.E{"_id", id})
	}
	if !helpers.Empty(params.Uuid) {
		filter = append(filter, bson.E{"uuid", params.Uuid})
	}
	// code
	if !helpers.Empty(params.Code) {
		filter = append(filter, bson.E{"code", params.Code})
	}
	// lang_code
	if !helpers.Empty(params.LangCode) {
		filter = append(filter, bson.E{"lang_code", params.LangCode})
	}
	// desc: case-insensitive substring match
	if !helpers.Empty(params.Desc) {
		filter = append(filter, bson.E{"desc", primitive.Regex{Pattern: params.Desc, Options: "i"}})
	}
	page := commons.DefaultPage()
	if !helpers.Empty(params.PageSize) {
		page.Limit = params.PageSize
	}
	if !helpers.Empty(params.PageIndex) && params.PageIndex > 0 {
		page.Skip = (params.PageIndex - 1) * page.Limit
	}
	option := options.Find().SetSkip(page.Skip).SetLimit(page.Limit)
	if !helpers.Empty(params.SortType) && !helpers.Empty(params.TextField) {
		option = option.SetSort(bson.M{
			helpers.CamelToCase(params.TextField): params.SortType,
		})
	}
	cursor, err := db.Find(ctx, filter, option)
	if err != nil {
		// Bug fix: the original deferred cursor.Close(ctx) BEFORE this check,
		// so a failed Find left cursor nil and the deferred Close panicked.
		return
	}
	defer cursor.Close(ctx)
	err = cursor.All(ctx, &result)
	return
}
// Update overwrites the fields of the course content feedback document
// matching the uuid and/or ID filters with the values from params.
// NOTE(review): the stored uuid is replaced by params.Uuid, so updating by
// ID with an empty uuid clears the document's uuid — confirm intended.
func (m *ContentReports) Update(ctx *gin.Context, params requests.ContentReportsUpdateRequests) (updateResult interface{}, err error) {
	db := mgdb.MongoClient.Database(mgdb.DbContent).Collection(tb_content_reports)
	var filter = bson.D{}
	if !helpers.Empty(params.Uuid) {
		filter = append(filter, bson.E{"uuid", params.Uuid})
	}
	if !helpers.Empty(params.ID) {
		id, _ := primitive.ObjectIDFromHex(params.ID)
		filter = append(filter, bson.E{"_id", id})
	}
	var update = bson.M{
		"data_version": params.DataVersion,
		"user_id":      params.UserId,
		"uuid":         params.Uuid,
		"code":         params.Code,
		"tags":         params.Tags,
		"agent":        params.Agent,
		"data_area":    params.DataArea,
		"parent_uuids": params.ParentUuids,
		"lang_code":    params.LangCode,
		"desc":         params.Desc,
		"img":          params.Img,
		"status":       params.Status,
	}
	updateResult, err = db.UpdateOne(ctx, filter, bson.M{"$set": update})
	if err != nil {
		return
	}
	return
}
// Delete removes the single course content feedback document matching the
// uuid and/or ID filters.
// NOTE(review): with both params empty the filter matches everything and an
// arbitrary document is deleted — confirm callers always pass an identifier.
func (m *ContentReports) Delete(ctx *gin.Context, params requests.ContentReportsDeleteRequests) (deleteResult interface{}, err error) {
	db := mgdb.MongoClient.Database(mgdb.DbContent).Collection(tb_content_reports)
	var filter = bson.D{}
	if !helpers.Empty(params.Uuid) {
		filter = append(filter, bson.E{"uuid", params.Uuid})
	}
	if !helpers.Empty(params.ID) {
		id, _ := primitive.ObjectIDFromHex(params.ID)
		filter = append(filter, bson.E{"_id", id})
	}
	deleteResult, err = db.DeleteOne(ctx, filter)
	return
}
|
package main
import (
"strconv"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
)
func Limiter (rs *RedisServer) gin.HandlerFunc {
return func(c *gin.Context) {
ip := c.ClientIP()
if err := rs.Lock(); err != nil {
log.Fatalf("database server lock error: %v", err)
}
if !rs.CheckExist(ip) {
err := rs.Set(ip)
if err != nil {
log.Fatalf("middleware.Limiter err: %v", err)
}
}
XRemaining, XReset, err := rs.Get(ip)
if err != nil {
log.Fatalf("middleware.Limiter err: %v", err)
}
XRemaingInt, err := strconv.Atoi(XRemaining)
if err != nil {
log.Fatalf("middleware.Limiter err: %v", err)
}
if XRemaingInt < 0 {
c.JSON(429, nil)
} else {
rs.IncreaseVisit(ip)
c.Header("X-RateLimit-Remaining", XRemaining)
c.Header("X-RateLimit-Reset", XReset )
c.JSON(200, nil)
}
if err := rs.Unlock(); err != nil {
log.Fatalf("redis server unlock error: %v", err)
}
}
} |
package cc
import (
"testing"
)
// threateningTests is the table for TestThreatening: each row places piece p
// at (x, y) on a c-by-r board and expects `out` threatened cells.
var threateningTests = []struct {
	c   uint8 // columns on board
	r   uint8 // rows on board
	p   Piece // piece to test
	x   uint8 // column to place piece
	y   uint8 // row to place piece
	out int   // Number of threatened cells
}{
	{1, 1, King, 0, 0, 0},
	{2, 2, King, 0, 0, 3},
	{100, 100, King, 0, 0, 3},
	{100, 100, King, 50, 50, 8},
	{100, 100, King, 99, 99, 3},
	{1, 1, Rook, 0, 0, 0},
	{2, 2, Rook, 0, 0, 2},
	{10, 10, Rook, 0, 0, 18},
	{10, 10, Rook, 9, 9, 18},
	{10, 10, Rook, 4, 4, 18},
	{1, 1, Knight, 0, 0, 0},
	{2, 2, Knight, 0, 0, 0},
	{5, 5, Knight, 2, 2, 8},
	{5, 5, Knight, 4, 4, 2},
	{5, 5, Knight, 0, 0, 2},
	{5, 5, Knight, 2, 0, 4},
	{1, 1, Queen, 0, 0, 0},
	{2, 2, Queen, 0, 0, 3},
	{5, 5, Queen, 2, 2, 16},
	{5, 5, Queen, 4, 4, 12},
	{5, 5, Queen, 0, 0, 12},
	{1, 1, Bishop, 0, 0, 0},
	{2, 2, Bishop, 0, 0, 1},
	{5, 5, Bishop, 2, 2, 8},
	{5, 5, Bishop, 4, 4, 4},
	{5, 5, Bishop, 0, 0, 4},
	{1, 1, Blank, 0, 0, 0},
	{1, 1, Dead, 0, 0, 0},
}
// TestThreatening runs the table-driven threat-generation tests, checking
// the number of threatened cells for each piece/board/position combination.
func TestThreatening(t *testing.T) {
	for _, c := range threateningTests {
		b := NewBoard(c.c, c.r)
		tr, err := c.p.Threatening(&b, c.x, c.y)
		if err != nil {
			// Fix: "occured" -> "occurred" in the failure message.
			t.Errorf("Error occurred: %s", err)
		}
		if len(tr) != c.out {
			t.Errorf("Expected %d got %d: %+v", c.out, len(tr), c)
		}
	}
}
// TestThreateningOutOfBounds verifies that every piece type rejects
// coordinates outside the board. The five hand-unrolled copies of the same
// check are folded into a single loop over the piece types.
func TestThreateningOutOfBounds(t *testing.T) {
	b := NewBoard(2, 2)
	for _, p := range []Piece{King, Rook, Knight, Queen, Bishop} {
		if _, err := p.Threatening(&b, 5, 3); err == nil {
			t.Errorf("Expected out of bounds error")
		}
	}
}
// Benchmarks
// Shared boards reused across all benchmarks to keep setup out of the timed loops.
var smallBoard = NewBoard(3, 3)
var largeBoard = NewBoard(30, 30)
// BenchmarkThreateningKingSmall measures king threat generation from the
// center of a 3x3 board (all 8 neighbours threatened).
func BenchmarkThreateningKingSmall(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := King.Threatening(&smallBoard, 1, 1)
		if len(tr) != 8 {
			b.Errorf("Expected %d got %d", 8, len(tr))
		}
	}
}

// BenchmarkThreateningKingLarge measures king threat generation from an
// interior square of a 30x30 board (all 8 neighbours threatened).
func BenchmarkThreateningKingLarge(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := King.Threatening(&largeBoard, 10, 10)
		if len(tr) != 8 {
			b.Errorf("Expected %d got %d", 8, len(tr))
		}
	}
}
// BenchmarkThreateningRookSmall measures rook threat generation from the
// center of a 3x3 board (4 threatened cells).
func BenchmarkThreateningRookSmall(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Rook.Threatening(&smallBoard, 1, 1)
		if len(tr) != 4 {
			// Fix: the failure message previously hard-coded 8 while the
			// condition asserts 4.
			b.Errorf("Expected %d got %d", 4, len(tr))
		}
	}
}
// BenchmarkThreateningRookLarge measures rook threat generation from an
// interior square of a 30x30 board (58 threatened cells).
func BenchmarkThreateningRookLarge(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Rook.Threatening(&largeBoard, 14, 14)
		if len(tr) != 58 {
			// Fix: the failure message previously hard-coded 8 while the
			// condition asserts 58.
			b.Errorf("Expected %d got %d", 58, len(tr))
		}
	}
}
// BenchmarkThreateningKnightSmall measures knight threat generation from the
// corner of a 3x3 board (2 threatened cells).
func BenchmarkThreateningKnightSmall(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Knight.Threatening(&smallBoard, 0, 0)
		if len(tr) != 2 {
			b.Errorf("Expected %d got %d", 2, len(tr))
		}
	}
}

// BenchmarkThreateningKnightLarge measures knight threat generation from an
// interior square of a 30x30 board (all 8 knight moves available).
func BenchmarkThreateningKnightLarge(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Knight.Threatening(&largeBoard, 10, 10)
		if len(tr) != 8 {
			b.Errorf("Expected %d got %d", 8, len(tr))
		}
	}
}

// BenchmarkThreateningQueenSmall measures queen threat generation from the
// corner of a 3x3 board (6 threatened cells).
func BenchmarkThreateningQueenSmall(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Queen.Threatening(&smallBoard, 0, 0)
		if len(tr) != 6 {
			b.Errorf("Expected %d got %d", 6, len(tr))
		}
	}
}

// BenchmarkThreateningQueenLarge measures queen threat generation from an
// interior square of a 30x30 board (107 threatened cells).
func BenchmarkThreateningQueenLarge(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Queen.Threatening(&largeBoard, 10, 10)
		if len(tr) != 107 {
			b.Errorf("Expected %d got %d", 107, len(tr))
		}
	}
}

// BenchmarkThreateningBishopSmall measures bishop threat generation from the
// corner of a 3x3 board (2 threatened cells).
func BenchmarkThreateningBishopSmall(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Bishop.Threatening(&smallBoard, 0, 0)
		if len(tr) != 2 {
			b.Errorf("Expected %d got %d", 2, len(tr))
		}
	}
}

// BenchmarkThreateningBishopLarge measures bishop threat generation from an
// interior square of a 30x30 board (49 threatened cells).
func BenchmarkThreateningBishopLarge(b *testing.B) {
	for n := 0; n < b.N; n++ {
		tr, _ := Bishop.Threatening(&largeBoard, 10, 10)
		if len(tr) != 49 {
			b.Errorf("Expected %d got %d", 49, len(tr))
		}
	}
}
|
package main
import (
"fmt"
"github.com/jmoiron/sqlx"
"strings"
)
// Adapter wraps an sqlx database handle together with the SQL statements
// used for schema-version bookkeeping, which each backend supplies.
// NOTE(review): the ALL_CAPS field names violate Go naming conventions, but
// renaming would break the per-backend constructors that populate them.
type Adapter struct {
	*sqlx.DB
	CREATE_VERSION_TABLE string // DDL creating the migrations version table
	SELECT_VERSIONS      string // query listing applied versions
	INSERT_VERSION       string // statement recording a newly applied version
}

// adapters maps a connection-url scheme to the backend-specific constructor.
var adapters = map[string]func(string) (*Adapter, error){
	"postgres": open_postgres,
}
// Open connects to the database described by connection url u. The scheme
// prefix before the first ':' (e.g. "postgres") selects the adapter, which
// receives the full url.
func Open(u string) (*Adapter, error) {
	parts := strings.SplitN(u, ":", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("Invalid connection url: %q", u)
	}
	open, ok := adapters[parts[0]]
	if !ok {
		return nil, fmt.Errorf("Unsupported database type: %q", parts[0])
	}
	return open(u)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package inputs
import (
"context"
"strings"
"time"
"chromiumos/tast/local/apps"
"chromiumos/tast/local/bundles/cros/inputs/fixture"
"chromiumos/tast/local/bundles/cros/inputs/pre"
"chromiumos/tast/local/bundles/cros/inputs/util"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/ime"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/uiauto/vkb"
"chromiumos/tast/local/chrome/useractions"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the VirtualKeyboardTypingApps test with stable and
// informational (unstable-model) variants.
func init() {
	testing.AddTest(&testing.Test{
		Func:         VirtualKeyboardTypingApps,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that the virtual keyboard works in apps",
		Contacts:     []string{"essential-inputs-gardener-oncall@google.com", "essential-inputs-team@google.com"},
		Attr:         []string{"group:mainline", "group:input-tools"},
		SoftwareDeps: []string{"chrome", "google_virtual_keyboard"},
		Fixture:      fixture.TabletVK,
		SearchFlags: []*testing.StringPair{
			{
				Key:   "ime",
				Value: ime.EnglishUS.Name,
			},
		},
		Timeout: 5 * time.Minute,
		Params: []testing.Param{
			{
				// Stable models run in the upstream-gated group.
				ExtraHardwareDeps: hwdep.D(pre.InputsStableModels),
				ExtraAttr:         []string{"group:input-tools-upstream"},
			},
			{
				// Unstable models run informationally only.
				Name:              "informational",
				ExtraHardwareDeps: hwdep.D(pre.InputsUnstableModels),
				ExtraAttr:         []string{"informational"},
			},
		},
	})
}
// VirtualKeyboardTypingApps launches the Settings app and verifies that text
// typed on the virtual keyboard appears in the settings search field.
func VirtualKeyboardTypingApps(ctx context.Context, s *testing.State) {
	// typingKeys indicates a key series that tapped on virtual keyboard.
	// Input string should start with lower case letter because VK layout is not auto-capitalized in the settings search bar.
	const typingKeys = "language"

	cr := s.FixtValue().(fixture.FixtData).Chrome
	tconn := s.FixtValue().(fixture.FixtData).TestAPIConn
	uc := s.FixtValue().(fixture.FixtData).UserContext
	defer faillog.DumpUITreeOnError(ctx, s.OutDir(), s.HasError, tconn)

	app := apps.Settings
	s.Logf("Launching %s", app.Name)
	if err := apps.Launch(ctx, tconn, app.ID); err != nil {
		// Bug fix: the format verb was %c (character), which mangles the
		// error; %v prints it properly.
		s.Fatalf("Failed to launch %s: %v", app.Name, err)
	}
	if err := ash.WaitForApp(ctx, tconn, app.ID, time.Minute); err != nil {
		s.Fatalf("%s did not appear in shelf after launch: %v", app.Name, err)
	}

	vkbCtx := vkb.NewContext(cr, tconn)
	searchFieldFinder := nodewith.Role(role.SearchBox).Name("Search settings")
	validateAction := uiauto.Combine("test virtual keyboard input in settings app",
		vkbCtx.ClickUntilVKShown(searchFieldFinder),
		vkbCtx.WaitForDecoderEnabled(true),
		vkbCtx.TapKeysIgnoringCase(strings.Split(typingKeys, "")),
		// Hide virtual keyboard to submit candidate.
		vkbCtx.HideVirtualKeyboard(),
		// Validate text.
		util.WaitForFieldTextToBeIgnoringCase(tconn, searchFieldFinder, typingKeys),
	)

	if err := uiauto.UserAction("VK typing input",
		validateAction,
		uc,
		&useractions.UserActionCfg{
			Attributes: map[string]string{
				useractions.AttributeInputField: "OS setting search field",
				useractions.AttributeFeature:    useractions.FeatureVKTyping,
			},
		},
	)(ctx); err != nil {
		s.Fatal("Failed to verify virtual keyboard input in settings: ", err)
	}
}
|
package pkg
import (
"testing"
"github.com/mrahbar/kubernetes-inspector/types"
"github.com/stretchr/testify/assert"
"github.com/bouk/monkey"
"os"
"io/ioutil"
"fmt"
"path/filepath"
"path"
)
// TestScp_DirectionUnknown verifies Scp exits with an error when the
// direction argument is neither 'up' nor 'down'.
func TestScp_DirectionUnknown(t *testing.T) {
	_, outBuffer, context := defaultContext()
	context.Opts = &types.ScpOpts{
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "target",
		},
	}
	// os.Exit is monkey-patched so the fatal path can be observed
	// without killing the test process.
	osExitCalled := false
	patch := monkey.Patch(os.Exit, func(int) {
		osExitCalled = true
	})
	defer patch.Unpatch()
	Scp(context)
	assert.True(t, osExitCalled)
	assert.Contains(t, outBuffer.String(), "Direction must either be 'up' or 'down' resp. first letter. Provided: 'target'")
}

// TestScp_DirectionUp_RemoteFileUnprocessable verifies a failing remote path
// lookup is reported.
func TestScp_DirectionUp_RemoteFileUnprocessable(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	mockExecutor, outBuffer, context := defaultContext()
	context.Opts = &types.ScpOpts{
		RemotePath: "/tmp/unknown",
		LocalPath:  out.Name(),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	called := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		called = true
		return &types.SSHOutput{}, fmt.Errorf("Remote lookup failed")
	}
	Scp(context)
	assert.True(t, called)
	assert.Contains(t, outBuffer.String(), "Remote path /tmp/unknown is unprocessable: Remote lookup failed")
	out.Close()
}

// TestScp_DirectionUp_LocalFileUnprocessable verifies a nonexistent local
// path is reported.
func TestScp_DirectionUp_LocalFileUnprocessable(t *testing.T) {
	mockExecutor, outBuffer, context := defaultContext()
	context.Opts = &types.ScpOpts{
		RemotePath: "/tmp/known",
		LocalPath:  "/tmp/unknown",
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	called := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		called = true
		return &types.SSHOutput{Stdout: "file"}, nil
	}
	Scp(context)
	assert.True(t, called)
	assert.Contains(t, outBuffer.String(), "Local path /tmp/unknown is unprocessable")
}
//---- up

// TestScp_DirectionUp_LocalDirToRemoteFile_Invalid verifies uploading a
// directory onto an existing remote file is rejected.
func TestScp_DirectionUp_LocalDirToRemoteFile_Invalid(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	context.Opts = &types.ScpOpts{
		RemotePath: "/tmp/file",
		LocalPath:  filepath.Dir(abs),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	called := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		called = true
		return &types.SSHOutput{Stdout: "file"}, nil
	}
	osExitCalled := false
	patch := monkey.Patch(os.Exit, func(int) {
		osExitCalled = true
	})
	defer patch.Unpatch()
	Scp(context)
	out.Close()
	assert.True(t, called)
	assert.True(t, osExitCalled)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Can not upload directory %s to remote file /tmp/file. Please choose a remote directory", localDir))
}

// TestScp_DirectionUp_LocalFileToRemoteFile_Invalid verifies uploading a file
// over an existing remote file is rejected.
func TestScp_DirectionUp_LocalFileToRemoteFile_Invalid(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	mockExecutor, outBuffer, context := defaultContext()
	context.Opts = &types.ScpOpts{
		RemotePath: "/tmp/file",
		LocalPath:  out.Name(),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "file"}, nil
	}
	osExitCalled := false
	patch := monkey.Patch(os.Exit, func(int) {
		osExitCalled = true
	})
	defer patch.Unpatch()
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, osExitCalled)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Can not upload local file %s to existing remote file /tmp/file. Please choose a remote directory or a new remote filename.", out.Name()))
}

// TestScp_DirectionUp_LocalFileToRemoteDirectory_ScpError verifies an upload
// failure is reported for a file transfer.
func TestScp_DirectionUp_LocalFileToRemoteDirectory_ScpError(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  out.Name(),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "dir"}, nil
	}
	calledUpload := false
	errMsg := "Error uploading file"
	mockExecutor.MockUploadFile = func(remotePath string, localPath string) error {
		calledUpload = true
		return fmt.Errorf(errMsg)
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledUpload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp failed %s -> %s: %s", out.Name(), path.Join(remote, out.Name()), errMsg))
}

// TestScp_DirectionUp_LocalDirToRemoteDirectory_ScpError verifies an upload
// failure is reported for a directory transfer.
func TestScp_DirectionUp_LocalDirToRemoteDirectory_ScpError(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	remoteDir := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remoteDir,
		LocalPath:  localDir,
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "dir"}, nil
	}
	calledUpload := false
	errMsg := "Error uploading directory"
	mockExecutor.MockUploadDirectory = func(remotePath string, localPath string) error {
		calledUpload = true
		return fmt.Errorf(errMsg)
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledUpload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp failed %s -> %s: %s", localDir, remoteDir, errMsg))
}

// TestScp_DirectionUp_LocalFileToRemoteNoneFile_Ok verifies uploading a file
// to a non-existent remote target succeeds.
func TestScp_DirectionUp_LocalFileToRemoteNoneFile_Ok(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  out.Name(),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "none"}, nil
	}
	calledUpload := false
	mockExecutor.MockUploadFile = func(remotePath string, localPath string) error {
		calledUpload = true
		return nil
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledUpload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp %s -> %s finished", out.Name(), remote))
}

// TestScp_DirectionUp_LocalDirToRemoteDir_Ok verifies uploading a directory
// to a non-existent remote target succeeds.
func TestScp_DirectionUp_LocalDirToRemoteDir_Ok(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  localDir,
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "up",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "none"}, nil
	}
	calledUpload := false
	mockExecutor.MockUploadDirectory = func(remotePath string, localPath string) error {
		calledUpload = true
		return nil
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledUpload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp %s -> %s finished", localDir, remote))
}
//---- down

// TestScp_DirectionDown_RemoteDirToLocalFile_Invalid verifies downloading a
// remote directory onto a local file is rejected.
func TestScp_DirectionDown_RemoteDirToLocalFile_Invalid(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  out.Name(),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "down",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "dir"}, nil
	}
	osExitCalled := false
	patch := monkey.Patch(os.Exit, func(int) {
		osExitCalled = true
	})
	defer patch.Unpatch()
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, osExitCalled)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Can not download remote folder %s to local file %s. Please choose a local directory.", remote, out.Name()))
}

// TestScp_DirectionDown_RemoteFileToLocalFile_Invalid verifies downloading a
// remote file over an existing local file is rejected.
func TestScp_DirectionDown_RemoteFileToLocalFile_Invalid(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp/file"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  out.Name(),
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "down",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "file"}, nil
	}
	osExitCalled := false
	patch := monkey.Patch(os.Exit, func(int) {
		osExitCalled = true
	})
	defer patch.Unpatch()
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, osExitCalled)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Can not download remote file %s to existing local file %s. Please choose a local directory or a new local filename.", remote, out.Name()))
}

// TestScp_DirectionDown_RemoteDirectoryToLocalDir_ScpError verifies a
// download failure is reported for a directory transfer.
func TestScp_DirectionDown_RemoteDirectoryToLocalDir_ScpError(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  localDir,
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "down",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "dir"}, nil
	}
	calledDownload := false
	errMsg := "Error downloading directory"
	mockExecutor.MockDownloadDirectory = func(remotePath string, localPath string) error {
		calledDownload = true
		return fmt.Errorf(errMsg)
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledDownload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp failed %s <- %s: %s", localDir, remote, errMsg))
}

// TestScp_DirectionDown_RemoteFileToLocalDir_ScpError verifies a download
// failure is reported for a file transfer.
func TestScp_DirectionDown_RemoteFileToLocalDir_ScpError(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp/myfile"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  localDir,
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "down",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "file"}, nil
	}
	calledDownload := false
	errMsg := "Error downloading file"
	mockExecutor.MockDownloadFile = func(remotePath string, localPath string) error {
		calledDownload = true
		return fmt.Errorf(errMsg)
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledDownload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp failed %s <- %s: %s", filepath.Join(localDir, "myfile"), remote, errMsg))
}

// TestScp_DirectionDown_RemoteFileToLocalDir_Ok verifies downloading a
// remote file into a local directory succeeds.
func TestScp_DirectionDown_RemoteFileToLocalDir_Ok(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp/myfile"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  localDir,
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "down",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "file"}, nil
	}
	calledDownload := false
	mockExecutor.MockDownloadFile = func(remotePath string, localPath string) error {
		calledDownload = true
		return nil
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledDownload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp %s <- %s finished", filepath.Join(localDir, "myfile"), remote))
}

// TestScp_DirectionDown_RemoteDirectoryToLocalDir_Ok verifies downloading a
// remote directory into a local directory succeeds.
func TestScp_DirectionDown_RemoteDirectoryToLocalDir_Ok(t *testing.T) {
	out, _ := ioutil.TempFile(".", "TestScp_LocalFile")
	defer os.Remove(out.Name())
	abs, _ := filepath.Abs(out.Name())
	localDir := filepath.Dir(abs)
	mockExecutor, outBuffer, context := defaultContext()
	remote := "/tmp"
	context.Opts = &types.ScpOpts{
		RemotePath: remote,
		LocalPath:  localDir,
		GenericOpts: types.GenericOpts{
			NodeArg:   "host1",
			TargetArg: "down",
		},
	}
	calledCmd := false
	mockExecutor.MockPerformCmd = func(command string, sudo bool) (*types.SSHOutput, error) {
		calledCmd = true
		return &types.SSHOutput{Stdout: "dir"}, nil
	}
	calledDownload := false
	mockExecutor.MockDownloadDirectory = func(remotePath string, localPath string) error {
		calledDownload = true
		return nil
	}
	Scp(context)
	out.Close()
	assert.True(t, calledCmd)
	assert.True(t, calledDownload)
	assert.Contains(t, outBuffer.String(), fmt.Sprintf("Scp %s <- %s finished", localDir, remote))
}
|
package resolvers
import (
"strconv"
graphql "github.com/graph-gophers/graphql-go"
"github.com/GibJob-ai/GObjob/model"
)
// FileResponse is the GraphQL resolver wrapper around a model.File;
// each exported method resolves one schema field.
type FileResponse struct {
	f *model.File
}
// ID resolves the file's numeric database ID as a GraphQL ID string.
func (r *FileResponse) ID() graphql.ID {
	id := strconv.Itoa(int(r.f.ID))
	return graphql.ID(id)
}
// Url resolves the file's URL field.
func (r *FileResponse) Url() string {
	return r.f.Url
}
// FileType resolves the file's type field (e.g. its MIME/category string — see model.File).
func (r *FileResponse) FileType() string {
	return r.f.FileType
}
// DocumentType resolves the file's document type field.
func (r *FileResponse) DocumentType() string {
	return r.f.DocumentType
}
// Name resolves the file's display name.
func (r *FileResponse) Name() string {
	return r.f.Name
}
// CreatedAt resolves the creation timestamp using time.Time's default
// String formatting.
func (r *FileResponse) CreatedAt() string {
	return r.f.CreatedAt.String()
}
// UpdatedAt resolves the last-update timestamp using time.Time's default
// String formatting.
func (r *FileResponse) UpdatedAt() string {
	return r.f.UpdatedAt.String()
}
|
package address
// import (
// util "github.com/filecoin-project/specs/util"
// )
// Addresses for singleton system actors.
// All are placeholder zero-value addresses until real address derivation
// is implemented (see the TODO on each entry).
var (
	InitActorAddr           = &Address_I{} // TODO
	CronActorAddr           = &Address_I{} // TODO
	StoragePowerActorAddr   = &Address_I{} // TODO
	StorageMarketActorAddr  = &Address_I{} // TODO
	PaymentChannelActorAddr = &Address_I{} // TODO
	BurntFundsActorAddr     = &Address_I{} // TODO
)
// VerifySyntax reports whether the address payload is well-formed for the
// given address type. Unimplemented: currently always panics; the commented
// switch sketches the intended per-protocol length checks.
func (a *Address_I) VerifySyntax(addrType Address_Type) bool {
	panic("TODO")
	// switch aType := addrType; aType {
	// case Address_Protocol.Secp256k1():
	//	// 80 Bytes
	//	return len(self)
	// case Address_Protocol.ID():
	//	// ?
	// case Address_Protocol.Actor():
	//	// Blake2b - 64 Bytes
	// case Address_Protocol.BLS():
	//	// BLS-12_381 - 48 Byte PK
	// }
}
// String returns the textual form of the address.
// Unimplemented: currently always returns the empty string. TODO
func (a *Address_I) String() AddressString {
	return AddressString("") // TODO
}
// IsKeyType reports whether the address refers to a key-based protocol
// (as opposed to e.g. an ID or actor address). Unimplemented: always panics.
func (a *Address_I) IsKeyType() bool {
	panic("TODO")
}
// MakeAddress constructs an Address carrying only the given network ID and
// address type; all other fields are left at their zero values.
func MakeAddress(net Address_NetworkID, t Address_Type) Address {
	return &Address_I{
		NetworkID_: net,
		Type_: t,
	}
}
|
package types
import (
"fmt"
"strings"
log "github.com/sirupsen/logrus"
)
// merge merges adjacent string-like elements (string, []byte, *StringElement)
// into single *StringElement values while keeping all other elements intact
// and in order. Nested []interface{} values are merged recursively and
// flattened into the result.
func merge(elements ...interface{}) []interface{} {
	result := make([]interface{}, 0, len(elements))
	// buf accumulates consecutive string-like content until a non-string
	// element forces it to be flushed as a single StringElement.
	buf := &strings.Builder{}
	for _, element := range elements {
		if element == nil {
			continue
		}
		switch element := element.(type) {
		case string:
			buf.WriteString(element)
		case []byte:
			buf.Write(element)
		case *StringElement:
			buf.WriteString(element.Content)
		case []interface{}:
			if len(element) > 0 {
				// merge the nested slice first, then flush any pending
				// string content so ordering is preserved, then re-merge
				// so strings on either side of the nesting can coalesce.
				f := merge(element...)
				if content := buf.String(); len(content) > 0 {
					result = append(result, &StringElement{
						Content: content,
					})
					buf = &strings.Builder{}
				}
				result = merge(append(result, f...)...)
			}
		default:
			// log.Debugf("Merging with 'default' case an element of type %[1]T", element)
			// Non-string element: flush pending string content before
			// appending it, so relative order is preserved.
			if content := buf.String(); len(content) > 0 {
				// special case: a " -- " symbol absorbs one preceding space
				if symbol, ok := element.(*Symbol); ok && symbol.Name == " -- " && strings.HasSuffix(content, " ") {
					// trim 1 space from actual result
					content = content[:len(content)-1]
				}
				result = append(result, &StringElement{
					Content: content,
				})
				buf = &strings.Builder{}
			}
			result = append(result, element)
		}
	}
	// if buf was filled because some text was found
	if buf.Len() > 0 {
		result = append(result, &StringElement{
			Content: buf.String(),
		})
	}
	return result
}
// Flatten expands one level of nesting: every []interface{} entry of
// `elements` has its contents spliced in place, while all other entries
// are copied through unchanged. It never recurses into deeper levels.
func Flatten(elements []interface{}) []interface{} {
	result := make([]interface{}, 0, len(elements))
	for _, entry := range elements {
		if nested, ok := entry.([]interface{}); ok {
			result = append(result, nested...)
			continue
		}
		result = append(result, entry)
	}
	return result
}
// AllNilEntries returns true if every entry in `elements` is nil,
// descending recursively into nested []interface{} entries.
// An empty (or nil) slice vacuously yields true.
func AllNilEntries(elements []interface{}) bool {
	for _, entry := range elements {
		// a nested slice is non-nil as an interface value (it has a type),
		// so inspect its contents instead
		if nested, ok := entry.([]interface{}); ok {
			if !AllNilEntries(nested) {
				return false
			}
			continue
		}
		if entry != nil {
			return false
		}
	}
	return true
}
// ReduceOption an option to apply on the reduced content when it is a `string`
type ReduceOption func(string) string

// Reduce merges and returns a string if the given elements only contain a single StringElement
// (ie, return its `Content`), otherwise return the given elements or empty string if the elements
// is `nil` or an empty `[]interface{}`.
// The opts are applied only when the reduced result is a string; a reduction
// to the empty string yields nil.
func Reduce(elements interface{}, opts ...ReduceOption) interface{} {
	switch e := elements.(type) {
	case []interface{}:
		// coalesce adjacent string-like elements first
		e = merge(e...)
		switch len(e) {
		case 0: // if empty, return nil
			return nil
		case 1:
			// a lone StringElement collapses to its (transformed) content
			if e, ok := e[0].(*StringElement); ok {
				c := e.Content
				for _, apply := range opts {
					c = apply(c)
				}
				return c
			}
			return e
		default:
			return e
		}
	case *StringElement:
		// NOTE: opts are intentionally not forwarded here in the original;
		// the recursive call goes through the `string` case without them.
		return Reduce(e.Content)
	case string:
		for _, apply := range opts {
			e = apply(e)
		}
		switch len(e) {
		case 0:
			return nil
		default:
			return e
		}
	default:
		// anything else passes through untouched
		return elements
	}
}
// applyFunc is a string transformation used by Apply; transformations are
// chained in the order given, each receiving the previous one's output.
type applyFunc func(s string) string

// Apply runs every transformation in fs over source, in order, and returns
// the final value. With no transformations it returns source unchanged.
func Apply(source string, fs ...applyFunc) string {
	for _, transform := range fs {
		source = transform(source)
	}
	return source
}
// stringify renders an element (or a slice of elements, recursively) as a
// plain string. Known element types use their textual content; anything
// else falls back to fmt's default formatting.
func stringify(element interface{}) string {
	switch element := element.(type) {
	case []interface{}:
		result := strings.Builder{}
		for _, e := range element {
			result.WriteString(stringify(e))
		}
		return result.String()
	case string:
		return element
	case *StringElement:
		return element.Content
	case *SpecialCharacter:
		return element.Name
	case *AttributeReference: // TODO: should never happen?
		// render unresolved references back in `{name}` form
		return "{" + element.Name + "}"
	default:
		return fmt.Sprintf("%v", element) // "best-effort" here
	}
}
// TrimTrailingSpaces trims trailing spaces on the last element (if applicable).
// Only a trailing *StringElement is affected; it is mutated in place, and if
// trimming empties it entirely the element is dropped from the returned slice.
func TrimTrailingSpaces(content []interface{}) []interface{} {
	if log.IsLevelEnabled(log.DebugLevel) {
		log.Debugf("trimming trailing spaces on content of type '%T'", content)
	}
	if len(content) > 0 {
		if s, ok := content[len(content)-1].(*StringElement); ok {
			// NOTE: mutates the shared StringElement, not a copy
			s.Content = strings.TrimRight(s.Content, " ")
			// if last item was an empty isolated trailing space, then remove it
			if len(s.Content) == 0 {
				content = content[:len(content)-1]
			}
		}
	}
	return content
}
// Append concatenates all given elements into a single slice. A nil element
// is skipped, and an element that is itself an []interface{} contributes its
// contents rather than the slice value. The error result is always nil; it
// is kept for interface compatibility with callers.
func Append(elements ...interface{}) ([]interface{}, error) {
	out := make([]interface{}, 0, len(elements)) // best guess for initial capacity
	for _, elem := range elements {
		if elem == nil {
			continue
		}
		if nested, ok := elem.([]interface{}); ok {
			out = append(out, nested...)
			continue
		}
		out = append(out, elem)
	}
	return out, nil
}
// SplitElementsPerLine splits a flat element sequence into lines, breaking
// on "\n" characters found inside *StringElement contents. Non-string
// elements are carried onto the current line. Empty string fragments are
// dropped, but a "\n" still terminates the current line (which may then
// be an empty slice).
func SplitElementsPerLine(elements []interface{}) [][]interface{} {
	lines := make([][]interface{}, 0, len(elements))
	line := make([]interface{}, 0, len(elements))
	for _, e := range elements {
		switch e := e.(type) {
		case *StringElement:
			// split
			s := strings.Split(e.Content, "\n")
			for i := range s {
				// only append if string is non-empty
				if len(s[i]) > 0 {
					line = append(line, &StringElement{
						Content: s[i],
					})
				}
				if i < len(s)-1 { // move to next line
					lines = append(lines, line)
					// reset
					line = make([]interface{}, 0, len(elements))
				}
			}
		default:
			line = append(line, e)
		}
	}
	// don't forget the last line
	if len(line) > 0 {
		lines = append(lines, line)
	}
	return lines
}
// InsertAt inserts element into elements at position index, shifting every
// following entry one place to the right. A nil element is a no-op and the
// original slice is returned unchanged. The caller must supply an index in
// [0, len(elements)]; like the slicing it relies on, anything else panics.
func InsertAt(elements []interface{}, element interface{}, index int) []interface{} {
	if element == nil {
		return elements
	}
	out := make([]interface{}, 0, len(elements)+1)
	out = append(out, elements[:index]...)
	out = append(out, element)
	out = append(out, elements[index:]...)
	return out
}
|
// +build windows darwin linux,!arm

package gpio
import (
"time"
)
const (
	// maxIncremenation bounds polling iterations — unused in this stub build,
	// presumably referenced by the hardware (arm) implementation.
	// NOTE(review): name is misspelled ("Incremenation") but renaming here
	// could break other files in this package — confirm before fixing.
	maxIncremenation = 200000
)
//PulseDuration Duration of a pulse on one pin.
// Stub for platforms without GPIO hardware (windows/darwin/non-arm linux):
// it never touches any pin and always returns a near-zero duration and no error.
func PulseDuration(pin uint8, state uint8) (time.Duration, error) {
	startTime := time.Now() // no hardware to poll on this platform
	return time.Since(startTime), nil // effectively ~0s elapsed
}
// Open initializes GPIO access. No-op stub on platforms without GPIO;
// always succeeds.
func Open() error {
	return nil
}
// Close releases GPIO resources. No-op stub on platforms without GPIO.
func Close() {
}
|
package build
import (
"github.com/zouyx/gopt/input"
"fmt"
"os"
"github.com/zouyx/gopt/message"
"bufio"
)
type StructureBuilder interface {
Build(param *input.Params)
}
const (
	// SRC_PATH is the format for the project's src directory; %v is the project name.
	SRC_PATH ="%v/src"
	// FULL_PATH is the format for the main package directory under src.
	FULL_PATH =SRC_PATH+"/main"
)
// getFullPath returns the project's main package path ("<project>/src/main")
// based on the input params.
func getFullPath(params *input.Params) string {
	return fmt.Sprintf(FULL_PATH, params.ProjectName)
}
// getSrc returns the project's src path ("<project>/src") based on the
// input params.
func getSrc(params *input.Params) string {
	return fmt.Sprintf(SRC_PATH, params.ProjectName)
}
// write to file name
func write(fileName,content string) {
f, err := os.Create(fileName)
if err!=nil{
message.FormatError(func() {
panic(fmt.Sprintf("Create %v fail! error:%v",fileName,err.Error()))
})
}
w := bufio.NewWriter(f)
_, err = w.WriteString(content)
if err!=nil{
message.FormatError(func() {
panic(fmt.Sprintf("Write %v fail! error:%v",fileName,err.Error()))
})
}
w.Flush()
f.Close()
message.Success(fmt.Sprintf("Created %v",fileName))
} |
package main
import (
"fmt"
"net"
"time"
_ "bytes"
_ "io/ioutil"
_ "bufio"
)
func handleConnection(conn net.Conn) {
fmt.Println("conn handle")
}
// main runs a toy TCP server on :8888 that hands each accepted connection
// to handleConnection on its own goroutine.
func main() {
	ln, err := net.Listen("tcp", ":8888")
	if err != nil {
		fmt.Println("listen err:", err)
		// without a listener there is nothing to serve; the old code fell
		// through and called Accept on a nil listener
		return
	}
	// count starts at 0 so the first accepted connection reports 1
	// (previously it started at 1 and reported 2)
	count := 0
	for {
		conn, err := ln.Accept()
		if err != nil {
			fmt.Println("accept err:", err)
			// skip this iteration instead of handing a nil conn to the handler
			continue
		}
		count++
		fmt.Println("accept count: ", count)
		go handleConnection(conn)
		// NOTE(review): throttles the accept loop to one connection per 10s;
		// presumably a debugging aid — confirm before removing
		time.Sleep(10 * time.Second)
	}
}
|
package errors
import (
"bytes"
"encoding/json"
"fmt"
"path"
"runtime"
"sync"
)
// CustomError exposes additional information for an error: a numeric code,
// an optional wrapped inner error, and several renderings of the captured
// stack trace.
type CustomError interface {
	Error() string
	GetCode() uint32
	GetInner() error
	StackAddrs() string
	StackFrames() []StackFrame
	GetStack() string
	GetStackAsJSON() interface{}
	GetFullMessage() string
	GetMessages() string
}
// StackFrame represents a single resolved stack frame: the raw program
// counter plus the function/file/line it maps to (resolved lazily by
// baseError.StackFrames).
type StackFrame struct {
	PC         uintptr
	Func       *runtime.Func
	FuncName   string
	File       string
	LineNumber int
}
// baseError is the concrete CustomError implementation. It stores raw stack
// PCs at construction time and resolves them into stackFrames exactly once
// (guarded by framesOnce). Because it embeds a sync.Once it must not be
// copied; all methods use pointer receivers.
type baseError struct {
	code        uint32
	message     string
	inner       error
	stack       []uintptr
	framesOnce  sync.Once
	stackFrames []StackFrame
}
// Error returns this error's own message (without inner errors or stack).
func (e *baseError) Error() string {
	return e.message
}
// GetCode returns the numeric error code assigned at construction.
func (e *baseError) GetCode() uint32 {
	return e.code
}
// GetInner returns the nested (wrapped) error, or nil at the chain's end.
func (e *baseError) GetInner() error {
	return e.inner
}
// GetFullMessage returns the full error-chain message plus the stack trace.
func (e *baseError) GetFullMessage() string {
	return extractFullErrorMessage(e, true)
}
// GetMessages returns the full error-chain message without the stack trace.
func (e *baseError) GetMessages() string {
	return extractFullErrorMessage(e, false)
}
// StackAddrs returns the raw stack program counters as space-separated hex
// values (e.g. "0x4a2f10 0x4a3b55"), or "" when no stack was captured.
// The previous implementation sliced off a trailing space unconditionally
// and panicked on an empty stack.
func (e *baseError) StackAddrs() string {
	buf := bytes.NewBuffer(make([]byte, 0, len(e.stack)*8))
	for i, pc := range e.stack {
		if i > 0 {
			buf.WriteByte(' ')
		}
		fmt.Fprintf(buf, "0x%x", pc)
	}
	return buf.String()
}
// StackFrames lazily resolves the captured PCs into StackFrame values.
// Resolution happens exactly once per error (sync.Once), so repeated calls
// are cheap and race-free.
func (e *baseError) StackFrames() []StackFrame {
	e.framesOnce.Do(func() {
		e.stackFrames = make([]StackFrame, len(e.stack))
		for i, pc := range e.stack {
			frame := &e.stackFrames[i]
			frame.PC = pc
			frame.Func = runtime.FuncForPC(pc)
			if frame.Func != nil {
				frame.FuncName = frame.Func.Name()
				// pc-1 points into the call instruction rather than the
				// return address, giving the correct source line
				frame.File, frame.LineNumber = frame.Func.FileLine(frame.PC - 1)
			}
		}
	})
	return e.stackFrames
}
// GetStack renders the resolved stack frames in a goroutine-dump-like text
// form: the function name on one line, then "\t<file>:<line> +0x<pc>".
func (e *baseError) GetStack() string {
	out := bytes.NewBuffer(make([]byte, 0, 256))
	for _, frame := range e.StackFrames() {
		fmt.Fprintf(out, "%s\n\t%s:%d +0x%x\n",
			frame.FuncName, frame.File, frame.LineNumber, frame.PC)
	}
	return out.String()
}
// GetStackAsJSON returns the stack trace as a generic JSON value: a slice of
// {"filepath", "name", "line"} objects (numbers decoded as float64, the same
// dynamic types callers received before).
// The previous implementation concatenated raw strings into JSON by hand,
// which produced invalid JSON whenever a file path contained `"` or `\`,
// and also filled a bytes.Buffer that was never read.
func (e *baseError) GetStackAsJSON() interface{} {
	stackFrames := e.StackFrames()
	entries := make([]map[string]interface{}, 0, len(stackFrames))
	for _, frame := range stackFrames {
		entries = append(entries, map[string]interface{}{
			"filepath": frame.File,
			"name":     path.Base(frame.FuncName),
			"line":     frame.LineNumber,
		})
	}
	// round-trip through encoding/json so the result has exactly the shape
	// and dynamic types of a decoded JSON document
	data, err := json.Marshal(entries)
	if err != nil {
		return nil
	}
	var i interface{}
	_ = json.Unmarshal(data, &i)
	return i
}
// extractFullErrorMessage walks the chain of CustomErrors starting at e and
// concatenates their messages, one per line. A non-CustomError tail is
// appended in square brackets. When includeStack is true, the stack trace of
// the deepest CustomError in the chain is appended.
func extractFullErrorMessage(e CustomError, includeStack bool) string {
	var ok bool
	var lastClErr CustomError
	errMsg := bytes.NewBuffer(make([]byte, 0, 1024))
	dbxErr := e
	for {
		// remember the deepest CustomError seen so far for the stack trace
		lastClErr = dbxErr
		errMsg.WriteString(dbxErr.Error())
		innerErr := dbxErr.GetInner()
		if innerErr == nil {
			break
		}
		dbxErr, ok = innerErr.(CustomError)
		if !ok {
			// We have reached the end and traversed all inner errors.
			// Add last message and exit loop.
			errMsg.WriteString(" [")
			errMsg.WriteString(innerErr.Error())
			errMsg.WriteString("] ")
			break
		}
		errMsg.WriteString("\n")
	}
	if includeStack {
		errMsg.WriteString("\n")
		errMsg.WriteString("\n STACK TRACE:\n")
		errMsg.WriteString(lastClErr.GetStack())
	}
	return errMsg.String()
}
// New creates a CustomError with the given code and message, capturing the
// caller's stack. There is no inner error.
func New(code uint32, message string) error {
	return new(nil, code, message)
}
// Newf is New with fmt.Sprintf-style message formatting.
func Newf(code uint32, format string, args ...interface{}) error {
	return new(nil, code, fmt.Sprintf(format, args...))
}
// new builds a baseError capturing the caller's stack. Skipping 3 frames
// hides runtime.Callers, new itself, and the exported wrapper
// (New/Newf/Wrap/Wrapf), so the trace starts at the constructor's caller.
// NOTE: this deliberately shadows the builtin `new` inside this package.
func new(err error, code uint32, message string) *baseError {
	// Capture up to 32 frames. The previous buffer size of 2 truncated every
	// trace to two entries, making GetStack/GetFullMessage nearly useless.
	stack := make([]uintptr, 32)
	stackLength := runtime.Callers(3, stack)
	return &baseError{
		message: message,
		code:    code,
		stack:   stack[:stackLength],
		inner:   err,
	}
}
// Wrap wraps err in a new CustomError with the given code and message,
// capturing the caller's stack.
func Wrap(err error, code uint32, message string) error {
	return new(err, code, message)
}
// Wrapf is Wrap with fmt.Sprintf-style message formatting.
func Wrapf(err error, code uint32, format string, args ...interface{}) error {
	return new(err, code, fmt.Sprintf(format, args...))
}
// RootError returns the innermost (root cause) error by repeatedly
// unwrapping CustomErrors. Unwrapping is capped at 20 levels to guard
// against cyclic chains; past that it returns a synthetic error.
func RootError(ierr error) (nerr error) {
	nerr = ierr
	for i := 0; i < 20; i++ {
		terr := unwrapError(nerr)
		if terr == nil {
			return nerr
		}
		nerr = terr
	}
	return fmt.Errorf("too many iterations: %T", nerr)
}
// unwrapError returns the inner error when ierr is a CustomError,
// or nil when it is not (or has no inner error).
func unwrapError(ierr error) (nerr error) {
	if clError, ok := ierr.(CustomError); ok {
		return clError.GetInner()
	}
	return nil
}
|
package main
import (
"fmt"
"net"
"os"
"strings"
"sync"
)
// User is the wire/session record for the chat client. Messages are encoded
// on the wire as "Username-OtherUsername-Msg-ServerMsg" (dash-separated).
type User struct {
	Username      string // this client's login name
	OtherUsername string // the peer this client sends to
	Msg           string // chat text typed by the user
	ServerMsg     string // message originated by the server, if any
}
var (
	// user holds this process's session state (filled from stdin in main)
	user = new(User)
	// wg keeps main alive while the sender/receiver goroutines run
	wg sync.WaitGroup
)
// main runs a simple interactive chat client: it reads the user and peer
// names from stdin, connects to the chat server on localhost:8899, then runs
// one goroutine sending typed messages and one printing received ones.
func main() {
	wg.Add(1)
	fmt.Println("请登陆, 输入用户名: ")
	fmt.Scanln(&user.Username)
	fmt.Println("请输入要给谁发消息:")
	fmt.Scanln(&user.OtherUsername)
	// resolve and dial the server; previously both errors were discarded,
	// so a failed dial crashed later with a nil-conn panic
	addr, err := net.ResolveTCPAddr("tcp4", "localhost:8899")
	if err != nil {
		fmt.Println("resolve err:", err)
		os.Exit(1)
	}
	conn, err := net.DialTCP("tcp4", nil, addr)
	if err != nil {
		fmt.Println("dial err:", err)
		os.Exit(1)
	}
	// sender: read a word at a time from stdin and forward it
	go func() {
		fmt.Println("请输入: (只启动时提示一次)")
		for {
			fmt.Scanln(&user.Msg)
			if user.Msg == "exit" {
				conn.Close()
				wg.Done()
				os.Exit(0)
			}
			conn.Write([]byte(fmt.Sprintf("%s-%s-%s-%s", user.Username, user.OtherUsername, user.Msg, user.ServerMsg)))
		}
	}()
	// receiver: decode dash-separated frames and print them
	go func() {
		for {
			b := make([]byte, 1024)
			n, err := conn.Read(b)
			if err != nil {
				// connection gone: exit instead of spinning on the error
				fmt.Println("read err:", err)
				os.Exit(1)
			}
			arrStr := strings.Split(string(b[:n]), "-")
			if len(arrStr) < 4 {
				// malformed frame; previously this panicked on indexing
				continue
			}
			user2 := new(User)
			user2.Username = arrStr[0]
			user2.OtherUsername = arrStr[1]
			user2.Msg = arrStr[2]
			user2.ServerMsg = arrStr[3]
			if user2.ServerMsg != "" {
				fmt.Println("\t\t\t服务器消息:", user2.ServerMsg)
			} else {
				fmt.Println("\t\t\t", user2.Username, ":", user2.Msg)
			}
		}
	}()
	wg.Wait()
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
// +build e2e

package pkg
import (
"path/filepath"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
)
// GetK8sConfig gets K8s config either from inside the cluster or, when
// not running in-cluster, from the caller's ~/.kube/config file.
func GetK8sConfig() (*restclient.Config, error) {
	k8sConfig, err := restclient.InClusterConfig()
	if err != nil {
		// not in a cluster: fall back to the local kubeconfig
		home := homedir.HomeDir()
		k8sConfPath := filepath.Join(home, ".kube", "config")
		k8sConfig, err = clientcmd.BuildConfigFromFlags("", k8sConfPath)
		if err != nil {
			return nil, err
		}
	}
	return k8sConfig, nil
}
// GetK8sClient gets a K8s clientset built from GetK8sConfig (in-cluster
// config, or local kubeconfig as a fallback).
func GetK8sClient() (kubernetes.Interface, error) {
	k8sConfig, err := GetK8sConfig()
	if err != nil {
		return nil, err
	}
	clientset, err := kubernetes.NewForConfig(k8sConfig)
	if err != nil {
		return nil, err
	}
	return clientset, nil
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"github.com/ory/keto-maester/keto"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
	// StatusUpsertRoleFailed is reported when upserting a role into keto fails.
	StatusUpsertRoleFailed StatusCode = "ROLE_UPSERT_FAILED"
)
// ORYAccessControlPolicyRoleSpec defines the desired state of an
// ORYAccessControlPolicyRole.
type ORYAccessControlPolicyRoleSpec struct {
	// +kubebuilder:validation:Enum=exact;glob;regex
	//
	// Flavor is the flavor for the role
	Flavor string `json:"flavor"`

	// +kubebuilder:validation:MinLength=1
	//
	// ID is the role id (name). If this is not provided
	// `metadata.name` is used
	ID string `json:"id,omitempty"`

	// +kubebuilder:validation:MinItems=1
	//
	// Members is an array of members belonging to this role.
	Members []Subject `json:"members"`

	// Keto is the optional configuration to use for managing
	// this role
	Keto Keto `json:"keto,omitempty"`
}
// ORYAccessControlPolicyRoleStatus defines the observed state of an
// ORYAccessControlPolicyRole.
type ORYAccessControlPolicyRoleStatus struct {
	// ObservedGeneration represents the most recent generation observed by the controller.
	ObservedGeneration  int64               `json:"observedGeneration,omitempty"`
	ReconciliationError ReconciliationError `json:"reconciliationError,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// ORYAccessControlPolicyRole is the Schema for the ORYAccessControlPolicyRole API.
type ORYAccessControlPolicyRole struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ORYAccessControlPolicyRoleSpec   `json:"spec,omitempty"`
	Status ORYAccessControlPolicyRoleStatus `json:"status,omitempty"`
}
// GetKeto returns the keto config on the spec.
func (o ORYAccessControlPolicyRole) GetKeto() Keto {
	return o.Spec.Keto
}
// +kubebuilder:object:root=true

// ORYAccessControlPolicyRoleList contains a list of ORYAccessControlPolicyRole.
type ORYAccessControlPolicyRoleList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ORYAccessControlPolicyRole `json:"items"`
}
// init registers the role types with the scheme builder so the controller
// runtime can (de)serialize them.
func init() {
	SchemeBuilder.Register(&ORYAccessControlPolicyRole{}, &ORYAccessControlPolicyRoleList{})
}
// ToORYAccessControlPolicyRoleJSON converts an ORYAccessControlPolicyRole
// into the JSON representation digestible by ORY Keto.
// (The old doc comment was copy-pasted from the Hydra OAuth2 client type;
// the receiver is also renamed to `o` for consistency with GetKeto.)
func (o *ORYAccessControlPolicyRole) ToORYAccessControlPolicyRoleJSON() *keto.ORYAccessControlPolicyRoleJSON {
	// the role id defaults to the resource name unless spec.id overrides it
	id := o.ObjectMeta.Name
	if o.Spec.ID != "" {
		id = o.Spec.ID
	}

	return &keto.ORYAccessControlPolicyRoleJSON{
		ID:      id,
		Members: subjectsToStringSlice(o.Spec.Members),
	}
}
|
package openstack
import (
"fmt"
"github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
"github.com/gophercloud/gophercloud/pagination"
)
//go:generate faux --interface imageAPI --output fakes/image_api.go
// imageAPI is the seam over the gophercloud image service used by
// ImagesClient; the faux-generated fake implements it in tests.
type imageAPI interface {
	GetImagesPager() pagination.Pager
	PagerToPage(pager pagination.Pager) (pagination.Page, error)
	PageToImages(page pagination.Page) ([]images.Image, error)
	Delete(imageID string) error
}
// ImagesClient lists and deletes OpenStack images through an imageAPI.
type ImagesClient struct {
	imageAPI imageAPI
}
// NewImagesClient wraps the given imageAPI in an ImagesClient.
func NewImagesClient(imageAPI imageAPI) ImagesClient {
	return ImagesClient{
		imageAPI: imageAPI,
	}
}
// List returns every image reachable through the image service's pager,
// wrapping each failure with the step that produced it.
func (ic ImagesClient) List() ([]images.Image, error) {
	pager := ic.imageAPI.GetImagesPager()
	if pager.Err != nil {
		return nil, fmt.Errorf("get images pager: %s", pager.Err)
	}

	page, err := ic.imageAPI.PagerToPage(pager)
	if err != nil {
		return nil, fmt.Errorf("pager to page: %s", err)
	}

	imgs, err := ic.imageAPI.PageToImages(page)
	if err != nil {
		return nil, fmt.Errorf("page to images: %s", err)
	}

	// err is provably nil at this point; return an explicit nil
	return imgs, nil
}
// Delete removes the image with the given ID via the underlying imageAPI.
func (ic ImagesClient) Delete(imageID string) error {
	return ic.imageAPI.Delete(imageID)
}
|
package productCategoryController
import (
"errors"
"github.com/gin-gonic/gin"
"github.com/thoas/go-funk"
"hd-mall-ed/packages/admin/models/productCategoryModel"
"hd-mall-ed/packages/common/pkg/adminApp"
"hd-mall-ed/packages/common/pkg/e"
"log"
"strconv"
)
// GetList returns the category list for the product identified by the
// `product_id` query parameter. A missing OR non-numeric product_id is
// rejected with NotFoundId (previously the Atoi error was silently ignored
// and a non-numeric id fell through as product 0).
func GetList(c *gin.Context) {
	api := adminApp.ApiInit(c)
	model := &productCategoryModel.ProductCategory{}

	idString := c.DefaultQuery("product_id", "")
	id, err := strconv.Atoi(idString)
	log.Println("funk.IsEmpty(idString) : ", funk.IsEmpty(idString))
	if funk.IsEmpty(idString) || err != nil {
		// NOTE(review): the assignment below is ineffectual (err is never
		// read again) and is retained only to keep the errors import in use;
		// consider removing it together with the import.
		err = errors.New(e.GetMsg(e.NotFoundId))
		api.ResFail(e.NotFoundId)
		return
	}
	model.ProductId = uint(id)

	// 查询逻辑
	list, err := model.GetList()
	if err != nil {
		api.ResFailMessage(e.Fail, err.Error())
		return
	}
	api.Response(list)
}
|
package main
import "encoding/binary"
import "io"
import "log"
import "net"
import "math/rand"
import "sync"
import "time"
// PacketHeader is the fixed-size wire header preceding every Packet's
// variable-length key and data (written/read with encoding/binary, big-endian).
type PacketHeader struct {
	PacketID   byte
	Sequence   uint32
	Client     uint32
	KeyLength  uint8  // length of the key that follows the header
	DataLength uint16 // length of the data that follows the key
}
// Packet is a request or response message: a PacketHeader followed by the
// key string and optional data payload.
type Packet struct {
	Header PacketHeader
	Key    string
	Data   []byte
}
// Connection is one server's TCP connection plus dial-throttling state;
// mu serializes use of the connection (one request/response in flight).
type Connection struct {
	Conn                  net.Conn
	LastConnectionAttempt time.Time // used to rate-limit reconnect attempts
	mu                    sync.Mutex
}
// Connections abstracts message sending for the ldr/cas clients: it keeps
// one lazily-created Connection per server address. PacketLoss (0-100) is a
// test knob that makes Send randomly drop that percentage of requests.
type Connections struct {
	PacketLoss  int
	connections map[string]*Connection // keyed by server address; guarded by mu
	mu          sync.Mutex
}
// getConnection returns the Connection record for server, lazily creating
// both the map and the per-server entry on first use.
func (cns *Connections) getConnection(server string) *Connection {
	cns.mu.Lock()
	defer cns.mu.Unlock()
	if cns.connections == nil {
		cns.connections = map[string]*Connection{}
	}
	conn := cns.connections[server]
	if conn == nil {
		conn = &Connection{}
		cns.connections[server] = conn
	}
	return conn
}
// IsActive reports whether there is currently an established TCP connection
// to server (Send clears Conn back to nil on read failure).
func (cns *Connections) IsActive(server string) bool {
	connection := cns.getConnection(server)
	connection.mu.Lock()
	defer connection.mu.Unlock()
	return connection.Conn != nil
}
// Send transmits packet to server and waits (up to 1s) for the response
// packet. It returns nil on any failure: simulated packet loss, dial
// throttling (at most one attempt per second), dial failure, or a read
// error/timeout — in which case the connection is dropped so the next call
// redials. The per-connection mutex serializes request/response pairs.
func (cns *Connections) Send(server string, packet *Packet) *Packet {
	// simulated packet loss for testing; the sleep mimics a peer timeout
	if rand.Intn(100) < cns.PacketLoss {
		time.Sleep(2 * time.Second)
		return nil
	}
	// header lengths are derived from the payload just before sending
	packet.Header.KeyLength = uint8(len(packet.Key))
	packet.Header.DataLength = uint16(len(packet.Data))
	connection := cns.getConnection(server)
	connection.mu.Lock()
	defer connection.mu.Unlock()
	if connection.Conn == nil {
		// attempt to connect, but limit to one connection attempt per second
		if time.Now().Before(connection.LastConnectionAttempt.Add(time.Second)) {
			return nil
		}
		connection.LastConnectionAttempt = time.Now()
		var err error
		connection.Conn, err = net.DialTimeout("tcp", server, time.Second)
		if err != nil {
			log.Printf("warning: error connecting to %s: %s", server, err.Error())
			return nil
		}
	}
	// NOTE(review): write errors are ignored here; a failed write surfaces
	// indirectly as a read timeout below — confirm this is intentional.
	binary.Write(connection.Conn, binary.BigEndian, packet.Header)
	connection.Conn.Write([]byte(packet.Key))
	if len(packet.Data) > 0 {
		connection.Conn.Write(packet.Data)
	}
	// read the response: fixed header, then key, then optional data
	response, err := func() (*Packet, error) {
		response := new(Packet)
		connection.Conn.SetReadDeadline(time.Now().Add(time.Second))
		err := binary.Read(connection.Conn, binary.BigEndian, &response.Header)
		if err != nil {
			return nil, err
		}
		keyBytes := make([]byte, response.Header.KeyLength)
		_, err = io.ReadFull(connection.Conn, keyBytes)
		if err != nil {
			return nil, err
		}
		response.Key = string(keyBytes)
		if response.Header.DataLength > 0 {
			response.Data = make([]byte, response.Header.DataLength)
			_, err = io.ReadFull(connection.Conn, response.Data)
			if err != nil {
				return nil, err
			}
		}
		return response, nil
	}()
	if err != nil {
		// drop the connection so the next Send redials
		log.Printf("warning: error reading from %s: %s", server, err.Error())
		connection.Conn.Close()
		connection.Conn = nil
		return nil
	}
	return response
}
// GetMinResponses repeatedly contacts servers we have no response from yet
// until at least min servers have answered, then performs retriesAfterDone
// extra best-effort rounds against the remaining servers. packets[i] is the
// request for servers[i]. If minimizeCommunication is set, each round only
// contacts enough servers to reach min.
//
// Fixes over the previous version: the retry rounds sent packets[i] where i
// was the retry-round counter (not the server's index), pairing servers with
// the wrong packets; and the early-exit drainer waited for one receive too
// many, leaking a goroutine blocked on doneChan.
func (cns *Connections) GetMinResponses(min int, packets []*Packet, servers []string, minimizeCommunication bool, retriesAfterDone int) map[string]*Packet {
	type ServerResponse struct {
		Server string
		Packet *Packet
	}
	responses := make(map[string]*Packet)
	for len(responses) < min {
		numWaiting := 0
		doneChan := make(chan ServerResponse)
		// contact servers in random order, skipping ones that answered
		for _, i := range rand.Perm(len(servers)) {
			server := servers[i]
			if responses[server] != nil {
				continue
			}
			numWaiting++
			go func(server string, packet *Packet) {
				response := cns.Send(server, packet)
				doneChan <- ServerResponse{server, response}
			}(server, packets[i])
			if minimizeCommunication && len(responses)+numWaiting >= min {
				break
			}
		}
		for i := 0; i < numWaiting; i++ {
			response := <-doneChan
			if response.Packet != nil {
				responses[response.Server] = response.Packet
				// don't wait for additional responses if we have enough packets
				if len(responses) >= min && retriesAfterDone == 0 {
					// drain the sends still in flight in the background so
					// their goroutines can finish; i+1 receives have already
					// happened, so numWaiting-i-1 remain
					go func(remaining int) {
						for j := 0; j < remaining; j++ {
							<-doneChan
						}
					}(numWaiting - i - 1)
					break
				}
			}
		}
	}
	for retry := 0; retry < retriesAfterDone; retry++ {
		numWaiting := 0
		doneChan := make(chan ServerResponse)
		for i, server := range servers {
			if responses[server] != nil {
				continue
			}
			numWaiting++
			go func(server string, packet *Packet) {
				response := cns.Send(server, packet)
				doneChan <- ServerResponse{server, response}
			}(server, packets[i]) // packets[i] matches servers[i] (was the retry counter)
		}
		for i := 0; i < numWaiting; i++ {
			response := <-doneChan
			if response.Packet != nil {
				responses[response.Server] = response.Packet
			}
		}
	}
	return responses
}
|
package main
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/go-mysql-org/go-mysql/canal"
"github.com/go-mysql-org/go-mysql/mysql"
"github.com/go-mysql-org/go-mysql/replication"
)
// MyEventHandler is a demo canal event handler that prints every
// replication event it receives.
type MyEventHandler struct {
}
// OnRotate logs binlog rotate events.
func (h *MyEventHandler) OnRotate(header *replication.EventHeader, rotateEvent *replication.RotateEvent) error {
	fmt.Printf("OnRotate, header: %+v, event: %+v\n", header, rotateEvent)
	return nil
}
// OnTableChanged logs table metadata changes.
func (h *MyEventHandler) OnTableChanged(header *replication.EventHeader, schema string, table string) error {
	fmt.Printf("OnTableChanged, header: %+v schema: %s, table: %s\n", header, schema, table)
	return nil
}
// OnDDL logs DDL statements seen in the binlog.
// Fix: the format string had three verbs but only two arguments were passed
// (header was missing), printing "%!v(MISSING)" for queryEvent.
func (h *MyEventHandler) OnDDL(header *replication.EventHeader, nextPos mysql.Position, queryEvent *replication.QueryEvent) error {
	fmt.Printf("OnDDL, header: %+v, nextPos: %+v, queryEvent: %+v\n", header, nextPos, queryEvent)
	return nil
}
// OnRow logs row change events (insert/update/delete).
func (h *MyEventHandler) OnRow(e *canal.RowsEvent) error {
	fmt.Printf("OnRow, event: %+v\n", e)
	return nil
}
// OnXID logs transaction-commit (XID) events.
func (h *MyEventHandler) OnXID(header *replication.EventHeader, nextPos mysql.Position) error {
	fmt.Printf("OnXID, header: %+v, nextPos: %+v\n", header, nextPos)
	return nil
}
// OnGTID logs GTID events.
func (h *MyEventHandler) OnGTID(header *replication.EventHeader, gtid mysql.GTIDSet) error {
	fmt.Printf("OnGTID, header: %+v, gtid: %+v\n", header, gtid)
	return nil
}
// OnPosSynced logs position-sync events.
// Fix: the newline in the format string sat mid-line ("%+v\n, force: %+v"),
// splitting the log entry across two lines; it now terminates the line.
func (h *MyEventHandler) OnPosSynced(header *replication.EventHeader, pos mysql.Position, set mysql.GTIDSet, force bool) error {
	fmt.Printf("OnPosSynced, header: %+v, pos: %+v, gtidSet: %+v, force: %+v\n", header, pos, set, force)
	return nil
}
// String identifies this handler in canal's own logging.
func (h *MyEventHandler) String() string { return "MyEventHandler" }
// main connects to a local MySQL as a replica (random server id) and streams
// replication events through MyEventHandler until Run returns.
func main() {
	cfg := new(canal.Config)
	cfg.Addr = "127.0.0.1:3306"
	cfg.User = "root"
	cfg.Password = "root"
	// random server id in [1001, 2000] to avoid colliding with real replicas
	cfg.ServerID = uint32(rand.New(rand.NewSource(time.Now().Unix())).Intn(1000)) + 1001

	c, err := canal.NewCanal(cfg)
	if err != nil {
		fmt.Fprintf(os.Stdout, "encounter a error during init canal, and the error: %s", err.Error())
		return
	}

	// Register a handler to handle RowsEvent
	c.SetEventHandler(&MyEventHandler{})
	// Run blocks until the stream ends or fails; the old code ignored its
	// error and then deadlocked on an empty select{} once Run returned.
	if err := c.Run(); err != nil {
		fmt.Fprintf(os.Stdout, "canal run error: %s", err.Error())
	}
}
|
package svrtest
import (
"errors"
"fmt"
"github.com/devwarrior777/atomicswap/libs"
bnd "github.com/devwarrior777/atomicswap/libs/protobind"
"google.golang.org/grpc/status"
)
func testXZC(testnet bool) error {
// Store and re-use:
// - the address from NewAddress
// - contract and contract-tx from Initiate
// - generated secret hash for initiate, participate
// We are testing the server here!
var address string
var contract string
var contractTx string
var redeemTx string
var secret string
var secretHash string
// ping wallet
pingreq := xzcPingWalletRPCRequest
if testnet {
pingreq = xzcTestnetPingWalletRPCRequest
}
pingresp, err := pingRPC(&pingreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if pingresp.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", pingresp.Errorno, pingresp.Errstr)
}
fmt.Println("Ping success")
// new address
newaddressreq := xzcNewAddressRequest
if testnet {
newaddressreq = xzcTestnetNewAddressRequest
}
newaddress, err := newAddress(&newaddressreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if newaddress.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", newaddress.Errorno, newaddress.Errstr)
}
address = newaddress.Address
fmt.Printf("New address: %s\n", address)
// initiate
secret = libs.GetRand32()
secretHash, err = libs.Hash256(secret)
initiatereq := xzcInitiateRequest
if testnet {
initiatereq = xzcTestnetInitiateRequest
}
initiatereq.Secrethash = secretHash
initiatereq.PartAddress = address
initiate, err := initiate(&initiatereq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if initiate.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", initiate.Errorno, initiate.Errstr)
}
contract = initiate.Contract
contractTx = initiate.ContractTx
if len(contract) < 64 || len(contractTx) < 64 {
return errors.New("invalid contract/contract-tx length(s)")
}
fmt.Printf("Initiate contract: %s...\n", contract[:64])
fmt.Printf("Initiate contract tx: %s...\n", contractTx[:64])
fmt.Printf("Initiate P2SH address: %s\n", initiate.ContractP2Sh)
fmt.Printf("Initiate contract tx hash: %s\n", initiate.ContractTxHash)
fmt.Printf("Initiate fee: %d\n", initiate.Fee)
fmt.Printf("Initiate fee rate: %0.08f/kb\n", initiate.Feerate)
fmt.Printf("Initiate refund locktime: %d\n", initiate.Locktime)
// participate
participatereq := xzcParticipateRequest
if testnet {
participatereq = xzcTestnetParticipateRequest
}
participatereq.Secrethash = secretHash
participatereq.InitAddress = address
participate, err := participate(&participatereq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if participate.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", participate.Errorno, participate.Errstr)
}
if len(participate.Contract) < 64 || len(participate.ContractTx) < 64 {
return errors.New("invalid contract/contract-tx length(s)")
}
fmt.Printf("Participate contract: %s...\n", participate.Contract[:64])
fmt.Printf("Participate contract tx: %s...\n", participate.ContractTx[:64])
fmt.Printf("Participate P2SH address: %s\n", participate.ContractP2Sh)
fmt.Printf("Participate contract tx hash: %s\n", participate.ContractTxHash)
fmt.Printf("Participate fee: %d\n", participate.Fee)
fmt.Printf("Participate fee rate: %0.08f/kb\n", participate.Feerate)
fmt.Printf("Participate refund locktime: %d\n", participate.Locktime)
// audit
auditreq := xzcAuditRequest
if testnet {
auditreq = xzcTestnetAuditRequest
}
auditreq.Contract = contract
auditreq.ContractTx = contractTx
audit, err := audit(&auditreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if audit.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", audit.Errorno, audit.Errstr)
}
fmt.Printf("Audit contract amount: %d\n", audit.ContractAmount)
fmt.Printf("Audit contract address: %s\n", audit.ContractAddress)
fmt.Printf("Audit contract secret hash: %s\n", audit.ContractSecrethash)
fmt.Printf("Audit recipient address: %s\n", audit.RecipientAddress)
fmt.Printf("Audit refund address: %s\n", audit.RefundAddress)
fmt.Printf("Audit refund locktime: %d\n", audit.RefundLocktime)
// redeem
redeemreq := xzcRedeemRequest
if testnet {
redeemreq = xzcTestnetRedeemRequest
}
redeemreq.Secret = secret
redeemreq.Contract = contract
redeemreq.ContractTx = contractTx
redeem, err := redeem(&redeemreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if redeem.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", redeem.Errorno, redeem.Errstr)
}
redeemTx = redeem.RedeemTx
if len(redeemTx) < 64 {
return errors.New("invalid contract/contract-tx length(s)")
}
fmt.Printf("Redeem contract: %s...\n", redeemTx[:64])
fmt.Printf("Redeem contract tx: %s...\n", redeem.RedeemTxHash)
fmt.Printf("Redeem fee: %d\n", redeem.Fee)
fmt.Printf("Redeem fee rate: %0.08f/kb\n", redeem.Feerate)
// extractSecret
extractsecretreq := xzcExtractSecretRequest
if testnet {
extractsecretreq = xzcTestnetExtractSecretRequest
}
extractsecretreq.CpRedemptionTx = redeemTx
extractsecretreq.Secrethash = secretHash
extract, err := extractSecret(&extractsecretreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if extract.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", extract.Errorno, extract.Errstr)
}
fmt.Printf("ExtractSecret secret: %s\n", extract.Secret)
// refund
refundreq := xzcRefundRequest
if testnet {
refundreq = xzcTestnetRefundRequest
}
refundreq.Contract = contract
refundreq.ContractTx = contractTx
refund, err := refund(&refundreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if refund.Errorno != bnd.ERRNO_OK {
return fmt.Errorf("%v %s", refund.Errorno, refund.Errstr)
}
if len(refund.RefundTx) < 64 {
return errors.New("invalid contract/contract-tx length(s)")
}
fmt.Printf("Refund tx: %s...\n", refund.RefundTx[:64])
fmt.Printf("Refund tx hash: %s...\n", refund.RefundTxHash)
fmt.Printf("Refund fee: %d\n", refund.Fee)
fmt.Printf("Refund fee rate: %0.08f/kb\n", refund.Feerate)
// publish
//
	// This is a negative test since we do not want to broadcast a transaction to
// the network.
//
// It tests that the test client can reach and call the wallet node publish
// function through the gRPC server
//
publishreq := xzcPublishRequest
if testnet {
publishreq = xzcTestnetPublishRequest
}
publishreq.Tx = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
publish, err := publish(&publishreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if publish.Errorno == bnd.ERRNO_OK {
// if here it is an error in the lib
fmt.Printf("Publish contract: %s...\n", publish.TxHash)
return errors.New("published invalid transaction")
}
fmt.Printf("Expected error publishing invalid transaction: %v %s\n", publish.Errorno, publish.Errstr)
// gettx
//
	// This is a negative test again
//
// It tests that the test client can reach and call the wallet node gettx
// function through the gRPC server
//
gettxreq := xzcGetTxRequest
if testnet {
gettxreq = xzcTestnetGetTxRequest
}
gettxreq.Txid = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
gettx, err := gettx(&gettxreq)
if err != nil {
s := status.Convert(err)
return fmt.Errorf("status: %d - %v - %v", s.Code(), s.Code(), s.Message())
}
if gettx.Errorno == bnd.ERRNO_OK {
// if here it is an error in the lib
fmt.Printf("GetTx Blockhash: %s...\n", gettx.Blockhash)
return errors.New("got info from an invalid txid")
}
fmt.Printf("Expected error getting info from an invalid txid: %v %s\n", gettx.Errorno, gettx.Errstr)
return nil
}
|
package eval
// stop go build from complaining about "no non-test Go files" in directory
|
package main
import (
"encoding/json"
"fmt"
"log"
// "net/url"
"os"
"strings"
"net/http"
"bufio"
"strconv"
"bytes"
)
const serverURL = "http://localhost:8080/"
var in *bufio.Reader
type Torrent struct {
Title string
Description string
MagnetLink string
Size string
Downloads int
Seeders int
Leechers int
}
func printTorrent (t *Torrent) {
fmt.Println("Title: ", t.Title)
fmt.Println("Description: ", t.Description)
fmt.Println("Size: ", t.Size)
fmt.Println("Total number of downloads: ", t.Downloads)
fmt.Println("Seeders: ", t.Seeders)
fmt.Println("Leechers: ", t.Leechers)
fmt.Println("Magnet Link: ", t.MagnetLink)
fmt.Printf("\n\n")
}
func get() {
resp, err := http.Get(serverURL+"torrents")
if err != nil {
log.Fatalf("could not fetch: %v", err)
}
fmt.Println(resp)
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Println("Erro 1")
return
}
var result map[string]*Torrent
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
fmt.Println("Erro 2")
return
}
for _, tor := range result {
printTorrent(tor)
}
}
func getWithKey(key string) {
resp, err := http.Get(serverURL+"torrent/"+strings.TrimSuffix(key, "\n"))
if err != nil {
log.Fatalf("Falha: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Println("Torrent não encontrado")
return
}
var result Torrent
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
fmt.Println("Falha de decodificação")
return
}
printTorrent(&result)
}
func post() {
var postT Torrent
fmt.Println("Nome:")
aux, _ := in.ReadString('\n')
postT.Title = strings.TrimSuffix(aux, "\n")
fmt.Println("Descrição:")
aux, _ = in.ReadString('\n')
postT.Description = strings.TrimSuffix(aux, "\n")
fmt.Println("Magnet Link:")
aux, _ = in.ReadString('\n')
postT.MagnetLink = strings.TrimSuffix(aux, "\n")
fmt.Println("Tamanho:")
aux, _ = in.ReadString('\n')
postT.Size = strings.TrimSuffix(aux, "\n")
fmt.Println("Total de Downloads:")
aux, _ = in.ReadString('\n')
postT.Downloads, _ = strconv.Atoi(strings.TrimSuffix(aux, "\n"))
fmt.Println("Seeders:")
aux, _ = in.ReadString('\n')
postT.Seeders, _ = strconv.Atoi(strings.TrimSuffix(aux, "\n"))
fmt.Println("Leechers:")
aux, _ = in.ReadString('\n')
postT.Leechers, _ = strconv.Atoi(strings.TrimSuffix(aux, "\n"))
postJSON, _ := json.Marshal(postT)
resp, err := http.Post(serverURL+"torrent/"+postT.Title, "application/json", bytes.NewBuffer(postJSON))
if err != nil {
log.Fatalf("Erro na operação de POST: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
fmt.Println("Erro ao criar torrent: ", resp.StatusCode)
return
}
}
func put() {
var putT Torrent
fmt.Println("Torrent para atualizar:")
aux, _ := in.ReadString('\n')
key := strings.TrimSuffix(aux, "\n")
fmt.Println("Nova descrição:")
aux, _ = in.ReadString('\n')
putT.Description = strings.TrimSuffix(aux, "\n")
putJSON, _ := json.Marshal(putT)
req, err := http.NewRequest("PUT", serverURL+"torrent/"+key, bytes.NewBuffer(putJSON))
if err != nil {
return
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalf("could not fetch: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
fmt.Println("Torrent não encontrado.")
return
}
}
func delete(key string) {
req, err := http.NewRequest("DELETE", serverURL+"torrent/"+key, nil)
if err != nil {
return
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalf("could not fetch: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Println("Torrent não encontrado.")
return
}
}
func main () {
in = bufio.NewReader(os.Stdin)
var op byte
for {
fmt.Println("\n MENU")
fmt.Println("1 - Todos os torrents")
fmt.Println("2 - Procurar torrent por nome")
fmt.Println("3 - Post torrent")
fmt.Println("4 - Delete torrent")
fmt.Println("5 - Atualizar descrição do torrent")
fmt.Println("6 - Sair")
fmt.Scanf("%c\n", &op)
switch op {
case '1':
get()
break
case '2':
fmt.Println("Nome do torrent:")
key, _ := in.ReadString('\n')
getWithKey(key)
break
case '3':
fmt.Println("Criar novo torrent")
post()
break
case '4':
fmt.Println("Nome do torrent para deletar:")
key, _ := in.ReadString('\n')
delete(key)
break
case '5':
put()
break
case '6':
os.Exit(0)
}
}
}
|
package main
import "fmt"
type hotdog int
var x hotdog
func main() {
fmt.Println(x) //zero value
fmt.Printf("%T\n",x) //Printf() %T = type of variable x
x = 42 //equal operator
fmt.Println(x) //value stored in variable
}
//Printf with the %T verb is used to print the type of a variable.
//UNDERLYING TYPE - see the link below; this is the terminology to research
//when creating your own type, as in this example.
//Hands-on exercise #4
//FYI - nice documentation and new terminology “underlying type”
//https://golang.org/ref/spec#Types
//For this exercise
//1. Create your own type. Have the underlying type be an int.
//2. create a VARIABLE of your new TYPE with the IDENTIFIER “x” using
//the “VAR” keyword
//3. in func main
//a. print out the value of the variable “x”
//b. print out the type of the variable “x”
//c. assign 42 to the VARIABLE “x” using the “=” OPERATOR
//d. print out the value of the variable “x”
//code: here’s the solution: https://play.golang.org/p/snm4WuuYmG |
package main
import (
"strconv"
"strings"
"fmt"
)
/*
Additive number is a string whose digits can form additive sequence.
A valid additive sequence should contain at least three numbers. Except for the first two numbers, each subsequent number in the sequence must be the sum of the preceding two.
Given a string containing only digits '0'-'9', write a function to determine if it's an additive number.
Note: Numbers in the additive sequence cannot have leading zeros, so sequence 1, 2, 03 or 1, 02, 3 is invalid.
Example 1:
Input: "112358"
Output: true
Explanation: The digits can form an additive sequence: 1, 1, 2, 3, 5, 8.
1 + 1 = 2, 1 + 2 = 3, 2 + 3 = 5, 3 + 5 = 8
Example 2:
Input: "199100199"
Output: true
Explanation: The additive sequence is: 1, 99, 100, 199.
1 + 99 = 100, 99 + 100 = 199
Follow up:
How would you handle overflow for very large input integers?
*/
func isAdditiveNumber(num string) bool {
for i:=0;i<len(num);i++ {
// 判断是否有前置0
if i > 0 && num[0]=='0' {
continue
}
one,_ := strconv.Atoi(num[:i+1])
for j:=i+2;j<len(num);j++ {
// 判断是否有前置0
if j-i-1 > 1 && num[i+1]=='0' {
continue
}
two,_ := strconv.Atoi(num[i+1:j])
if bt(one,two,num[j:]) {
return true
}
}
}
return false
}
func bt(one,two int, num string) bool {
if len(num) <= 0 {
return true
}
f := strconv.Itoa(one + two)
if strings.HasPrefix(num,f) {
return bt(two,one+two,num[len(f):])
}
return false
}
func main() {
fmt.Println(isAdditiveNumber("199100199"))
fmt.Println(isAdditiveNumber("112358"))
fmt.Println(isAdditiveNumber("000"))
fmt.Println(isAdditiveNumber("0"))
fmt.Println(isAdditiveNumber("00"))
fmt.Println(isAdditiveNumber("012"))
fmt.Println(isAdditiveNumber("0235813"))
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package encryption
import (
"bytes"
"context"
"io"
"strings"
"testing"
"storj.io/common/ranger"
)
func TestPad(t *testing.T) {
for examplenum, example := range []struct {
data string
blockSize int
padding int
}{
{"abcdef", 24, 24 - 6},
{"abcdef", 6, 6},
{"abcdef", 7, 8},
{"abcdef", 8, 10},
{"abcdef", 9, 12},
{"abcdef", 10, 4},
{"abcdef", 11, 5},
{"abcde", 7, 9},
{"abcdefg", 7, 7},
{"abcdef", 512, 506},
{"abcdef", 32 * 1024, 32*1024 - 6},
{"", 32 * 1024, 32 * 1024},
{strings.Repeat("\x00", 16*1024), 32 * 1024, 16 * 1024},
{strings.Repeat("\x00", 32*1024+1), 32 * 1024, 32*1024 - 1},
} {
ctx := context.Background()
padded, padding := Pad(ranger.ByteRanger([]byte(example.data)),
example.blockSize)
if padding != example.padding {
t.Fatalf("invalid padding: %d, %v != %v", examplenum,
padding, example.padding)
}
if int64(padding+len(example.data)) != padded.Size() {
t.Fatalf("invalid padding")
}
unpadded, err := Unpad(padded, padding)
if err != nil {
t.Fatalf("unexpected error")
}
r, err := unpadded.Range(ctx, 0, unpadded.Size())
if err != nil {
t.Fatalf("unexpected error")
}
data, err := io.ReadAll(r)
if err != nil {
t.Fatalf("unexpected error")
}
if !bytes.Equal(data, []byte(example.data)) {
t.Fatalf("mismatch")
}
unpadded, err = UnpadSlow(ctx, padded)
if err != nil {
t.Fatalf("unexpected error")
}
r, err = unpadded.Range(ctx, 0, unpadded.Size())
if err != nil {
t.Fatalf("unexpected error")
}
data, err = io.ReadAll(r)
if err != nil {
t.Fatalf("unexpected error")
}
if !bytes.Equal(data, []byte(example.data)) {
t.Fatalf("mismatch")
}
}
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"os/exec"
sw "github.com/sah4ez/go-bitbucket"
"github.com/urfave/cli"
)
func Review(client *sw.APIClient) cli.Command {
return cli.Command{
Name: "review",
Aliases: []string{"r"},
Description: "start review",
Action: func(c *cli.Context) error {
prs, _, err := client.PullrequestsApi.RepositoriesUsernameRepoSlugPullrequestsGet(Auth, Company, Repo, map[string]interface{}{"state": "OPEN"})
if err != nil {
return err
}
if LeftBranch == "" || RightBranch == "" {
return fmt.Errorf("missing LEFT_BRANCH or RIGHT_BRANCH env varibale for compare. LEFT_BRANCH default master")
}
for _, pr := range prs.Values {
if RightBranch == pr.Source.Branch.Name && LeftBranch == pr.Destination.Branch.Name {
files, err := LoadFiles()
if err != nil {
return err
}
for _, file := range files {
fmt.Println("id", pr.Id)
Diff(file)
}
}
}
return nil
},
}
}
func Diff(file string) {
os.Setenv(EnvCurrentFilenameDiff, file)
defer os.Unsetenv(EnvCurrentFilenameDiff)
cmd := exec.Command("git", "difftool", "--tool=vimdiff2", "origin/"+LeftBranch, "origin/"+RightBranch, file)
cmd.Stdout = os.Stdout
cmd.Stdin = os.Stdin
r := bufio.NewReader(os.Stdout)
err := cmd.Start()
if err != nil {
log.Fatal(err)
}
reader := bufio.NewReader(os.Stdin)
line, _, err := r.ReadLine()
for err != nil {
fmt.Println(line)
text, _ := reader.ReadString('\r')
os.Stdin.Write([]byte(text + "\n"))
line, _, err = r.ReadLine()
}
cmd.Wait()
}
|
package todo
import (
"errors"
"github.com/t-ash0410/tdd-sample/backend/internal/api/todo/entities"
"github.com/t-ash0410/tdd-sample/backend/internal/api/todo/interfaces"
"github.com/t-ash0410/tdd-sample/backend/test/mock"
)
type Repository struct {
ctx *mock.InMemoryContext
}
func NewRepository(ctx *mock.InMemoryContext) interfaces.IRepository {
return &Repository{
ctx,
}
}
func (repo *Repository) List(dst *[]entities.Task) error {
for _, v := range repo.ctx.Data {
entity := v.(entities.Task)
*dst = append(*dst, entity)
}
return nil
}
func (repo *Repository) Add(task entities.Task) error {
repo.ctx.Data = append(repo.ctx.Data, task)
return nil
}
type ErrorRepository struct {
}
func NewErrorRepository() interfaces.IRepository {
return &ErrorRepository{}
}
func (repo *ErrorRepository) List(dst *[]entities.Task) error {
return errors.New("some error")
}
func (repo *ErrorRepository) Add(task entities.Task) error {
return errors.New("some error")
}
|
package lloyd
const (
defaultServerName = "red"
XRequestIDHeader = "X-Request-ID"
)
|
package main
import (
"fmt"
"github.com/danjac/go-angular-demo/api"
"log"
"net/http"
"os"
)
func getEnvOrDie(name string) string {
value := os.Getenv(name)
if value == "" {
log.Fatal(fmt.Sprintf("%s is missing", name))
}
return value
}
func getEnvOrDefault(name string, defaultValue string) string {
value := os.Getenv(name)
if value == "" {
return defaultValue
}
return value
}
func main() {
env := getEnvOrDefault("ENV", "development")
config := &api.Config{
DbName: getEnvOrDie("DB_NAME"),
DbUser: getEnvOrDie("DB_USER"),
DbPassword: getEnvOrDie("DB_PASS"),
LogPrefix: getEnvOrDefault("LOG_PREFIX", "myapp"),
SecretKey: getEnvOrDie("SECRET_KEY"),
ApiPrefix: "/api",
ServeStatic: env == "development",
StaticPrefix: "/",
StaticDir: "./public/",
}
app, err := api.NewApp(config)
if err != nil {
log.Fatal(err)
}
defer app.Shutdown()
http.Handle("/", app.Handler)
// SERVER
port := getEnvOrDefault("PORT", "3000")
http.ListenAndServe(":"+port, nil)
}
|
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this
// file, you can obtain one at https://opensource.org/licenses/MIT.
//
// Copyright (c) DUSK NETWORK. All rights reserved.
package user
import (
"encoding/binary"
"math"
"math/big"
"github.com/dusk-network/dusk-blockchain/pkg/util/nativeutils/sortedset"
"github.com/dusk-network/dusk-crypto/hash"
log "github.com/sirupsen/logrus"
)
// DUSK is one whole unit of DUSK. This is duplicated from wallet since
// otherwise we get into an import cycle including the transactions and users
// packages.
const DUSK = uint64(100000000)
// VotingCommittee represents a set of provisioners with voting rights at a certain
// point in the consensus. The set is sorted by the int value of the public key in
// increasing order (higher last).
type VotingCommittee struct {
sortedset.Cluster
}
func newCommittee() *VotingCommittee {
return &VotingCommittee{
Cluster: sortedset.NewCluster(),
}
}
// Size returns how many members there are in a VotingCommittee.
func (v VotingCommittee) Size() int {
return v.TotalOccurrences()
}
// MemberKeys returns the BLS public keys of all the members in a VotingCommittee.
func (v VotingCommittee) MemberKeys() [][]byte {
return v.Unravel()
}
// Equal checks if two VotingCommittees are the same.
func (v VotingCommittee) Equal(other *VotingCommittee) bool {
return v.Cluster.Equal(other.Cluster)
}
// IsMember checks if `pubKeyBLS` is within the VotingCommittee.
func (v VotingCommittee) IsMember(pubKeyBLS []byte) bool {
_, found := v.IndexOf(pubKeyBLS)
return found
}
// createSortitionMessage will return the hash of the passed sortition information.
func createSortitionHash(round uint64, step uint8, i int) ([]byte, error) {
msg := make([]byte, 12)
binary.LittleEndian.PutUint64(msg[:8], round)
binary.LittleEndian.PutUint32(msg[8:12], uint32(i))
msg = append(msg, step)
return hash.Sha3256(msg)
}
// Generate a score from the given hash and total stake weight.
func generateSortitionScore(hash []byte, W *big.Int) uint64 {
hashNum := new(big.Int).SetBytes(hash)
return new(big.Int).Mod(hashNum, W).Uint64()
}
// CreateVotingCommittee will run the deterministic sortition function, which determines
// who will be in the committee for a given step and round.
// TODO: running this with weird setup causes infinite looping (to reproduce, hardcode `3` on MockProvisioners when calling agreement.NewHelper in the agreement tests).
func (p Provisioners) CreateVotingCommittee(round uint64, step uint8, size int) VotingCommittee {
votingCommittee := newCommittee()
W := new(big.Int).SetUint64(p.TotalWeight())
// Deep copy the Members map, to avoid mutating the original set.
members := copyMembers(p.Members)
p.Members = members
// Remove stakes which have not yet become active, or have expired
for _, m := range p.Members {
i := 0
for {
if i == len(m.Stakes) {
break
}
if m.Stakes[i].StartHeight > round || m.Stakes[i].EndHeight < round {
subtractFromTotalWeight(W, m.Stakes[i].Amount)
m.RemoveStake(i)
continue
}
i++
}
}
for i := 0; votingCommittee.Size() < size; i++ {
if W.Uint64() == 0 {
// We ran out of staked DUSK, so we return the result prematurely
break
}
hashSort, err := createSortitionHash(round, step, i)
if err != nil {
log.Panic(err)
}
score := generateSortitionScore(hashSort, W)
blsPk := p.extractCommitteeMember(score)
votingCommittee.Insert(blsPk)
// Subtract up to one DUSK from the extracted committee member.
m := p.GetMember(blsPk)
subtracted := m.SubtractFromStake(1 * DUSK)
// Also subtract the subtracted amount from the total weight, to ensure
// consistency.
subtractFromTotalWeight(W, subtracted)
}
return *votingCommittee
}
// extractCommitteeMember walks through the committee set, while deducting
// each node's stake from the passed score until we reach zero. The public key
// of the node that the function ends on will be returned as a hexadecimal string.
func (p Provisioners) extractCommitteeMember(score uint64) []byte {
var m *Member
var e error
for i := 0; ; i++ {
if m, e = p.MemberAt(i); e != nil {
// handling the eventuality of an out of bound error
m, e = p.MemberAt(0)
if e != nil {
// FIXME: shall this panic ?
log.Panic(e)
}
i = 0
}
stake, err := p.GetStake(m.PublicKeyBLS)
if err != nil {
// If we get an error from GetStake, it means we either got a public key of a
// provisioner who is no longer in the set, or we got a malformed public key.
// We can't repair our committee on the fly, so we have to panic.
log.Panic(err)
}
if stake >= score {
return m.PublicKeyBLS
}
score -= stake
}
}
// GenerateCommittees pre-generates an `amount` of VotingCommittee of a specified `size` from a given `step`.
func (p Provisioners) GenerateCommittees(round uint64, amount, step uint8, size int) []VotingCommittee {
if step >= math.MaxUint8-amount {
amount = math.MaxUint8 - step
}
committees := make([]VotingCommittee, amount)
for i := 0; i < int(amount); i++ {
votingCommittee := p.CreateVotingCommittee(round, step+uint8(i), size)
committees[i] = votingCommittee
}
return committees
}
func subtractFromTotalWeight(W *big.Int, amount uint64) {
if W.Uint64() > amount {
W.Sub(W, big.NewInt(int64(amount)))
return
}
W.Set(big.NewInt(0))
}
// Deep copy a Members map. Since slices are treated as 'reference types' by Go, we
// need to iterate over, and individually copy each Stake to the new Member struct,
// to avoid mutating the original set.
func copyMembers(members map[string]*Member) map[string]*Member {
m := make(map[string]*Member)
for k, v := range members {
member := &Member{
PublicKeyBLS: v.PublicKeyBLS,
}
member.Stakes = append(member.Stakes, v.Stakes...)
m[k] = member
}
return m
}
|
package worker
import (
"time"
gocontext "context"
"github.com/cenk/backoff"
"github.com/mitchellh/multistep"
"github.com/travis-ci/worker/context"
"go.opencensus.io/trace"
)
type stepGenerateScript struct {
generator BuildScriptGenerator
}
func (s *stepGenerateScript) Run(state multistep.StateBag) multistep.StepAction {
buildJob := state.Get("buildJob").(Job)
ctx := state.Get("ctx").(gocontext.Context)
defer context.TimeSince(ctx, "step_generate_script_run", time.Now())
ctx, span := trace.StartSpan(ctx, "GenerateScript.Run")
defer span.End()
logger := context.LoggerFromContext(ctx).WithField("self", "step_generate_script")
b := backoff.NewExponentialBackOff()
b.MaxInterval = 10 * time.Second
b.MaxElapsedTime = time.Minute
var script []byte
var err error
switch job := buildJob.(type) {
case BuildScriptGenerator:
logger.Info("using job to get script")
script, err = job.Generate(ctx, buildJob)
default:
logger.Info("using build script generator to generate script")
err = backoff.Retry(func() (err error) {
script, err = s.generator.Generate(ctx, buildJob)
return
}, b)
}
if err != nil {
state.Put("err", err)
span.SetStatus(trace.Status{
Code: trace.StatusCodeUnavailable,
Message: err.Error(),
})
logger.WithField("err", err).Error("couldn't generate build script, erroring job")
err := buildJob.Error(ctx, "An error occurred while generating the build script.")
if err != nil {
logger.WithField("err", err).Error("couldn't requeue job")
}
return multistep.ActionHalt
}
logger.Info("generated script")
state.Put("script", script)
return multistep.ActionContinue
}
func (s *stepGenerateScript) Cleanup(multistep.StateBag) {
// Nothing to clean up
}
|
package rest
import (
"fmt"
"net/http"
"go/types"
"github.com/json-iterator/go"
)
type IController interface {
IBean
RegisterRoutes() map[string]func(writer http.ResponseWriter,request *http.Request)
HandleRoutes(writer http.ResponseWriter, request *http.Request)(func(writer http.ResponseWriter, request *http.Request),bool)
}
type BaseController struct {
Bean
userDao *UserDao
sessionDao *SessionDao
}
func (this *BaseController) Init(context *Context) {
this.Bean.Init(context)
b:= context.GetBean(this.userDao)
if b,ok := b.(*UserDao);ok{
this.userDao = b
}
b = context.GetBean(this.sessionDao)
if b, ok := b.(*SessionDao); ok {
this.sessionDao = b
}
}
func (this *BaseController) RegisterRoutes() map[string]func(writer http.ResponseWriter,request *http.Request){
LogDebug("BaseController RegisterRouters")
return make(map[string]func(writer http.ResponseWriter, request *http.Request))
}
func (this *BaseController) HandleRoutes(writer http.ResponseWriter, request *http.Request) (func(writer http.ResponseWriter, request *http.Request), bool) {
return nil, false
}
//需要进行登录验证的wrap包装
func (this *BaseController) Wrap(f func(writer http.ResponseWriter, request *http.Request) *WebResult, qualifiedRole string) func(w http.ResponseWriter, r *http.Request){
return func(writer http.ResponseWriter, request *http.Request) {
var webResult *WebResult = nil
if qualifiedRole != USER_ROLE_GUEST{
webResult = f(writer, request)
}else{
webResult = f(writer, request)
}
//输出的是json格式
if webResult != nil {
//返回的内容申明是json,utf-8
writer.Header().Set("Content-Type", "application/json;charset=UTF-8")
//用json的方式输出返回值。
var json = jsoniter.ConfigCompatibleWithStandardLibrary
b, _ := json.Marshal(webResult)
if webResult.Code == RESULT_CODE_OK {
writer.WriteHeader(http.StatusOK)
} else {
writer.WriteHeader(http.StatusBadRequest)
}
fmt.Fprintf(writer, string(b))
} else {
//输出的内容是二进制的。
}
}
}
//返回成功的结果。
func (this *BaseController) Success(data interface{}) *WebResult {
var webResult *WebResult = nil
if value, ok := data.(string); ok {
webResult = &WebResult{Code: RESULT_CODE_OK, Msg: value}
} else if value, ok := data.(*WebResult); ok {
webResult = value
} else if _, ok := data.(types.Nil); ok {
webResult = ConstWebResult(RESULT_CODE_OK)
} else {
webResult = &WebResult{Code: RESULT_CODE_OK, Data: data}
}
return webResult
}
//返回错误的结果。
func (this *BaseController) Error(err interface{}) *WebResult {
var webResult *WebResult = nil
if value, ok := err.(string); ok {
webResult = &WebResult{Code: RESULT_CODE_UTIL_EXCEPTION, Msg: value}
} else if value, ok := err.(int); ok {
webResult = ConstWebResult(value)
} else if value, ok := err.(*WebResult); ok {
webResult = value
} else if value, ok := err.(error); ok {
webResult = &WebResult{Code: RESULT_CODE_UTIL_EXCEPTION, Msg: value.Error()}
} else {
webResult = &WebResult{Code: RESULT_CODE_UTIL_EXCEPTION, Msg: "服务器未知错误"}
}
return webResult
} |
// Copyright 2017 Walter Schulze
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"encoding/json"
"fmt"
"testing"
)
func TestCurry2(t *testing.T) {
curried := deriveCurryMarshal(json.Unmarshal)
got := ""
want := `string`
if err := curried([]byte(`"` + want + `"`))(&got); err != nil {
t.Fatal(err)
}
if got != want {
t.Fatalf("got %s != want %s", got, want)
}
}
func TestCurry3(t *testing.T) {
f := func(a int, b string, c bool) string {
return fmt.Sprintf("%d%s%v", a, b, c)
}
curried := deriveCurry3(f)
want := `1atrue`
got := curried(1)("a", true)
if got != want {
t.Fatalf("got %s != want %s", got, want)
}
}
func TestCurryCurried(t *testing.T) {
f := func(a int, b string, c bool) string {
return fmt.Sprintf("%d%s%v", a, b, c)
}
curried := deriveCurry3(f)
want := `1atrue`
gotcurried := curried(1)
currycurried := deriveCurryCurried(gotcurried)
got := currycurried("a")(true)
if got != want {
t.Fatalf("got %s != want %s", got, want)
}
}
func TestCurryBlankIdentifier(t *testing.T) {
f := func(param_1 string, _ bool, param_0 int) string {
return fmt.Sprintf("%s%v%d", param_1, true, param_0)
}
curried := deriveCurryBlackIdentifier(f)
want := `atrue1`
got := curried("a")(false, 1)
if got != want {
t.Fatalf("got %s != want %s", got, want)
}
}
|
package common
import (
"net/http"
"os"
"strings"
)
//GetRequestURL 获取请求的URL
func GetRequestURL(request *http.Request) string {
scheme := "http://"
if request.TLS != nil {
scheme = "https://"
}
return strings.Join([]string{scheme, request.Host, request.RequestURI}, "")
}
//PathExist 路径是否存在
func PathExist(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"context"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/cockroach/pkg/sql/opt/invertedidx"
"github.com/cockroachdb/cockroach/pkg/sql/rowcontainer"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/optional"
"github.com/cockroachdb/errors"
)
// invertedFilterState represents the state of the processor.
type invertedFiltererState int
const (
ifrStateUnknown invertedFiltererState = iota
// ifrReadingInput means that the inverted index rows are being read from
// the input.
ifrReadingInput
// ifrEmittingRows means we are emitting the results of the evaluation.
ifrEmittingRows
)
type invertedFilterer struct {
execinfra.ProcessorBase
runningState invertedFiltererState
input execinfra.RowSource
invertedColIdx uint32
diskMonitor *mon.BytesMonitor
rc *rowcontainer.DiskBackedNumberedRowContainer
invertedEval batchedInvertedExprEvaluator
// The invertedEval result.
evalResult []KeyIndex
// The next result row, i.e., evalResult[resultIdx].
resultIdx int
// Scratch space for constructing the PK row to feed to rc.
keyRow rowenc.EncDatumRow
// Scratch space for constructing the output row.
outputRow rowenc.EncDatumRow
}
var _ execinfra.Processor = &invertedFilterer{}
var _ execinfra.RowSource = &invertedFilterer{}
var _ execinfra.OpNode = &invertedFilterer{}
const invertedFiltererProcName = "inverted filterer"
func newInvertedFilterer(
flowCtx *execinfra.FlowCtx,
processorID int32,
spec *execinfrapb.InvertedFiltererSpec,
input execinfra.RowSource,
post *execinfrapb.PostProcessSpec,
output execinfra.RowReceiver,
) (execinfra.RowSourcedProcessor, error) {
ifr := &invertedFilterer{
input: input,
invertedColIdx: spec.InvertedColIdx,
invertedEval: batchedInvertedExprEvaluator{
exprs: []*inverted.SpanExpressionProto{&spec.InvertedExpr},
},
}
// The RowContainer columns are all columns other than the inverted column.
// The output has the same types as the input.
outputColTypes := input.OutputTypes()
rcColTypes := make([]*types.T, len(outputColTypes)-1)
copy(rcColTypes, outputColTypes[:ifr.invertedColIdx])
copy(rcColTypes[ifr.invertedColIdx:], outputColTypes[ifr.invertedColIdx+1:])
ifr.keyRow = make(rowenc.EncDatumRow, len(rcColTypes))
ifr.outputRow = make(rowenc.EncDatumRow, len(outputColTypes))
ifr.outputRow[ifr.invertedColIdx].Datum = tree.DNull
// Initialize ProcessorBase.
if err := ifr.ProcessorBase.Init(
ifr, post, outputColTypes, flowCtx, processorID, output, nil, /* memMonitor */
execinfra.ProcStateOpts{
InputsToDrain: []execinfra.RowSource{ifr.input},
TrailingMetaCallback: func() []execinfrapb.ProducerMetadata {
ifr.close()
return nil
},
},
); err != nil {
return nil, err
}
ctx := flowCtx.EvalCtx.Ctx()
// Initialize memory monitor and row container for input rows.
ifr.MemMonitor = execinfra.NewLimitedMonitor(ctx, flowCtx.EvalCtx.Mon, flowCtx.Cfg, "inverter-filterer-limited")
ifr.diskMonitor = execinfra.NewMonitor(ctx, flowCtx.DiskMonitor, "inverted-filterer-disk")
ifr.rc = rowcontainer.NewDiskBackedNumberedRowContainer(
true, /* deDup */
rcColTypes,
ifr.EvalCtx,
ifr.FlowCtx.Cfg.TempStorage,
ifr.MemMonitor,
ifr.diskMonitor,
)
if execinfra.ShouldCollectStats(flowCtx.EvalCtx.Ctx(), flowCtx) {
ifr.input = newInputStatCollector(ifr.input)
ifr.ExecStatsForTrace = ifr.execStatsForTrace
}
if spec.PreFiltererSpec != nil {
semaCtx := flowCtx.TypeResolverFactory.NewSemaContext(flowCtx.EvalCtx.Txn)
var exprHelper execinfrapb.ExprHelper
colTypes := []*types.T{spec.PreFiltererSpec.Type}
if err := exprHelper.Init(spec.PreFiltererSpec.Expression, colTypes, semaCtx, ifr.EvalCtx); err != nil {
return nil, err
}
preFilterer, preFiltererState, err := invertedidx.NewBoundPreFilterer(
spec.PreFiltererSpec.Type, exprHelper.Expr)
if err != nil {
return nil, err
}
ifr.invertedEval.filterer = preFilterer
ifr.invertedEval.preFilterState = append(ifr.invertedEval.preFilterState, preFiltererState)
}
// TODO(sumeer): for expressions that only involve unions, and the output
// does not need to be in key-order, we should incrementally output after
// de-duping. It will reduce the container memory/disk by 2x.
// Prepare inverted evaluator for later evaluation.
_, err := ifr.invertedEval.init()
if err != nil {
return nil, err
}
return ifr, nil
}
// Next is part of the RowSource interface. It drives a two-state machine:
//   - ifrReadingInput: buffer all input rows (de-duped) into the row
//     container and feed their inverted keys to invertedEval;
//   - ifrEmittingRows: emit the rows selected by the inverted expression.
func (ifr *invertedFilterer) Next() (rowenc.EncDatumRow, *execinfrapb.ProducerMetadata) {
	// - Read all the input and add to the row container (with de-duping), and feed it
	// to the invertedEval.
	// - Evaluate the inverted expression
	// - Retrieve the results and for each row evaluate the ON expression and output.
	for ifr.State == execinfra.StateRunning {
		var row rowenc.EncDatumRow
		var meta *execinfrapb.ProducerMetadata
		switch ifr.runningState {
		case ifrReadingInput:
			ifr.runningState, meta = ifr.readInput()
		case ifrEmittingRows:
			ifr.runningState, row, meta = ifr.emitRow()
		default:
			log.Fatalf(ifr.Ctx, "unsupported state: %d", ifr.runningState)
		}
		// Nothing was produced this iteration; keep advancing the state machine.
		if row == nil && meta == nil {
			continue
		}
		if meta != nil {
			return nil, meta
		}
		if outRow := ifr.ProcessRowHelper(row); outRow != nil {
			return outRow, nil
		}
	}
	return nil, ifr.DrainHelper()
}
// readInput pulls one row from the input. The row's non-inverted columns are
// stored in the row container and its inverted-column key bytes are fed to
// the inverted expression evaluator. When the input is exhausted, the
// expression is evaluated and the state switches to ifrEmittingRows.
func (ifr *invertedFilterer) readInput() (invertedFiltererState, *execinfrapb.ProducerMetadata) {
	row, meta := ifr.input.Next()
	if meta != nil {
		if meta.Err != nil {
			// The error travels to the consumer inside the metadata, so drain
			// without a local error.
			ifr.MoveToDraining(nil /* err */)
			return ifrStateUnknown, meta
		}
		return ifrReadingInput, meta
	}
	if row == nil {
		log.VEventf(ifr.Ctx, 1, "no more input rows")
		evalResult := ifr.invertedEval.evaluate()
		ifr.rc.SetupForRead(ifr.Ctx, evalResult)
		// invertedEval had a single expression in the batch, and the results
		// for that expression are in evalResult[0].
		ifr.evalResult = evalResult[0]
		return ifrEmittingRows, nil
	}
	// Replace missing values with nulls to appease the row container.
	for i := range row {
		if row[i].IsUnset() {
			row[i].Datum = tree.DNull
		}
	}
	// Add to the evaluator.
	//
	// NB: Inverted columns are custom encoded in a manner that does not
	// correspond to Datum encoding, and in the code here we only want the encoded
	// bytes. We have two possibilities with what the provider of this row has
	// done:
	// 1. Not decoded the row: This is the len(enc) > 0 case.
	// 2. Decoded the row, but special-cased the inverted column by stuffing the
	//    encoded bytes into a "decoded" DBytes: This is the len(enc) == 0 case.
	enc := row[ifr.invertedColIdx].EncodedBytes()
	if len(enc) == 0 {
		// If the input is from the vectorized engine, the encoded bytes may be
		// empty (case 2 above). In this case, the Datum should contain the encoded
		// key as a DBytes. The Datum should never be DNull since nulls aren't
		// stored in inverted indexes.
		if row[ifr.invertedColIdx].Datum == nil {
			ifr.MoveToDraining(errors.New("no datum found"))
			return ifrStateUnknown, ifr.DrainHelper()
		}
		if row[ifr.invertedColIdx].Datum.ResolvedType().Family() != types.BytesFamily {
			ifr.MoveToDraining(errors.New("virtual inverted column should have type bytes"))
			return ifrStateUnknown, ifr.DrainHelper()
		}
		enc = []byte(*row[ifr.invertedColIdx].Datum.(*tree.DBytes))
	}
	// shouldAdd is false when the pre-filterer (if any) rejects the key.
	shouldAdd, err := ifr.invertedEval.prepareAddIndexRow(enc, nil /* encFull */)
	if err != nil {
		ifr.MoveToDraining(err)
		return ifrStateUnknown, ifr.DrainHelper()
	}
	if shouldAdd {
		// Transform to keyRow which is everything other than the inverted
		// column and then add it to the row container and the inverted expr
		// evaluator.
		copy(ifr.keyRow, row[:ifr.invertedColIdx])
		copy(ifr.keyRow[ifr.invertedColIdx:], row[ifr.invertedColIdx+1:])
		keyIndex, err := ifr.rc.AddRow(ifr.Ctx, ifr.keyRow)
		if err != nil {
			ifr.MoveToDraining(err)
			return ifrStateUnknown, ifr.DrainHelper()
		}
		if err = ifr.invertedEval.addIndexRow(keyIndex); err != nil {
			ifr.MoveToDraining(err)
			return ifrStateUnknown, ifr.DrainHelper()
		}
	}
	return ifrReadingInput, nil
}
// emitRow returns the next output row from the inverted-expression
// evaluation result. Each result entry is an index into the row container;
// the stored key row is spliced into outputRow around the inverted column
// slot (which stays NULL — it was initialized that way). Once all results
// are emitted, the container is reset and the processor drains.
func (ifr *invertedFilterer) emitRow() (
	invertedFiltererState,
	rowenc.EncDatumRow,
	*execinfrapb.ProducerMetadata,
) {
	// drainFunc transitions to draining with the given error (possibly nil).
	drainFunc := func(err error) (
		invertedFiltererState,
		rowenc.EncDatumRow,
		*execinfrapb.ProducerMetadata,
	) {
		ifr.MoveToDraining(err)
		return ifrStateUnknown, nil, ifr.DrainHelper()
	}
	if ifr.resultIdx >= len(ifr.evalResult) {
		// We are done emitting all rows.
		return drainFunc(ifr.rc.UnsafeReset(ifr.Ctx))
	}
	curRowIdx := ifr.resultIdx
	ifr.resultIdx++
	keyRow, err := ifr.rc.GetRow(ifr.Ctx, ifr.evalResult[curRowIdx], false /* skip */)
	if err != nil {
		return drainFunc(err)
	}
	// Copy the key row's columns around the inverted column position.
	copy(ifr.outputRow[:ifr.invertedColIdx], keyRow[:ifr.invertedColIdx])
	copy(ifr.outputRow[ifr.invertedColIdx+1:], keyRow[ifr.invertedColIdx:])
	return ifrEmittingRows, ifr.outputRow, nil
}
// Start is part of the RowSource interface. It starts the input and puts the
// processor into the initial input-buffering state.
func (ifr *invertedFilterer) Start(ctx context.Context) {
	ctx = ifr.StartInternal(ctx, invertedFiltererProcName)
	ifr.input.Start(ctx)
	ifr.runningState = ifrReadingInput
}
// ConsumerClosed is part of the RowSource interface. It releases all
// resources eagerly since no more rows will be requested.
func (ifr *invertedFilterer) ConsumerClosed() {
	// The consumer is done, Next() will not be called again.
	ifr.close()
}
// close releases the row container and stops the memory/disk monitors. The
// InternalClose guard ensures the resources are released at most once even
// though close is reachable from both ConsumerClosed and the trailing-meta
// callback.
func (ifr *invertedFilterer) close() {
	if ifr.InternalClose() {
		ifr.rc.Close(ifr.Ctx)
		if ifr.MemMonitor != nil {
			ifr.MemMonitor.Stop(ifr.Ctx)
		}
		if ifr.diskMonitor != nil {
			ifr.diskMonitor.Stop(ifr.Ctx)
		}
	}
}
// execStatsForTrace implements ProcessorBase.ExecStatsForTrace. It reports
// the input row statistics plus peak memory/disk usage recorded by this
// processor's monitors; it returns nil when the input was not wrapped in a
// stats collector.
func (ifr *invertedFilterer) execStatsForTrace() *execinfrapb.ComponentStats {
	is, ok := getInputStats(ifr.input)
	if !ok {
		return nil
	}
	return &execinfrapb.ComponentStats{
		Inputs: []execinfrapb.InputStats{is},
		Exec: execinfrapb.ExecStats{
			MaxAllocatedMem: optional.MakeUint(uint64(ifr.MemMonitor.MaximumBytes())),
			MaxAllocatedDisk: optional.MakeUint(uint64(ifr.diskMonitor.MaximumBytes())),
		},
		Output: ifr.Out.Stats(),
	}
}
// ChildCount is part of the execinfra.OpNode interface. The input counts as
// a child only when it itself participates in the OpNode tree.
func (ifr *invertedFilterer) ChildCount(verbose bool) int {
	_, isOpNode := ifr.input.(execinfra.OpNode)
	if !isOpNode {
		return 0
	}
	return 1
}
// Child is part of the execinfra.OpNode interface. Only index 0 (the input)
// is valid; any other index, or an input that is not an OpNode, panics.
func (ifr *invertedFilterer) Child(nth int, verbose bool) execinfra.OpNode {
	if nth == 0 {
		if n, ok := ifr.input.(execinfra.OpNode); ok {
			return n
		}
		panic("input to invertedFilterer is not an execinfra.OpNode")
	}
	panic(errors.AssertionFailedf("invalid index %d", nth))
}
|
// 155. Hands-on exercise #2: this exercise reinforces our understanding of
// method sets.
// Because say() below is declared with a pointer receiver (*Person), the
// method set of *Person contains say() but the method set of Person does not.
// Therefore only a *Person value satisfies the Human interface; passing a
// plain Person value would not compile.
// receiver = the value a method is declared on (here: p *Person).
package main
import (
"fmt"
)
// Person is a simple struct used to demonstrate method sets.
type Person struct {
	First string
}

// Human is satisfied by any type whose method set contains say(). Since
// say() is declared on *Person below, only *Person implements Human.
type Human interface {
	say()
}
func main() {
	p1 := Person{
		First: "p1a",
	}
	// &p1 is a *Person; the method set of *Person includes say(), so it
	// satisfies the Human interface and may be passed here.
	saysomething(&p1)
	// The next line would not compile: the method set of the value type
	// Person does not include the pointer-receiver method say().
	// saysomething(p1)
	// This works because p1 is addressable: Go rewrites it as (&p1).say().
	p1.say()
}
// say has a pointer receiver, so it belongs to the method set of *Person
// but not of Person.
func (p *Person) say() {
	fmt.Println("hi")
}
// saysomething accepts any Human and invokes its say method.
func saysomething(h Human) {
	h.say()
}
|
package main
import (
	"log"
	"net/http"

	"fabulous-fox/controllers"
	"fabulous-fox/db"
	"fabulous-fox/utility"

	"github.com/gorilla/mux"
)
// main wires up the HTTP API routes and runs the server.
//
// Route groups:
//   - /api/v1/...              : authenticated subscription/user/service endpoints
//   - /api/login, /api/register: public authentication endpoints
func main() {
	router := mux.NewRouter()
	router.Use(CommonMiddleware())

	apiSubrouter := router.PathPrefix("/api").Subrouter()
	v1Subrouter := apiSubrouter.PathPrefix("/v1").Subrouter()
	v1Subrouter.Use(Authenticator())

	v1Subrouter.HandleFunc("/subscriptions", controllers.CreateSubscription).Methods("POST")
	v1Subrouter.HandleFunc("/subscriptions", controllers.GetSubscriptions).Methods("GET")
	v1Subrouter.HandleFunc("/subscriptions/{uuid}", controllers.UpdateSubscription).Methods("PUT")
	// Registered before the {uuid} GET route so that "general" is matched
	// literally instead of being captured as a uuid (gorilla/mux matches
	// routes in registration order).
	v1Subrouter.HandleFunc("/subscriptions/general", controllers.GetCostForCategories).Methods("GET")
	v1Subrouter.HandleFunc("/subscriptions/{uuid}", controllers.DeleteSubscription).Methods("DELETE")
	v1Subrouter.HandleFunc("/subscriptions/{uuid}", controllers.GetSubscription).Methods("GET")
	v1Subrouter.HandleFunc("/user", controllers.GetUser).Methods("GET")
	v1Subrouter.HandleFunc("/user/goal", controllers.UpdateGoal).Methods("PUT")
	v1Subrouter.HandleFunc("/services", controllers.GetServices).Methods("GET")
	apiSubrouter.HandleFunc("/login", controllers.SignIn).Methods("POST")
	apiSubrouter.HandleFunc("/register", controllers.Register).Methods("POST")

	// Register the DB cleanup BEFORE the blocking ListenAndServe call. In the
	// original code the defer statement came after ListenAndServe, so it was
	// never even registered.
	defer db.DB.Close()

	// ListenAndServe blocks until the server fails (e.g. the port is already
	// in use). Log the error instead of silently discarding it, then return
	// normally so the deferred Close runs.
	if err := http.ListenAndServe(utility.GetEnv("PORT", ":3000"), router); err != nil {
		log.Println(err)
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"math"
"os"
"strconv"
"time"
)
// main runs both Advent of Code day-1 puzzle parts in sequence.
func main() {
	partOne()
	partTwo()
}
// calcFuel recursively computes the total fuel required for a module of the
// given mass, including the fuel needed to carry the fuel itself (Advent of
// Code 2019 day 1, part 2): each step adds floor(mass/3)-2 and recurses on
// that amount until the requirement drops to zero or below.
//
// total is the running accumulator; callers pass 0 on the initial call.
func calcFuel(fuel float64, total float64) float64 {
	// fuel is already a float64; the original wrapped it in a redundant
	// float64() conversion.
	div := math.Floor(fuel/3.0) - 2.0
	if div <= 0.0 {
		// A requirement of zero or less contributes nothing; recursion ends.
		return total
	}
	return calcFuel(div, total+div)
}
// partTwo solves AoC 2019 day 1 part 2: for every module mass in the input
// file, compute the fuel requirement including fuel-for-fuel, and print the
// grand total.
func partTwo() {
	defer stopwatch(time.Now(), "part 2")
	total := 0.0
	file, err := os.Open("./input-day1")
	handleErr(err)
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		val, err := strconv.ParseFloat(scanner.Text(), 64)
		handleErr(err)
		// Each module's recursive accumulation starts from zero.
		total += calcFuel(val, 0.0)
	}
	// The original never checked scanner.Err(), so a read error silently
	// truncated the input.
	handleErr(scanner.Err())
	fmt.Println("part 2: " + fmt.Sprintf("%f", total))
}
// partOne solves AoC 2019 day 1 part 1: sum floor(mass/3)-2 over all module
// masses in the input file and print the result.
func partOne() {
	defer stopwatch(time.Now(), "part 1")
	fuel := 0.0
	file, err := os.Open("./input-day1")
	handleErr(err)
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		val, err := strconv.ParseFloat(scanner.Text(), 64)
		handleErr(err)
		fuel += math.Floor(val/3.0) - 2
	}
	// Surface scanner errors; the original ignored them.
	handleErr(scanner.Err())
	fmt.Println("part 1: " + fmt.Sprintf("%f", fuel))
}
// handleErr aborts the program with the underlying error message when err is
// non-nil; a nil err is a no-op. The original printed the constant string
// "error" and kept running, which both hid the cause and let the program
// continue on corrupt data (e.g. the zero value from a failed ParseFloat).
func handleErr(err error) {
	if err != nil {
		log.Fatalf("error: %v", err)
	}
}
func stopwatch(start time.Time, name string) {
elapsed := time.Since(start)
log.Printf("%s took %s", name, elapsed)
}
|
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"time"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/migration"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr"
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/querycache"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/transform"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/cancelchecker"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
)
// extendedEvalContext extends tree.EvalContext with fields that are needed for
// distsql planning.
type extendedEvalContext struct {
	tree.EvalContext
	// SessionMutator is used to mutate the session's variables; read access
	// goes through the embedded EvalContext.
	SessionMutator *sessionDataMutator
	// SessionID for this connection.
	SessionID ClusterWideID
	// VirtualSchemas can be used to access virtual tables.
	VirtualSchemas VirtualTabler
	// Tracing provides access to the session's tracing interface. Changes to the
	// tracing state should be done through the sessionDataMutator.
	Tracing *SessionTracing
	// NodesStatusServer gives access to the NodesStatus service. Unavailable to
	// tenants.
	NodesStatusServer serverpb.OptionalNodesStatusServer
	// SQLStatusServer gives access to a subset of the serverpb.Status service
	// that is available to both system and non-system tenants.
	SQLStatusServer serverpb.SQLStatusServer
	// MemMetrics represent the group of metrics to which execution should
	// contribute.
	MemMetrics *MemoryMetrics
	// Tables points to the Session's table collection (& cache).
	Descs *descs.Collection
	// ExecCfg is the executor (server-wide) configuration.
	ExecCfg *ExecutorConfig
	// DistSQLPlanner is used to plan distributed SQL execution.
	DistSQLPlanner *DistSQLPlanner
	// TxnModesSetter is used to adjust transaction modes.
	TxnModesSetter txnModesSetter
	// Jobs refers to jobs in extraTxnState. Jobs is a pointer to a jobsCollection
	// which is a slice because we need calls to resetExtraTxnState to reset the
	// jobsCollection.
	Jobs *jobsCollection
	// SchemaChangeJobCache refers to schemaChangeJobsCache in extraTxnState.
	SchemaChangeJobCache map[descpb.ID]*jobs.Job
	// schemaAccessors provides access to database and table descriptors
	// (see schemaInterface).
	schemaAccessors *schemaInterface
	// sqlStatsCollector collects per-statement SQL statistics.
	sqlStatsCollector *sqlStatsCollector
	SchemaChangerState *SchemaChangerState
}
// copy returns a copy of evalCtx that is safe to modify independently at the
// top level: the embedded tree.EvalContext is duplicated via its Copy method,
// while all other (mostly pointer) fields are shared with the original.
func (evalCtx *extendedEvalContext) copy() *extendedEvalContext {
	cpy := *evalCtx
	cpy.EvalContext = *evalCtx.EvalContext.Copy()
	return &cpy
}
// QueueJob creates a new job from record and queues it for execution after
// the transaction commits. The job is created within the current transaction
// (evalCtx.Txn) and its ID is appended to evalCtx.Jobs so the connection can
// track the jobs it has queued (and discard them on txn state reset).
func (evalCtx *extendedEvalContext) QueueJob(
	ctx context.Context, record jobs.Record,
) (*jobs.Job, error) {
	jobID := evalCtx.ExecCfg.JobRegistry.MakeJobID()
	job, err := evalCtx.ExecCfg.JobRegistry.CreateJobWithTxn(
		ctx,
		record,
		jobID,
		evalCtx.Txn,
	)
	if err != nil {
		return nil, err
	}
	*evalCtx.Jobs = append(*evalCtx.Jobs, jobID)
	return job, nil
}
// schemaInterface provides access to the database and table descriptors.
// See schema_accessors.go.
type schemaInterface struct {
	// logical resolves descriptors by name through the catalog.
	logical catalog.Accessor
}
// planner is the centerpiece of SQL statement execution combining session
// state and database state with the logic for SQL execution. It is logically
// scoped to the execution of a single statement, and should not be used to
// execute multiple statements. It is not safe to use the same planner from
// multiple goroutines concurrently.
//
// planners are usually created by using the newPlanner method on a Session.
// If one needs to be created outside of a Session, use makeInternalPlanner().
type planner struct {
	// txn is the KV transaction the statement runs in (may be nil for an
	// internal planner created without a txn — see newInternalPlanner).
	txn *kv.Txn
	// isInternalPlanner is set to true when this planner is not bound to
	// a SQL session.
	isInternalPlanner bool
	// Corresponding Statement for this query.
	stmt Statement
	// instrumentation collects observability state for this query
	// (presumably plan/execution details; see instrumentationHelper).
	instrumentation instrumentationHelper
	// Contexts for different stages of planning and execution.
	semaCtx tree.SemaContext
	extendedEvalCtx extendedEvalContext
	// sessionDataMutator is used to mutate the session variables. Read
	// access to them is provided through evalCtx.
	sessionDataMutator *sessionDataMutator
	// execCfg is used to access the server configuration for the Executor.
	execCfg *ExecutorConfig
	// preparedStatements gives access to the session's prepared statements.
	preparedStatements preparedStatementsAccessor
	// avoidCachedDescriptors, when true, instructs all code that
	// accesses table/view descriptors to force reading the descriptors
	// within the transaction. This is necessary to read descriptors
	// from the store for:
	// 1. Descriptors that are part of a schema change but are not
	// modified by the schema change. (reading a table in CREATE VIEW)
	// 2. Disable the use of the table cache in tests.
	avoidCachedDescriptors bool
	// If set, the planner should skip checking for the SELECT privilege when
	// initializing plans to read from a table. This should be used with care.
	skipSelectPrivilegeChecks bool
	// autoCommit indicates whether we're planning for an implicit transaction.
	// If autoCommit is true, the plan is allowed (but not required) to commit the
	// transaction along with other KV operations. Committing the txn might be
	// beneficial because it may enable the 1PC optimization.
	//
	// NOTE: plan node must be configured appropriately to actually perform an
	// auto-commit. This is dependent on information from the optimizer.
	autoCommit bool
	// cancelChecker is used by planNodes to check for cancellation of the associated
	// query.
	cancelChecker *cancelchecker.CancelChecker
	// isPreparing is true if this planner is currently preparing.
	isPreparing bool
	// curPlan collects the properties of the current plan being prepared. This state
	// is undefined at the beginning of the planning of each new statement, and cannot
	// be reused for an old prepared statement after a new statement has been prepared.
	curPlan planTop
	// Avoid allocations by embedding commonly used objects and visitors.
	txCtx transform.ExprTransformContext
	nameResolutionVisitor schemaexpr.NameResolutionVisitor
	tableName tree.TableName
	// Use a common datum allocator across all the plan nodes. This separates the
	// plan lifetime from the lifetime of returned results allowing plan nodes to
	// be pool allocated.
	alloc *rowenc.DatumAlloc
	// optPlanningCtx stores the optimizer planning context, which contains
	// data structures that can be reused between queries (for efficiency).
	optPlanningCtx optPlanningCtx
	// noticeSender allows the sending of notices.
	// Do not use this object directly; use the BufferClientNotice() method
	// instead.
	noticeSender noticeSender
	// queryCacheSession holds per-session state for the query cache.
	queryCacheSession querycache.Session
	// contextDatabaseID is the ID of a database. It is set during some name
	// resolution processes to disallow cross database references. In particular,
	// the type resolution steps will disallow resolution of types that have a
	// parentID != contextDatabaseID when it is set.
	contextDatabaseID descpb.ID
}
// setSessionID records the connection's session ID in the eval context.
func (evalCtx *extendedEvalContext) setSessionID(sessionID ClusterWideID) {
	evalCtx.SessionID = sessionID
}

// noteworthyInternalMemoryUsageBytes is the minimum size tracked by each
// internal SQL pool before the pool starts explicitly logging overall usage
// growth in the log.
var noteworthyInternalMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_INTERNAL_MEMORY_USAGE", 1<<20 /* 1 MB */)
// internalPlannerParams encapsulates configurable planner fields. The defaults
// are set in newInternalPlanner.
type internalPlannerParams struct {
	// collection is the descriptor collection the planner will use; when not
	// supplied, newInternalPlanner creates a fresh one.
	collection *descs.Collection
}

// InternalPlannerParamsOption is an option that can be passed to
// NewInternalPlanner.
type InternalPlannerParamsOption func(*internalPlannerParams)

// WithDescCollection configures the planner with the provided collection
// instead of the default (creating a new one from scratch).
func WithDescCollection(collection *descs.Collection) InternalPlannerParamsOption {
	return func(params *internalPlannerParams) {
		params.collection = collection
	}
}
// NewInternalPlanner is an exported version of newInternalPlanner. It
// returns an interface{} so it can be used outside of the sql package.
// The returned cleanup function must be called when done with the planner.
func NewInternalPlanner(
	opName string,
	txn *kv.Txn,
	user security.SQLUsername,
	memMetrics *MemoryMetrics,
	execCfg *ExecutorConfig,
	sessionData sessiondatapb.SessionData,
	opts ...InternalPlannerParamsOption,
) (interface{}, func()) {
	return newInternalPlanner(opName, txn, user, memMetrics, execCfg, sessionData, opts...)
}
// newInternalPlanner creates a new planner instance for internal usage. This
// planner is not associated with a sql session.
//
// Since it can't be reset, the planner can be used only for planning a single
// statement.
//
// Returns a cleanup function that must be called once the caller is done with
// the planner.
func newInternalPlanner(
	opName string,
	txn *kv.Txn,
	user security.SQLUsername,
	memMetrics *MemoryMetrics,
	execCfg *ExecutorConfig,
	sessionData sessiondatapb.SessionData,
	opts ...InternalPlannerParamsOption,
) (*planner, func()) {
	// Default parameters which may be override by the supplied options.
	params := &internalPlannerParams{
		// The table collection used by the internal planner does not rely on the
		// deprecatedDatabaseCache and there are no subscribers to the
		// deprecatedDatabaseCache, so we can leave it uninitialized.
		// Furthermore, we're not concerned about the efficiency of querying tables
		// with user-defined types, hence the nil hydratedTables.
		collection: descs.NewCollection(execCfg.Settings, execCfg.LeaseManager, nil /* hydratedTables */),
	}
	for _, opt := range opts {
		opt(params)
	}
	// We need a context that outlives all the uses of the planner (since the
	// planner captures it in the EvalCtx, and so does the cleanup function that
	// we're going to return. We just create one here instead of asking the caller
	// for a ctx with this property. This is really ugly, but the alternative of
	// asking the caller for one is hard to explain. What we need is better and
	// separate interfaces for planning and running plans, which could take
	// suitable contexts.
	ctx := logtags.AddTag(context.Background(), opName, "")
	// Build session data for a session-less planner: run as the given user
	// against the system database.
	sd := &sessiondata.SessionData{
		SessionData: sessionData,
		SearchPath: sessiondata.DefaultSearchPathForUser(user),
		SequenceState: sessiondata.NewSequenceState(),
		Location: time.UTC,
	}
	sd.SessionData.Database = "system"
	sd.SessionData.UserProto = user.EncodeProto()
	dataMutator := &sessionDataMutator{
		data: sd,
		defaults: SessionDefaults(map[string]string{
			"application_name": "crdb-internal",
			"database": "system",
		}),
		settings: execCfg.Settings,
		paramStatusUpdater: &noopParamStatusUpdater{},
		setCurTxnReadOnly: func(bool) {},
	}
	// If a txn was supplied, use its read timestamp as the statement/txn
	// timestamps; otherwise they stay at the zero value.
	var ts time.Time
	if txn != nil {
		readTimestamp := txn.ReadTimestamp()
		if readTimestamp.IsEmpty() {
			panic("makeInternalPlanner called with a transaction without timestamps")
		}
		ts = readTimestamp.GoTime()
	}
	p := &planner{execCfg: execCfg, alloc: &rowenc.DatumAlloc{}}
	p.txn = txn
	p.stmt = Statement{}
	p.cancelChecker = cancelchecker.NewCancelChecker(ctx)
	p.isInternalPlanner = true
	p.semaCtx = tree.MakeSemaContext()
	p.semaCtx.SearchPath = sd.SearchPath
	p.semaCtx.TypeResolver = p
	// Unlimited monitor: internal planner memory is bounded only by the
	// noteworthy-usage logging threshold.
	plannerMon := mon.NewUnlimitedMonitor(ctx,
		fmt.Sprintf("internal-planner.%s.%s", user, opName),
		mon.MemoryResource,
		memMetrics.CurBytesCount, memMetrics.MaxBytesHist,
		noteworthyInternalMemoryUsageBytes, execCfg.Settings)
	p.extendedEvalCtx = internalExtendedEvalCtx(
		ctx, sd, dataMutator, params.collection, txn, ts, ts, execCfg, plannerMon,
	)
	// The planner itself implements many of the interfaces the eval context
	// needs.
	p.extendedEvalCtx.Planner = p
	p.extendedEvalCtx.PrivilegedAccessor = p
	p.extendedEvalCtx.SessionAccessor = p
	p.extendedEvalCtx.ClientNoticeSender = p
	p.extendedEvalCtx.Sequence = p
	p.extendedEvalCtx.Tenant = p
	p.extendedEvalCtx.JoinTokenCreator = p
	p.extendedEvalCtx.ClusterID = execCfg.ClusterID()
	p.extendedEvalCtx.ClusterName = execCfg.RPCContext.ClusterName()
	p.extendedEvalCtx.NodeID = execCfg.NodeID
	p.extendedEvalCtx.Locality = execCfg.Locality
	p.sessionDataMutator = dataMutator
	p.autoCommit = false
	p.extendedEvalCtx.MemMetrics = memMetrics
	p.extendedEvalCtx.ExecCfg = execCfg
	p.extendedEvalCtx.Placeholders = &p.semaCtx.Placeholders
	p.extendedEvalCtx.Annotations = &p.semaCtx.Annotations
	p.extendedEvalCtx.Descs = params.collection
	p.queryCacheSession.Init()
	p.optPlanningCtx.init(p)
	return p, func() {
		// Note that we capture ctx here. This is only valid as long as we create
		// the context as explained at the top of the method.
		// The collection will accumulate descriptors read during planning as well
		// as type descriptors read during execution on the local node. Many users
		// of the internal planner do set the `skipCache` flag on the resolver but
		// this is not respected by type resolution underneath execution. That
		// subtle details means that the type descriptor used by execution may be
		// stale, but that must be okay. Correctness concerns aside, we must release
		// the leases to ensure that we don't leak a descriptor lease.
		p.Descriptors().ReleaseAll(ctx)
		// Stop the memory monitor.
		plannerMon.Stop(ctx)
	}
}
// internalExtendedEvalCtx creates an evaluation context for an "internal
// planner". Since the eval context is supposed to be tied to a session and
// there's no session to speak of here, different fields are filled in here to
// keep the tests using the internal planner passing.
func internalExtendedEvalCtx(
	ctx context.Context,
	sd *sessiondata.SessionData,
	dataMutator *sessionDataMutator,
	tables *descs.Collection,
	txn *kv.Txn,
	txnTimestamp time.Time,
	stmtTimestamp time.Time,
	execCfg *ExecutorConfig,
	plannerMon *mon.BytesMonitor,
) extendedEvalContext {
	evalContextTestingKnobs := execCfg.EvalContextTestingKnobs
	// The stats resetter is only available when an internal executor exists.
	var sqlStatsResetter tree.SQLStatsResetter
	if execCfg.InternalExecutor != nil {
		sqlStatsResetter = execCfg.InternalExecutor.s
	}
	return extendedEvalContext{
		EvalContext: tree.EvalContext{
			Txn: txn,
			SessionData: sd,
			// Internal planners always run as implicit, read-write txns.
			TxnReadOnly: false,
			TxnImplicit: true,
			Settings: execCfg.Settings,
			Codec: execCfg.Codec,
			Context: ctx,
			Mon: plannerMon,
			TestingKnobs: evalContextTestingKnobs,
			StmtTimestamp: stmtTimestamp,
			TxnTimestamp: txnTimestamp,
			InternalExecutor: execCfg.InternalExecutor,
			SQLStatsResetter: sqlStatsResetter,
		},
		SessionMutator: dataMutator,
		VirtualSchemas: execCfg.VirtualSchemas,
		Tracing: &SessionTracing{},
		NodesStatusServer: execCfg.NodesStatusServer,
		Descs: tables,
		ExecCfg: execCfg,
		schemaAccessors: newSchemaInterface(tables, execCfg.VirtualSchemas),
		DistSQLPlanner: execCfg.DistSQLPlanner,
	}
}
// LogicalSchemaAccessor is part of the resolver.SchemaResolver interface.
func (p *planner) LogicalSchemaAccessor() catalog.Accessor {
	return p.extendedEvalCtx.schemaAccessors.logical
}

// SemaCtx provides access to the planner's SemaCtx.
func (p *planner) SemaCtx() *tree.SemaContext {
	return &p.semaCtx
}

// ExtendedEvalContext returns the planner's extended eval context.
// Note: if the context will be modified, use ExtendedEvalContextCopy instead.
func (p *planner) ExtendedEvalContext() *extendedEvalContext {
	return &p.extendedEvalCtx
}

// ExtendedEvalContextCopy returns a modifiable copy of the planner's extended
// eval context (see extendedEvalContext.copy).
func (p *planner) ExtendedEvalContextCopy() *extendedEvalContext {
	return p.extendedEvalCtx.copy()
}

// CurrentDatabase is part of the resolver.SchemaResolver interface.
func (p *planner) CurrentDatabase() string {
	return p.SessionData().Database
}

// CurrentSearchPath is part of the resolver.SchemaResolver interface.
func (p *planner) CurrentSearchPath() sessiondata.SearchPath {
	return p.SessionData().SearchPath
}

// EvalContext provides convenient access to the planner's EvalContext().
func (p *planner) EvalContext() *tree.EvalContext {
	return &p.extendedEvalCtx.EvalContext
}

// Descriptors returns the planner's descriptor collection (& cache).
func (p *planner) Descriptors() *descs.Collection {
	return p.extendedEvalCtx.Descs
}

// ExecCfg implements the PlanHookState interface.
func (p *planner) ExecCfg() *ExecutorConfig {
	return p.extendedEvalCtx.ExecCfg
}

// GetOrInitSequenceCache returns the sequence cache for the session.
// If the sequence cache has not been used yet, it initializes the cache
// inside the session data.
func (p *planner) GetOrInitSequenceCache() sessiondata.SequenceCache {
	if p.SessionData().SequenceCache == nil {
		p.sessionDataMutator.initSequenceCache()
	}
	return p.SessionData().SequenceCache
}
// LeaseMgr returns the lease.Manager used by the planner's descriptor
// collection.
func (p *planner) LeaseMgr() *lease.Manager {
	return p.Descriptors().LeaseManager()
}

// Txn returns the transaction this planner is bound to (may be nil for an
// internal planner created without one).
func (p *planner) Txn() *kv.Txn {
	return p.txn
}

// User returns the user this planner runs as.
func (p *planner) User() security.SQLUsername {
	return p.SessionData().User()
}

// TemporarySchemaName returns the session-scoped temporary schema name,
// derived from the session ID.
func (p *planner) TemporarySchemaName() string {
	return temporarySchemaName(p.ExtendedEvalContext().SessionID)
}

// DistSQLPlanner returns the DistSQLPlanner
func (p *planner) DistSQLPlanner() *DistSQLPlanner {
	return p.extendedEvalCtx.DistSQLPlanner
}

// MigrationJobDeps returns the migration.JobDeps.
func (p *planner) MigrationJobDeps() migration.JobDeps {
	return p.execCfg.MigrationJobDeps
}
// GetTypeFromValidSQLSyntax implements the tree.EvalPlanner interface.
// We define this here to break the dependency from eval.go to the parser.
func (p *planner) GetTypeFromValidSQLSyntax(sql string) (*types.T, error) {
	ref, err := parser.GetTypeFromValidSQLSyntax(sql)
	if err != nil {
		return nil, err
	}
	// Resolve the parsed type reference (e.g. a user-defined type name)
	// through the planner's type resolver.
	return tree.ResolveType(context.TODO(), ref, p.semaCtx.GetTypeResolver())
}
// ParseQualifiedTableName implements the tree.EvalDatabase interface.
// This exists to get around a circular dependency between sql/sem/tree and
// sql/parser. sql/parser depends on tree to make objects, so tree cannot import
// ParseQualifiedTableName even though some builtins need that function.
// TODO(jordan): remove this once builtins can be moved outside of sql/sem/tree.
func (p *planner) ParseQualifiedTableName(sql string) (*tree.TableName, error) {
	return parser.ParseQualifiedTableName(sql)
}
// ResolveTableName implements the tree.EvalDatabase interface. It resolves
// tn to an existing table of any kind and returns its descriptor ID.
func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) (tree.ID, error) {
	flags := tree.ObjectLookupFlagsWithRequiredTableKind(tree.ResolveAnyTableKind)
	desc, err := resolver.ResolveExistingTableObject(ctx, p, tn, flags)
	if err != nil {
		return 0, err
	}
	return tree.ID(desc.GetID()), nil
}
// LookupTableByID looks up a table, by the given descriptor ID. Based on the
// CommonLookupFlags, it could use or skip the Collection cache. See
// Collection.getTableVersionByID for how it's used.
// TODO (SQLSchema): This should call into the set of SchemaAccessors instead
// of having its own logic for lookups.
func (p *planner) LookupTableByID(
	ctx context.Context, tableID descpb.ID,
) (catalog.TableDescriptor, error) {
	// Virtual tables are served from the virtual schema map, not the store.
	if entry, err := p.getVirtualTabler().getVirtualTableEntryByID(tableID); err == nil {
		return entry.desc, nil
	}
	flags := tree.ObjectLookupFlags{CommonLookupFlags: tree.CommonLookupFlags{AvoidCached: p.avoidCachedDescriptors}}
	table, err := p.Descriptors().GetImmutableTableByID(ctx, p.txn, tableID, flags)
	if err != nil {
		return nil, err
	}
	return table, nil
}
// TypeAsString enforces (not hints) that the given expression typechecks as a
// string and returns a function that can be called to get the string value
// during (planNode).Start. A NULL result is reported as an error; use
// TypeAsStringOrNull() to also allow NULLs to be returned.
func (p *planner) TypeAsString(
	ctx context.Context, e tree.Expr, op string,
) (func() (string, error), error) {
	typed, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
	if err != nil {
		return nil, err
	}
	eval := p.makeStringEvalFn(typed)
	fn := func() (string, error) {
		isNull, s, err := eval()
		switch {
		case err != nil:
			return "", err
		case isNull:
			return "", errors.Errorf("expected string, got NULL")
		}
		return s, nil
	}
	return fn, nil
}
// TypeAsStringOrNull is like TypeAsString but allows NULLs: the returned
// evaluation function reports NULL via its boolean result instead of erroring.
func (p *planner) TypeAsStringOrNull(
	ctx context.Context, e tree.Expr, op string,
) (func() (bool, string, error), error) {
	typedE, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
	if err != nil {
		return nil, err
	}
	return p.makeStringEvalFn(typedE), nil
}
// makeStringEvalFn wraps a type-checked string expression in a closure that
// evaluates it at execution time, reporting (isNull, value, error).
func (p *planner) makeStringEvalFn(typedE tree.TypedExpr) func() (bool, string, error) {
	return func() (bool, string, error) {
		datum, err := typedE.Eval(p.EvalContext())
		switch {
		case err != nil:
			return false, "", err
		case datum == tree.DNull:
			return true, "", nil
		}
		ds, ok := datum.(*tree.DString)
		if !ok {
			return false, "", errors.Errorf("failed to cast %T to string", datum)
		}
		return false, string(*ds), nil
	}
}
// KVStringOptValidate indicates the requested validation of a TypeAsStringOpts
// option.
type KVStringOptValidate string

// KVStringOptValidate values
const (
	// KVStringOptAny permits the option both with and without a value.
	KVStringOptAny KVStringOptValidate = `any`
	// KVStringOptRequireNoValue rejects the option when a value is supplied.
	KVStringOptRequireNoValue KVStringOptValidate = `no-value`
	// KVStringOptRequireValue rejects the option when no value is supplied.
	KVStringOptRequireValue KVStringOptValidate = `value`
)
// evalStringOptions evaluates the KVOption values as strings and returns them
// in a map keyed by option name. Options with no value (NULL) map to the
// empty string. Each option is validated against optValidate; unknown
// options, and value/no-value violations, produce an error.
func evalStringOptions(
	evalCtx *tree.EvalContext, opts []exec.KVOption, optValidate map[string]KVStringOptValidate,
) (map[string]string, error) {
	out := make(map[string]string, len(opts))
	for _, o := range opts {
		key := o.Key
		mode, known := optValidate[key]
		if !known {
			return nil, errors.Errorf("invalid option %q", key)
		}
		v, err := o.Value.Eval(evalCtx)
		if err != nil {
			return nil, err
		}
		if v == tree.DNull {
			if mode == KVStringOptRequireValue {
				return nil, errors.Errorf("option %q requires a value", key)
			}
			out[key] = ""
			continue
		}
		if mode == KVStringOptRequireNoValue {
			return nil, errors.Errorf("option %q does not take a value", key)
		}
		sv, ok := v.(*tree.DString)
		if !ok {
			return nil, errors.Errorf("expected string value, got %T", v)
		}
		out[key] = string(*sv)
	}
	return out, nil
}
// TypeAsStringOpts enforces (not hints) that the given expressions
// typecheck as strings, and returns a function that can be called to
// get the string value during (planNode).Start. Options are validated
// against optValidate in the same way as evalStringOptions.
func (p *planner) TypeAsStringOpts(
	ctx context.Context, opts tree.KVOptions, optValidate map[string]KVStringOptValidate,
) (func() (map[string]string, error), error) {
	typed := make(map[string]tree.TypedExpr, len(opts))
	for _, opt := range opts {
		k := string(opt.Key)
		validate, ok := optValidate[k]
		if !ok {
			return nil, errors.Errorf("invalid option %q", k)
		}
		if opt.Value == nil {
			if validate == KVStringOptRequireValue {
				return nil, errors.Errorf("option %q requires a value", k)
			}
			// A nil entry marks a value-less option; it becomes "" below.
			typed[k] = nil
			continue
		}
		if validate == KVStringOptRequireNoValue {
			return nil, errors.Errorf("option %q does not take a value", k)
		}
		r, err := tree.TypeCheckAndRequire(ctx, opt.Value, &p.semaCtx, types.String, k)
		if err != nil {
			return nil, err
		}
		typed[k] = r
	}
	// The returned closure evaluates the type-checked expressions at
	// execution time.
	fn := func() (map[string]string, error) {
		res := make(map[string]string, len(typed))
		for name, e := range typed {
			if e == nil {
				res[name] = ""
				continue
			}
			d, err := e.Eval(p.EvalContext())
			if err != nil {
				return nil, err
			}
			str, ok := d.(*tree.DString)
			if !ok {
				return res, errors.Errorf("failed to cast %T to string", d)
			}
			res[name] = string(*str)
		}
		return res, nil
	}
	return fn, nil
}
// TypeAsStringArray enforces (not hints) that the given expressions all typecheck as
// strings and returns a function that can be called to get the string values
// during (planNode).Start.
func (p *planner) TypeAsStringArray(
	ctx context.Context, exprs tree.Exprs, op string,
) (func() ([]string, error), error) {
	typedExprs := make([]tree.TypedExpr, len(exprs))
	for i, e := range exprs {
		typed, err := tree.TypeCheckAndRequire(ctx, e, &p.semaCtx, types.String, op)
		if err != nil {
			return nil, err
		}
		typedExprs[i] = typed
	}
	// Evaluation is deferred to statement start time.
	fn := func() ([]string, error) {
		strs := make([]string, len(typedExprs))
		for i, typed := range typedExprs {
			datum, err := typed.Eval(p.EvalContext())
			if err != nil {
				return nil, err
			}
			ds, ok := datum.(*tree.DString)
			if !ok {
				return strs, errors.Errorf("failed to cast %T to string", datum)
			}
			strs[i] = string(*ds)
		}
		return strs, nil
	}
	return fn, nil
}
// SessionData is part of the PlanHookState interface.
// It returns the session settings held by the planner's eval context.
func (p *planner) SessionData() *sessiondata.SessionData {
	return p.EvalContext().SessionData
}
// Ann is a shortcut for the Annotations from the eval context.
func (p *planner) Ann() *tree.Annotations {
	return p.ExtendedEvalContext().EvalContext.Annotations
}
// txnModesSetter is an interface used by SQL execution to influence the current
// transaction.
type txnModesSetter interface {
	// setTransactionModes updates some characteristics of the current
	// transaction.
	// asOfTs, if not empty, is the evaluation of modes.AsOf.
	setTransactionModes(modes tree.TransactionModes, asOfTs hlc.Timestamp) error
}
// CompactEngineSpan is part of the EvalPlanner interface.
// It dials the target node directly and issues a CompactEngineSpanRequest
// against the given store for the key span [startKey, endKey).
func (p *planner) CompactEngineSpan(
	ctx context.Context, nodeID int32, storeID int32, startKey []byte, endKey []byte,
) error {
	// Store-addressed operations are only available to the system tenant.
	if !p.ExecCfg().Codec.ForSystemTenant() {
		return errorutil.UnsupportedWithMultiTenancy(errorutil.FeatureNotAvailableToNonSystemTenantsIssue)
	}
	conn, err := p.ExecCfg().DistSender.NodeDialer().Dial(ctx, roachpb.NodeID(nodeID), rpc.DefaultClass)
	if err != nil {
		return errors.Wrapf(err, "could not dial node ID %d", nodeID)
	}
	client := kvserver.NewPerStoreClient(conn)
	req := &kvserver.CompactEngineSpanRequest{
		StoreRequestHeader: kvserver.StoreRequestHeader{
			NodeID:  roachpb.NodeID(nodeID),
			StoreID: roachpb.StoreID(storeID),
		},
		Span: roachpb.Span{Key: roachpb.Key(startKey), EndKey: roachpb.Key(endKey)},
	}
	// The response carries no payload of interest; only the error matters.
	_, err = client.CompactEngineSpan(ctx, req)
	return err
}
// validateDescriptor is a convenience function for validating
// descriptors in the context of a planner. It builds an uncached descriptor
// getter over the planner's transaction and runs self- and cross-reference
// validation.
func validateDescriptor(ctx context.Context, p *planner, descriptor catalog.Descriptor) error {
	bdg := catalogkv.NewOneLevelUncachedDescGetter(p.Txn(), p.ExecCfg().Codec)
	return catalog.ValidateSelfAndCrossReferences(ctx, bdg, descriptor)
}
|
package chanPool
// Job is a unit of work submitted to the pool; it takes no arguments and
// returns nothing.
type Job func()
|
package libp2p
import "github.com/libp2p/go-libp2p-core/protocol"
const (
	// PandoProtocolID is the libp2p protocol that pando API uses
	PandoProtocolID protocol.ID = "/Pando/libp2p/0.0.1"
)
|
package main
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"log"
"net/http"
"os"
"strconv"
)
// main registers the four arithmetic endpoints on a gorilla/mux router and
// serves them on port 8080.
func main() {
	router := mux.NewRouter()
	router.HandleFunc("/api/{x}/add/{y}", addHandler).Methods("GET")
	router.HandleFunc("/api/{x}/sub/{y}", subHandler).Methods("GET")
	router.HandleFunc("/api/{x}/mult/{y}", multHandler).Methods("GET")
	router.HandleFunc("/api/{x}/div/{y}", divHandler).Methods("GET")
	log.Fatal(http.ListenAndServe(":8080", router))
}
// addHandler responds with the sum of the integer path parameters x and y.
// Non-integer parameters are rejected with 400 instead of being silently
// treated as zero (the original ignored the strconv.Atoi errors).
func addHandler(w http.ResponseWriter, r *http.Request) {
	type DATA struct {
		X int
		Y int
	}
	vars := mux.Vars(r)
	x, err := strconv.Atoi(vars["x"])
	if err != nil {
		http.Error(w, "x must be an integer", http.StatusBadRequest)
		return
	}
	y, err := strconv.Atoi(vars["y"])
	if err != nil {
		http.Error(w, "y must be an integer", http.StatusBadRequest)
		return
	}
	rslt := DATA{
		X: x,
		Y: y,
	}
	// Echo the parsed operands to stdout as JSON (server-side trace).
	if jsonStr, err := json.Marshal(rslt); err != nil {
		fmt.Println("error:", err)
	} else {
		os.Stdout.Write(jsonStr)
	}
	fmt.Fprintln(w, "x + y = ", x+y)
}
// subHandler responds with the difference of the integer path parameters
// x and y. Non-integer parameters are rejected with 400 instead of being
// silently treated as zero (the original ignored the strconv.Atoi errors).
func subHandler(w http.ResponseWriter, r *http.Request) {
	type DATA struct {
		X int
		Y int
	}
	vars := mux.Vars(r)
	x, err := strconv.Atoi(vars["x"])
	if err != nil {
		http.Error(w, "x must be an integer", http.StatusBadRequest)
		return
	}
	y, err := strconv.Atoi(vars["y"])
	if err != nil {
		http.Error(w, "y must be an integer", http.StatusBadRequest)
		return
	}
	rslt := DATA{
		X: x,
		Y: y,
	}
	// Echo the parsed operands to stdout as JSON (server-side trace).
	if jsonStr, err := json.Marshal(rslt); err != nil {
		fmt.Println("error:", err)
	} else {
		os.Stdout.Write(jsonStr)
	}
	fmt.Fprintln(w, "x - y = ", x-y)
}
// multHandler responds with the product of the integer path parameters
// x and y. Non-integer parameters are rejected with 400 instead of being
// silently treated as zero (the original ignored the strconv.Atoi errors).
func multHandler(w http.ResponseWriter, r *http.Request) {
	type DATA struct {
		X int
		Y int
	}
	vars := mux.Vars(r)
	x, err := strconv.Atoi(vars["x"])
	if err != nil {
		http.Error(w, "x must be an integer", http.StatusBadRequest)
		return
	}
	y, err := strconv.Atoi(vars["y"])
	if err != nil {
		http.Error(w, "y must be an integer", http.StatusBadRequest)
		return
	}
	rslt := DATA{
		X: x,
		Y: y,
	}
	// Echo the parsed operands to stdout as JSON (server-side trace).
	if jsonStr, err := json.Marshal(rslt); err != nil {
		fmt.Println("error:", err)
	} else {
		os.Stdout.Write(jsonStr)
	}
	fmt.Fprintln(w, "x * y = ", x*y)
}
// divHandler responds with the integer quotient of the path parameters
// x and y. Non-integer parameters and a zero divisor are rejected with 400;
// the original ignored the strconv.Atoi errors and panicked on x/0
// (integer division by zero), killing the request.
func divHandler(w http.ResponseWriter, r *http.Request) {
	type DATA struct {
		X int
		Y int
	}
	vars := mux.Vars(r)
	x, err := strconv.Atoi(vars["x"])
	if err != nil {
		http.Error(w, "x must be an integer", http.StatusBadRequest)
		return
	}
	y, err := strconv.Atoi(vars["y"])
	if err != nil {
		http.Error(w, "y must be an integer", http.StatusBadRequest)
		return
	}
	if y == 0 {
		// x/y below would panic with "integer divide by zero".
		http.Error(w, "division by zero", http.StatusBadRequest)
		return
	}
	rslt := DATA{
		X: x,
		Y: y,
	}
	// Echo the parsed operands to stdout as JSON (server-side trace).
	if jsonStr, err := json.Marshal(rslt); err != nil {
		fmt.Println("error:", err)
	} else {
		os.Stdout.Write(jsonStr)
	}
	fmt.Fprintln(w, "x / y = ", x/y)
}
|
package run
import (
"context"
"math/rand"
"time"
)
// WithRetry enables an application to handle transient failures by transparently retrying a failed operation.
// fn is attempted at most len(backoffs) times; after each attempt the
// classifier decides whether to stop (Succeed, Cancel) or sleep and retry
// (Retry). Each backoff is jittered upward by up to 50%.
func WithRetry(backoffs []time.Duration, classifier func(error) Result, fn func(context.Context) error) func(context.Context) error {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	return func(ctx context.Context) error {
		var lastErr error
		for _, backoff := range backoffs {
			lastErr = fn(ctx)
			result := classifier(lastErr)
			if result == Succeed || result == Cancel {
				return lastErr
			}
			if result == Retry {
				jitter := rnd.Float64() / 2
				Sleep(ctx, backoff+time.Duration(jitter*float64(backoff)))
			}
		}
		// All attempts exhausted; report the last error observed.
		return lastErr
	}
}
// Result is the type returned by error classifier functions to indicate whether a retry should proceed.
type Result int

// In case of errors it can handle the failure using the following strategies.
const (
	// Succeed indicates that the run was a success, there is no need to retry
	Succeed Result = iota
	// Cancel indicates a hard failure that should not be retried.
	// It indicates that the failure isn't transient or is unlikely to be successful if repeated,
	// the application should cancel the operation and report the error.
	// For example, an authentication failure caused by providing invalid credentials is not
	// likely to succeed no matter how many times it's attempted.
	Cancel
	// Retry indicates a soft failure and should be retried.
	// If the specific fault reported is unusual or rare, it might have been caused by unusual circumstances
	// such as a network packet becoming corrupted while it was being transmitted. In this case,
	// the application could retry the failing request again immediately because the same failure is unlikely
	// to be repeated and the request will probably be successful.
	Retry
)
// NotNilClassifier returns an error classifier that maps a nil error to
// Succeed and any non-nil error to Retry.
func NotNilClassifier() func(error) Result {
	return func(err error) Result {
		if err != nil {
			return Retry
		}
		return Succeed
	}
}
// ExponentialBackoff generates an exponential back-off strategy, retrying the given
// number of times and doubling the waiting time in every retry.
func ExponentialBackoff(retries int, initial time.Duration) []time.Duration {
durations := make([]time.Duration, retries)
for i := range durations {
durations[i] = initial
initial *= 2
}
return durations
}
// ConstantBackoff generates a back-off strategy of retrying the given
// number of times and waiting the specified time duration after each one.
func ConstantBackoff(retries int, backoff time.Duration) []time.Duration {
durations := make([]time.Duration, retries)
for i := range durations {
durations[i] = backoff
}
return durations
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"context"
"github.com/google/gapid/core/context/keys"
)
// taskKeyTy is an unexported key type so this package's context key cannot
// collide with keys defined by other packages.
type taskKeyTy string

// taskKey is the context key under which the current *Task is stored.
const taskKey = taskKeyTy("task")
// PutTask attaches a task to a Context.
// The task can later be recovered with GetTask.
func PutTask(ctx context.Context, t *Task) context.Context {
	return keys.WithValue(ctx, taskKey, t)
}
// GetTask retrieves the task from a context previously annotated by PutTask.
// It returns nil when the context carries no task.
func GetTask(ctx context.Context) *Task {
	val := ctx.Value(taskKey)
	if val == nil {
		return nil
	}
	// Only PutTask stores under taskKey, so the value is always a *Task.
	return val.(*Task)
}
|
package users
import (
"github.com/BalkanTech/goilerplate/config"
db "github.com/BalkanTech/goilerplate/databases"
"testing"
"strconv"
"gopkg.in/mgo.v2/bson"
"github.com/jinzhu/gorm"
"gopkg.in/mgo.v2"
)
// testOK and testFailed are the check-mark / cross-mark glyphs printed in
// verbose test output.
const testOK = "\u2714"
const testFailed = "\u2718"

// GormConfig points the Gorm-backed tests at a throwaway sqlite3 file.
var GormConfig = &config.Config{
	Database: config.Database{
		Type: "sqlite3",
		DB:   "/tmp/test.db",
	},
}

// MgoConfig points the mgo-backed tests at a local MongoDB "test" database.
var MgoConfig = &config.Config{
	Database: config.Database{
		Type:     "mongodb",
		Host:     "localhost",
		DB:       "test",
		User:     "test",
		Password: "test",
	},
}
// getTestUser builds the fixture user shared by all tests. GID is the SQL
// (Gorm) primary key and MID the fixed Mongo ObjectId, so the same fixture
// works against both backends; the password is hashed via SetPassword so the
// authentication tests can verify the plaintext "test".
func getTestUser() *User {
	user := &User{GID: 1, MID: bson.ObjectIdHex("588c89a388c8f1120a0828f6"), Username: "test", Email: "test@example.com"}
	user.SetPassword("test")
	return user
}
// getUserControllerGorm opens the sqlite test database and wraps it in a
// Gorm-backed user controller. The caller is responsible for closing the
// returned *gorm.DB.
func getUserControllerGorm() (*Users, *gorm.DB, error) {
	conn, err := db.NewGormConnection(GormConfig)
	if err != nil {
		return nil, nil, err
	}
	manager := NewUserManagerGorm(conn)
	return UserController(manager), conn, nil
}
// getUserControllerMgo connects to the test MongoDB and wraps the "users"
// collection in an mgo-backed user controller. The caller is responsible for
// closing the returned session.
func getUserControllerMgo() (*Users, *mgo.Session, error) {
	conn, err := db.NewMgoConnection(MgoConfig)
	if err != nil {
		return nil, nil, err
	}
	manager := NewUserManagerMgo(conn, "users")
	return UserController(manager), conn.Session, nil
}
// TestInit will test the initialization processes with dropping the table.
// Fix: the original deferred d.Close()/s.Close() BEFORE checking err; on a
// failed connection the handle is nil and the deferred Close panics, masking
// the real error. The defer now follows the error check. Also fixes the
// "drop an initialize" message typo.
func TestInit(t *testing.T) {
	t.Log("Testing Init for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close() // safe: only reached when d is non-nil
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Init(true)
		if err != nil {
			t.Fatalf("\tExpected to be able to drop and initialize the table, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to drop and initialize the table. ", testOK)
	}
	t.Log("Testing Init for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close() // safe: only reached when s is non-nil
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Init(true)
		if err != nil {
			t.Fatalf("\tExpected to be able to drop and initialize the table, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to drop and initialize the table. ", testOK)
	}
}
// TestCreate tries to create a new user in the database.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestCreate(t *testing.T) {
	testUser := getTestUser()
	t.Log("Testing Create for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Create(testUser)
		if err != nil {
			t.Fatalf("\tExpected to be able to create, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to create. ", testOK)
	}
	t.Log("Testing Create for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		t.Log(testUser)
		err = users.Create(testUser)
		if err != nil {
			t.Fatalf("\tExpected to be able to create, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to create. ", testOK)
	}
}
// TestGetByID tries to get a user from the database by ID.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestGetByID(t *testing.T) {
	user1 := getTestUser()
	t.Log("Testing GetByID for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		id := strconv.FormatUint(uint64(user1.GID), 10)
		user2, err := users.GetByID(id)
		if err != nil {
			t.Fatalf("\tExpected to be able to read by ID, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read by ID. ", testOK)
		if user2.GID != user1.GID || user2.Username != user1.Username || user2.Email != user1.Email {
			t.Fatalf("\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\""+
				", but got ID: %d, Username: \"%s\", Email: \"%s\" instead. %v",
				user1.GID, user1.Username, user1.Email,
				user2.GID, user2.Username, user2.Email, testFailed)
		}
		t.Logf("\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\". %v",
			user1.GID, user1.Username, user1.Email, testOK)
	}
	t.Log("Testing GetByID for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		id := user1.MID.Hex()
		t.Log(id)
		user2, err := users.GetByID(id)
		if err != nil {
			t.Fatalf("\tExpected to be able to read by ID, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read by ID. ", testOK)
		if user2.MID.Hex() != user1.MID.Hex() || user2.Username != user1.Username || user2.Email != user1.Email {
			t.Fatalf("\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\""+
				", but got ID: %s, Username: \"%s\", Email: \"%s\" instead. %v",
				user1.MID.Hex(), user1.Username, user1.Email,
				user2.MID.Hex(), user2.Username, user2.Email, testFailed)
		}
		t.Logf("\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\". %v",
			user1.MID.Hex(), user1.Username, user1.Email, testOK)
	}
}
// TestGetByEmail tries to get a user from the database by email.
// (The original comment said "TestGetByID" — copy/paste slip.)
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestGetByEmail(t *testing.T) {
	user1 := getTestUser()
	t.Log("Testing GetByEmail for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		user2, err := users.GetByEmail(user1.Email)
		if err != nil {
			t.Fatalf("\tExpected to be able to read by Email, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read by Email. ", testOK)
		if user2.GID != user1.GID || user2.Username != user1.Username || user2.Email != user1.Email {
			t.Fatalf("\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\""+
				", but got ID: %d, Username: \"%s\", Email: \"%s\" instead. %v",
				user1.GID, user1.Username, user1.Email,
				user2.GID, user2.Username, user2.Email, testFailed)
		}
		t.Logf("\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\". %v",
			user1.GID, user1.Username, user1.Email, testOK)
	}
	t.Log("Testing GetByEmail for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		user2, err := users.GetByEmail(user1.Email)
		if err != nil {
			t.Fatalf("\tExpected to be able to read by Email, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read by Email. ", testOK)
		if user2.MID.Hex() != user1.MID.Hex() || user2.Username != user1.Username || user2.Email != user1.Email {
			t.Fatalf("\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\""+
				", but got ID: %s, Username: \"%s\", Email: \"%s\" instead. %v",
				user1.MID.Hex(), user1.Username, user1.Email,
				user2.MID.Hex(), user2.Username, user2.Email, testFailed)
		}
		t.Logf("\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\". %v",
			user1.MID.Hex(), user1.Username, user1.Email, testOK)
	}
}
// TestGet tries to get a user from the database by a custom query.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestGet(t *testing.T) {
	user1 := getTestUser()
	t.Log("Testing Get for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		user2, err := users.Get("email = ? AND username = ?", user1.Email, user1.Username)
		if err != nil {
			t.Fatalf("\tExpected to be able to read by custom query, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read by custom query. ", testOK)
		if user2.GID != user1.GID || user2.Username != user1.Username || user2.Email != user1.Email {
			t.Fatalf("\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\""+", but got ID: %d, Username: \"%s\", Email: \"%s\" instead. %v",
				user1.GID, user1.Username, user1.Email,
				user2.GID, user2.Username, user2.Email, testFailed)
		}
		t.Logf("\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\". %v",
			user1.GID, user1.Username, user1.Email, testOK)
	}
	t.Log("Testing Get for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		user2, err := users.Get(bson.M{"email": user1.Email, "username": user1.Username})
		if err != nil {
			t.Fatalf("\tExpected to be able to read by custom query, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read by custom query. ", testOK)
		if user2.MID.Hex() != user1.MID.Hex() || user2.Username != user1.Username || user2.Email != user1.Email {
			t.Fatalf("\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\""+", but got ID: %s, Username: \"%s\", Email: \"%s\" instead. %v",
				user1.MID.Hex(), user1.Username, user1.Email,
				user2.MID.Hex(), user2.Username, user2.Email, testFailed)
		}
		t.Logf("\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\". %v",
			user1.MID.Hex(), user1.Username, user1.Email, testOK)
	}
}
// TestFind tries to get a user array from the database by a custom query.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestFind(t *testing.T) {
	user1 := getTestUser()
	t.Log("Testing Find for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		usr, err := users.Find("id = ?", user1.GID)
		if err != nil {
			t.Fatalf("\tExpected to be able to read many by custom query, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read many by custom query. ", testOK)
		if len(*usr) != 1 {
			t.Fatalf("\t\tExpected array length to be 1, but got %d instead. %v", len(*usr), testFailed)
		}
		t.Log("\t\tExpected array length to be 1. ", testOK)
	}
	t.Log("Testing Find for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		usr, err := users.Find(bson.M{"_id": user1.MID})
		if err != nil {
			t.Fatalf("\tExpected to be able to read many by custom query, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to read many by custom query. ", testOK)
		if len(*usr) != 1 {
			t.Fatalf("\t\tExpected array length to be 1, but got %d instead. %v", len(*usr), testFailed)
		}
		t.Log("\t\tExpected array length to be 1. ", testOK)
	}
}
// TestAuthentication tries to perform authentication by Username, Email, Username or email.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestAuthentication(t *testing.T) {
	user1 := getTestUser()
	// Table of authentication modes; each is run against both backends.
	tests := []struct {
		Arg         string
		Method      uint
		Description string
	}{
		{"test", AuthByUsername, "username"},
		{"test@example.com", AuthByEmail, "email"},
		{"test", AuthByUsernameOrEmail, "username or email"},
		{"test@example.com", AuthByUsernameOrEmail, "username or email"},
	}
	t.Log("Testing Authentication for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		for _, test := range tests {
			t.Logf("\tAuthenticate by %s with \"%s\"", test.Description, test.Arg)
			{
				user2, err := users.Authenticate(test.Arg, "test", test.Method)
				if err != nil {
					t.Fatalf("\t\tExpected to be able to authenticate, but got error: %s. %v", err, testFailed)
				}
				t.Log("\t\tExpected to be able to authenticate. ", testOK)
				if user2.GID != user1.GID || user2.Username != user1.Username || user2.Email != user1.Email {
					t.Fatalf("\t\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\""+
						", but got ID: %d, Username: \"%s\", Email: \"%s\" instead. %v",
						user1.GID, user1.Username, user1.Email,
						user2.GID, user2.Username, user2.Email, testFailed)
				}
				t.Logf("\t\t\tExpected ID: %d, Username: \"%s\", Email: \"%s\". %v",
					user1.GID, user1.Username, user1.Email, testOK)
			}
		}
	}
	t.Log("Testing Authentication for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		for _, test := range tests {
			t.Logf("\tAuthenticate by %s with \"%s\"", test.Description, test.Arg)
			{
				user2, err := users.Authenticate(test.Arg, "test", test.Method)
				if err != nil {
					t.Fatalf("\t\tExpected to be able to authenticate, but got error: %s. %v", err, testFailed)
				}
				t.Log("\t\tExpected to be able to authenticate. ", testOK)
				if user2.MID.Hex() != user1.MID.Hex() || user2.Username != user1.Username || user2.Email != user1.Email {
					t.Fatalf("\t\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\""+
						", but got ID: %s, Username: \"%s\", Email: \"%s\" instead. %v",
						user1.MID.Hex(), user1.Username, user1.Email,
						user2.MID.Hex(), user2.Username, user2.Email, testFailed)
				}
				t.Logf("\t\t\tExpected ID: %s, Username: \"%s\", Email: \"%s\". %v",
					user1.MID.Hex(), user1.Username, user1.Email, testOK)
			}
		}
	}
}
// TestUpdate tries to save an updated user to the database.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestUpdate(t *testing.T) {
	user1 := getTestUser()
	user1.Email = "testuser@example.com"
	t.Log("Testing Update for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Update(user1)
		if err != nil {
			t.Fatalf("\tExpected to be able to update, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to update. ", testOK)
	}
	t.Log("Testing Update for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Update(user1)
		if err != nil {
			t.Fatalf("\tExpected to be able to update, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to update. ", testOK)
	}
}
// TestDelete tries to delete the test user in the database.
// Fix: defer Close() moved after the error check — the handle is nil when the
// connection fails, and the original deferred Close would panic.
func TestDelete(t *testing.T) {
	user1 := getTestUser()
	t.Log("Testing Delete for Gorm")
	{
		users, d, err := getUserControllerGorm()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer d.Close()
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Delete(user1)
		if err != nil {
			t.Fatalf("\tExpected to be able to delete, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to delete. ", testOK)
	}
	t.Log("Testing Delete for MGO")
	{
		users, s, err := getUserControllerMgo()
		if err != nil {
			t.Fatalf("\tExpected to get a user controller, but got error: %s. %v", err, testFailed)
		}
		defer s.Close()
		t.Log("\tExpected to get a user controller", testOK)
		err = users.Delete(user1)
		if err != nil {
			t.Fatalf("\tExpected to be able to delete, but got error: %s. %v", err, testFailed)
		}
		t.Log("\tExpected to be able to delete. ", testOK)
	}
}
func TestUser_GetAvatarURL(t *testing.T) {
u := &User{Email: "MyEmailAddress@example.com"}
u.setEmailMD5()
t.Log("Testing User.GetAvatarURL without a profile AvatarURL")
{
expected := "//www.gravatar.com/avatar/0bc83cb571cd1c50ba6f3e8a78ef1346"
response := u.GetAvatarURL()
if response != expected {
t.Fatalf("\tExpected response \"%s\", but got \"%s\". %v", expected, response, testFailed)
}
t.Logf("\tExpected response \"%s\". %v", expected, testOK)
}
t.Log("Testing User.GetAvatarURL with a profile AvatarURL")
{
u.Profile = Profile{AvatarURL: "http://example.com/test.png"}
expected := "http://example.com/test.png"
response := u.GetAvatarURL()
if response != expected {
t.Fatalf("\tExpected response \"%s\", but got \"%s\". %v", expected, response, testFailed)
}
t.Logf("\tExpected response \"%s\". %v", expected, testOK)
}
} |
package main
import "fmt"
// main demonstrates basic Go declarations: type inference, string
// concatenation, and grouped variable/constant blocks.
// Fix: the original did not compile — "fmt" was imported but unused, and the
// local variables a, b, c were declared and never used (both are hard errors
// in Go). Printing the values satisfies the compiler and shows the results.
func main() {
	// Go has type inference
	x := "string"
	// The + operator also does concatenation.
	x += "string"
	// Constants:
	const i int = 0
	// Multiple variables:
	var (
		a = 0
		b = 1
		c = 2
	)
	// Multiple constants:
	const (
		d = 3
		e = 4
	)
	// Use every declaration so the file compiles (and exercise "fmt").
	fmt.Println(x, i, a, b, c, d, e)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analysis
import (
"github.com/google/gapid/gapil/ast"
"github.com/google/gapid/gapil/semantic"
)
// findUnreachables returns the list of blocks and statements that are found
// by traversing api which are not found in reached.
func findUnreachables(
	api *semantic.API,
	mappings *semantic.Mappings,
	reached map[ast.Node]struct{}) []Unreachable {
	// Output list of unreachable blocks and statements.
	unreachables := []Unreachable{}
	// Blocks and statements that have already been reported as unreachable.
	reported := map[ast.Node]struct{}{}
	// check looks to see if n has been marked as reached. If n has not been
	// reached then it is added to the list of unreachables and check returns
	// false.
	check := func(n semantic.Node) bool {
		// A semantic node can map to several AST nodes; any unreached one
		// makes the whole semantic node count as unreachable.
		for _, a := range mappings.SemanticToAST[n] {
			if _, ok := reached[a]; !ok {
				// Ensure this is reported only once.
				if _, ok := reported[a]; !ok {
					reported[a] = struct{}{}
					at := mappings.AST.CST(a)
					unreachables = append(unreachables, Unreachable{At: at, Node: n})
				}
				return false
			}
		}
		return true
	}
	// The recursive traversal function.
	var traverse func(n semantic.Node)
	traverse = func(n semantic.Node) {
		switch n := n.(type) {
		case *semantic.Function:
			// Functions annotated with ignore_unreachables opt out of the
			// analysis entirely.
			if n.GetAnnotation("ignore_unreachables") == nil {
				semantic.Visit(n, traverse)
			}
		case *semantic.Block:
			if check(n) {
				for _, s := range n.Statements {
					// Stop descending once an unreachable statement is hit.
					if !check(s) {
						break
					}
					traverse(s)
				}
			}
		case semantic.Type, *semantic.Callable:
			// Not interested in these.
		default:
			semantic.Visit(n, traverse)
		}
	}
	// Perform the traversal and find all unreachables.
	traverse(api)
	return unreachables
}
|
package main
// course holds the scheduling data for one course: the raw JSON record it was
// parsed from, its name, and the constraints derived from that record.
type course struct {
	jsonobj     map[string]interface{} // raw course record as decoded from JSON
	name        string
	constraints *constraints
}
// lecture is one schedulable unit of a course. The solver assigns it an
// instructor, a room, and a timeslot; the visted* maps track candidates
// already tried so backtracking does not revisit them. (The "visted"
// spelling is kept — the fields are referenced throughout this file.)
type lecture struct {
	coursejsonobj map[string]interface{} // raw JSON of the owning course
	course        *course
	jsonobj       map[string]interface{} // raw JSON of this lecture
	constraints   *constraints

	assignedInstructor   *instructor          // current assignment; nil when unassigned
	instructorCandidates []*instructor        // pool to pick from
	vistedInstructors    map[*instructor]bool // candidates already tried

	assignedRoom   *room
	roomCandidates []*room
	vistedRooms    map[*room]bool

	assignedTimeslot *timeslot
	timeslots        []*timeslot
	vistedTimeslots  map[*timeslot]bool

	// NOTE(review): resolved/resolving appear to track solver progress, but
	// they are only read/written elsewhere — confirm against the solver code.
	resolved  bool
	resolving bool
}
// init stores the source JSON objects on the lecture and resets the
// visited-candidate bookkeeping to fresh, empty maps.
func (lecture *lecture) init(coursejsonobj, jsonobj map[string]interface{}) {
	lecture.jsonobj = jsonobj
	lecture.coursejsonobj = coursejsonobj
	lecture.vistedTimeslots = map[*timeslot]bool{}
	lecture.vistedInstructors = map[*instructor]bool{}
	lecture.vistedRooms = map[*room]bool{}
}
// getJSONOBJ returns the raw JSON object this course was built from.
func (course *course) getJSONOBJ() map[string]interface{} {
	return course.jsonobj
}

// getJSONOBJ returns the raw JSON object this lecture was built from.
func (lecture *lecture) getJSONOBJ() map[string]interface{} {
	return lecture.jsonobj
}
// setTimeslot drops any current timeslot assignment and assigns the first
// not-yet-tried timeslot, reporting whether an assignment was made.
// The state parameter is unused here; it is kept for signature symmetry with
// setRoom and setInstructor.
func (lecture *lecture) setTimeslot(state *state) bool {
	if lecture.assignedTimeslot != nil {
		lecture.unassignTimeslot()
	}
	for _, candidate := range lecture.timeslots {
		if lecture.vistedTimeslots[candidate] {
			continue
		}
		lecture.assignTimeslot(candidate)
		lecture.vistedTimeslots[candidate] = true
		return true
	}
	return false
}
// setRoom drops any current room assignment and assigns the first
// not-yet-tried candidate room that accepts this lecture, reporting whether
// an assignment was made.
func (lecture *lecture) setRoom(state *state) bool {
	if lecture.assignedRoom != nil {
		lecture.unassignRoom()
	}
	for _, candidate := range lecture.roomCandidates {
		if lecture.vistedRooms[candidate] || !candidate.validLecture(lecture) {
			continue
		}
		lecture.assignRoom(candidate)
		lecture.vistedRooms[candidate] = true
		return true
	}
	return false
}
// setInstructor drops any current instructor assignment and assigns the
// first not-yet-tried candidate instructor that accepts this lecture,
// reporting whether an assignment was made.
func (lecture *lecture) setInstructor(state *state) bool {
	if lecture.assignedInstructor != nil {
		lecture.unassignInstructor()
	}
	for _, candidate := range lecture.instructorCandidates {
		if lecture.vistedInstructors[candidate] || !candidate.validLecture(lecture) {
			continue
		}
		lecture.assignInstructor(candidate)
		lecture.vistedInstructors[candidate] = true
		return true
	}
	return false
}
// assignInstructor links the lecture and instructor in both directions.
func (lecture *lecture) assignInstructor(instructor *instructor) {
	lecture.assignedInstructor = instructor
	instructor.assignLecture(lecture)
}

// assignRoom links the lecture and room in both directions.
func (lecture *lecture) assignRoom(room *room) {
	lecture.assignedRoom = room
	room.assignLecture(lecture)
}

// assignTimeslot links the lecture and timeslot in both directions.
func (lecture *lecture) assignTimeslot(timeslot *timeslot) {
	lecture.assignedTimeslot = timeslot
	timeslot.assignLecture(lecture)
}

// unassignInstructor severs the bidirectional link. assignedInstructor must
// be non-nil; callers check this before calling.
func (lecture *lecture) unassignInstructor() {
	lecture.assignedInstructor.unassignLecture(lecture)
	lecture.assignedInstructor = nil
}

// unassignRoom severs the bidirectional link. assignedRoom must be non-nil;
// callers check this before calling.
func (lecture *lecture) unassignRoom() {
	lecture.assignedRoom.unassignLecture(lecture)
	lecture.assignedRoom = nil
}

// unassignTimeslot severs the bidirectional link. assignedTimeslot must be
// non-nil; callers check this before calling.
func (lecture *lecture) unassignTimeslot() {
	lecture.assignedTimeslot.unassignLecture(lecture)
	lecture.assignedTimeslot = nil
}
// resetRooms clears the current room assignment (if any) and forgets which
// room candidates have been tried, so the search can start over.
func (lecture *lecture) resetRooms() {
	if lecture.assignedRoom != nil {
		lecture.unassignRoom()
	}
	lecture.vistedRooms = map[*room]bool{}
}

// resetInstructors clears the current instructor assignment (if any) and
// forgets which instructor candidates have been tried.
func (lecture *lecture) resetInstructors() {
	if lecture.assignedInstructor != nil {
		lecture.unassignInstructor()
	}
	lecture.vistedInstructors = map[*instructor]bool{}
}

// resetTimeslots clears the current timeslot assignment (if any) and forgets
// which timeslots have been tried.
func (lecture *lecture) resetTimeslots() {
	if lecture.assignedTimeslot != nil {
		lecture.unassignTimeslot()
	}
	lecture.vistedTimeslots = map[*timeslot]bool{}
}
|
package builder_test
import (
"testing"
"time"
"github.com/sohaha/zlsgo"
"github.com/zlsgo/zdb"
"github.com/zlsgo/zdb/builder"
"github.com/zlsgo/zdb/driver"
"github.com/zlsgo/zdb/driver/mssql"
"github.com/zlsgo/zdb/driver/mysql"
"github.com/zlsgo/zdb/driver/postgres"
"github.com/zlsgo/zdb/driver/sqlite3"
"github.com/zlsgo/zdb/schema"
)
// TestCreateTable checks CREATE TABLE SQL generation: first a plain
// definition built from raw column attribute strings, then a temporary
// table under the MySQL driver with a composite key and a table option.
func TestCreateTable(t *testing.T) {
	tt := zlsgo.NewTest(t)
	b := builder.CreateTable("user").IfNotExists()
	b.Define("id", "BIGINT(20)", "NOT NULL", "AUTO_INCREMENT", "PRIMARY KEY", `COMMENT "用户ID"`)
	sql := b.String()
	t.Log(sql)
	tt.Equal(`CREATE TABLE IF NOT EXISTS user (id BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY COMMENT "用户ID")`, sql)
	// Temporary table: extra columns, a KEY definition and a charset option.
	b = builder.CreateTempTable("user").IfNotExists()
	b.SetDriver(&mysql.Config{})
	b.Define("id", "BIGINT(20)", "NOT NULL", "AUTO_INCREMENT", "PRIMARY KEY")
	b.Define("name", "VARCHAR(255)", "NOT NULL")
	b.Define("created_at", "DATETIME", "NOT NULL")
	b.Define("modified_at", "DATETIME", "NOT NULL")
	b.Define("KEY", "idx_name_modified_at", "name, modified_at")
	b.Option("DEFAULT CHARACTER SET", "utf8mb4")
	sql = b.String()
	t.Log(sql)
	tt.Equal(`CREATE TEMPORARY TABLE IF NOT EXISTS user (id BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255) NOT NULL, created_at DATETIME NOT NULL, modified_at DATETIME NOT NULL, KEY idx_name_modified_at name, modified_at) DEFAULT CHARACTER SET utf8mb4`, sql)
}
// TestCreateTableQuick checks schema.Field-based column definitions:
// the same logical columns must render dialect-appropriate SQL for
// MySQL and SQLite.
func TestCreateTableQuick(t *testing.T) {
	tt := zlsgo.NewTest(t)
	for dialect, expected := range map[driver.Dialect]string{
		&mysql.Config{}:   "CREATE TABLE user (id bigint UNSIGNED PRIMARY KEY COMMENT 'ID', name varchar(100) COMMENT '用户名', created_at datetime NOT NULL COMMENT '创建时间') DEFAULT CHARACTER SET utf8mb4",
		&sqlite3.Config{}: "CREATE TABLE user (id integer PRIMARY KEY, name text, created_at datetime NOT NULL)",
	} {
		b := builder.CreateTable("user")
		// Interface-satisfaction check only; the value itself is unused.
		d, ok := dialect.(driver.IfeConfig)
		if !ok {
			t.Errorf("%T is not zdb.IfeConfig", dialect)
		}
		_ = d
		b.SetDriver(dialect)
		// Column types are inferred from the zero values passed to NewField
		// (uint8 -> unsigned int, "" -> string, time.Time -> datetime).
		b.Column(schema.NewField("id", uint8(0), func(field *schema.Field) {
			field.PrimaryKey = true
			field.Comment = "ID"
		}))
		b.Column(schema.NewField("name", "", func(field *schema.Field) {
			field.Size = 100
			field.NotNull = false
			field.Comment = "用户名"
		}))
		b.Column(schema.NewField("created_at", time.Time{}, func(field *schema.Field) {
			field.Size = 100
			field.Comment = "创建时间"
		}))
		// The charset option is only valid (and expected) for MySQL.
		if dialect.Value() == driver.MySQL {
			b.Option("DEFAULT CHARACTER SET", "utf8mb4")
		}
		sql := b.String()
		tt.Equal(expected, sql)
	}
}
// TestDropTable checks DROP TABLE generation for the default driver.
func TestDropTable(t *testing.T) {
	tt := zlsgo.NewTest(t)
	sql := builder.NewTable("user").Drop()
	tt.Equal("DROP TABLE user", sql)
}
// TestHasTable checks the table-existence query for each supported
// dialect. Only the in-memory SQLite branch actually executes a query;
// the MySQL/Postgres/MSSQL DSNs are placeholders used solely to derive
// the schema name for SQL generation — no connection is opened for them.
func TestHasTable(t *testing.T) {
	tt := zlsgo.NewTest(t)
	// Default (no driver set) falls back to the sqlite_master query.
	sql, values, _ := builder.NewTable("shop").Has()
	tt.Equal("SELECT count(*) AS count FROM sqlite_master WHERE type = 'table' AND name = ?", sql)
	tt.Equal([]interface{}{"shop"}, values)
	table := builder.NewTable("shop")
	dialect := &sqlite3.Config{Memory: true}
	table.SetDriver(dialect)
	sql, values, process := table.Has()
	tt.Equal("SELECT count(*) AS count FROM sqlite_master WHERE type = 'table' AND name = ?", sql)
	tt.Equal([]interface{}{"shop"}, values)
	// Run the generated query against an in-memory database and feed the
	// rows through the post-processing callback.
	rows, err := dialect.DB().Query(sql, values...)
	tt.NoError(err)
	data, _, _ := zdb.ScanToMap(rows)
	t.Log(process(data))
	{
		table = builder.NewTable("shop")
		table.SetDriver(&mysql.Config{
			Dsn: "root:root@(127.0.0.1:3306)/test?charset=utf8mb4&parseTime=True&loc=Local",
		})
		sql, values, _ = table.Has()
		tt.Equal("SELECT count(*) AS count FROM information_schema.tables WHERE table_schema = ? AND table_name = ? AND table_type = ?", sql)
		tt.Equal([]interface{}{"test", "shop", "BASE TABLE"}, values)
	}
	{
		table = builder.NewTable("shop")
		table.SetDriver(&postgres.Config{
			Dsn: "host=192.168.3.378 port=5432 user=postgres password=12345678 dbname=test sslmode=disable",
		})
		sql, values, _ = table.Has()
		tt.Equal("SELECT count(*) AS count FROM information_schema.tables WHERE table_schema = ? AND table_name = ? AND table_type = ?", sql)
		tt.Equal([]interface{}{"test", "shop", "BASE TABLE"}, values)
	}
	{
		table = builder.NewTable("shop")
		table.SetDriver(&mssql.Config{
			Dsn: "sqlserver://mssql:12345678@localhost:9930?database=test",
		})
		sql, values, _ = table.Has()
		tt.Equal("SELECT count(*) AS count FROM INFORMATION_SCHEMA.tables WHERE table_name = ? AND table_catalog = ?", sql)
		tt.Equal([]interface{}{"shop", "test"}, values)
	}
}
|
package shoppingCartController
import (
"github.com/gin-gonic/gin"
"hd-mall-ed/packages/client/models/shoppingCartModel"
"hd-mall-ed/packages/common/pkg/app"
"hd-mall-ed/packages/common/pkg/e"
)
// Delete removes shopping-cart rows by id.
//
// The request body is bound to a plain JSON array of ids ("idList" is
// the only expected parameter).
// NOTE(review): the ShouldBind error is deliberately discarded, so a
// malformed body yields an empty id list instead of a client error —
// confirm this best-effort behavior is intended.
func Delete(c *gin.Context) {
	api := app.ApiFunction{C: c}
	model := &shoppingCartModel.ShoppingCart{}
	idList := &[]uint{}
	_ = c.ShouldBind(idList)
	err := model.Delete(idList)
	if err != nil {
		api.ResFail(e.Fail)
		return
	}
	api.ResponseNoData()
}
|
package main
import (
"fmt"
"strconv"
"strings"
"unicode"
)
// main demonstrates myAtoi on an all-blank input (expected output: 0).
func main() {
	result := myAtoi(" ")
	fmt.Println(result)
}
// myAtoi converts the leading integer of s to an int, following the
// classic atoi contract:
//  1. leading spaces are skipped;
//  2. at most one optional '+' or '-' sign may follow;
//  3. digits are read until the first non-digit character;
//  4. the result is clamped to the signed 32-bit range
//     [-2147483648, 2147483647].
//
// Any input that does not start (after spaces) with an optional sign
// immediately followed by a digit yields 0.
//
// Fixes over the previous version:
//   - the clamp bounds used 2<<31 (== 2^32) instead of 1<<31, so
//     out-of-range inputs were clamped to the wrong values;
//   - strconv.Atoi's overflow error was silently discarded;
//   - a sign separated from the digits (e.g. "+ 1") was wrongly accepted.
func myAtoi(s string) int {
	const (
		intMax = 1<<31 - 1  // 2147483647
		intMin = -(1 << 31) // -2147483648
	)
	trimmed := strings.TrimLeft(s, " ")
	sign := 1
	i := 0
	if i < len(trimmed) && (trimmed[i] == '+' || trimmed[i] == '-') {
		if trimmed[i] == '-' {
			sign = -1
		}
		i++
	}
	// Digits must immediately follow the (optional) sign.
	start := i
	for i < len(trimmed) && unicode.IsDigit(rune(trimmed[i])) {
		i++
	}
	if i == start {
		return 0
	}
	n, err := strconv.Atoi(trimmed[start:i])
	if err != nil {
		// Atoi only fails here on overflow of the native int, which is
		// certainly outside the 32-bit range: clamp by sign.
		if sign == -1 {
			return intMin
		}
		return intMax
	}
	if sign == -1 {
		if -n < intMin {
			return intMin
		}
		return -n
	}
	if n > intMax {
		return intMax
	}
	return n
}
// FASTER REALISATION
// func myAtoi(s string) int {
// for len(s) > 0 && s[0] == ' ' {
// s = s[1:]
// }
// if len(s) == 0 {
// return 0
// }
// neg := false
// switch s[0] {
// case '-':
// neg = true
// fallthrough
// case '+':
// s = s[1:]
// }
// var val int64
// for len(s) > 0 {
// d := s[0]
// if d < '0' || d > '9' {
// break
// }
// val *= 10
// val += int64(d-'0')
// if val >= 1<<31 {
// break
// }
// s = s[1:]
// }
// const (
// max = 1<<31-1
// min = -1<<31
// )
// if neg {
// if val = -val; val < min {
// val = min
// }
// } else if val > max {
// val = max
// }
// return int(val)
// }
|
package odoo
import (
"fmt"
)
// StockChangeProductQty represents stock.change.product.qty model — the
// Odoo wizard used to change a product's quantity on hand.
//
// NOTE(review): the tag option is spelled "omptempty" (not "omitempty")
// consistently across this generated file; confirm what spelling the
// XML-RPC marshaller actually honors before "fixing" it.
type StockChangeProductQty struct {
	LastUpdate          *Time     `xmlrpc:"__last_update,omptempty"`
	CreateDate          *Time     `xmlrpc:"create_date,omptempty"`
	CreateUid           *Many2One `xmlrpc:"create_uid,omptempty"`
	DisplayName         *String   `xmlrpc:"display_name,omptempty"`
	Id                  *Int      `xmlrpc:"id,omptempty"`
	LocationId          *Many2One `xmlrpc:"location_id,omptempty"`
	LotId               *Many2One `xmlrpc:"lot_id,omptempty"`
	NewQuantity         *Float    `xmlrpc:"new_quantity,omptempty"`
	ProductId           *Many2One `xmlrpc:"product_id,omptempty"`
	ProductTmplId       *Many2One `xmlrpc:"product_tmpl_id,omptempty"`
	ProductVariantCount *Int      `xmlrpc:"product_variant_count,omptempty"`
	WriteDate           *Time     `xmlrpc:"write_date,omptempty"`
	WriteUid            *Many2One `xmlrpc:"write_uid,omptempty"`
}
// StockChangeProductQtys represents array of stock.change.product.qty model.
type StockChangeProductQtys []StockChangeProductQty
// StockChangeProductQtyModel is the odoo model name used in every RPC call below.
const StockChangeProductQtyModel = "stock.change.product.qty"
// Many2One convert StockChangeProductQty to *Many2One.
// Only the record id is carried; the display name is left empty.
func (scpq *StockChangeProductQty) Many2One() *Many2One {
	return NewMany2One(scpq.Id.Get(), "")
}
// CreateStockChangeProductQty creates a new stock.change.product.qty model and returns its id.
// It delegates to the batch variant with a one-element slice.
func (c *Client) CreateStockChangeProductQty(scpq *StockChangeProductQty) (int64, error) {
	ids, err := c.CreateStockChangeProductQtys([]*StockChangeProductQty{scpq})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateStockChangeProductQtys creates new stock.change.product.qty models
// and returns their ids. (The previous doc comment was a copy-paste of the
// singular variant's.)
func (c *Client) CreateStockChangeProductQtys(scpqs []*StockChangeProductQty) ([]int64, error) {
	// Pre-size the interface slice; Create expects []interface{}.
	vv := make([]interface{}, 0, len(scpqs))
	for _, v := range scpqs {
		vv = append(vv, v)
	}
	return c.Create(StockChangeProductQtyModel, vv)
}
// UpdateStockChangeProductQty updates an existing stock.change.product.qty record.
// It is a convenience wrapper around the batch variant for a single id.
func (c *Client) UpdateStockChangeProductQty(scpq *StockChangeProductQty) error {
	return c.UpdateStockChangeProductQtys([]int64{scpq.Id.Get()}, scpq)
}
// UpdateStockChangeProductQtys updates existing stock.change.product.qty records.
// All records (represented by ids) will be updated by scpq values.
func (c *Client) UpdateStockChangeProductQtys(ids []int64, scpq *StockChangeProductQty) error {
	return c.Update(StockChangeProductQtyModel, ids, scpq)
}
// DeleteStockChangeProductQty deletes an existing stock.change.product.qty record.
// It is a convenience wrapper around the batch variant for a single id.
func (c *Client) DeleteStockChangeProductQty(id int64) error {
	return c.DeleteStockChangeProductQtys([]int64{id})
}
// DeleteStockChangeProductQtys deletes existing stock.change.product.qty records
// named by ids in a single RPC call.
func (c *Client) DeleteStockChangeProductQtys(ids []int64) error {
	return c.Delete(StockChangeProductQtyModel, ids)
}
// GetStockChangeProductQty gets stock.change.product.qty existing record.
// A missing id is reported as an error rather than a nil record.
func (c *Client) GetStockChangeProductQty(id int64) (*StockChangeProductQty, error) {
	scpqs, err := c.GetStockChangeProductQtys([]int64{id})
	if err != nil {
		return nil, err
	}
	if scpqs == nil || len(*scpqs) == 0 {
		return nil, fmt.Errorf("id %v of stock.change.product.qty not found", id)
	}
	return &((*scpqs)[0]), nil
}
// GetStockChangeProductQtys gets stock.change.product.qty existing records.
// The nil third argument presumably selects all fields — see Client.Read.
func (c *Client) GetStockChangeProductQtys(ids []int64) (*StockChangeProductQtys, error) {
	scpqs := &StockChangeProductQtys{}
	if err := c.Read(StockChangeProductQtyModel, ids, nil, scpqs); err != nil {
		return nil, err
	}
	return scpqs, nil
}
// FindStockChangeProductQty finds stock.change.product.qty record by querying it with criteria.
// The search is limited to a single result; no match is an error.
func (c *Client) FindStockChangeProductQty(criteria *Criteria) (*StockChangeProductQty, error) {
	scpqs := &StockChangeProductQtys{}
	if err := c.SearchRead(StockChangeProductQtyModel, criteria, NewOptions().Limit(1), scpqs); err != nil {
		return nil, err
	}
	if scpqs == nil || len(*scpqs) == 0 {
		return nil, fmt.Errorf("stock.change.product.qty was not found with criteria %v", criteria)
	}
	return &((*scpqs)[0]), nil
}
// FindStockChangeProductQtys finds stock.change.product.qty records by querying it
// and filtering it with criteria and options.
func (c *Client) FindStockChangeProductQtys(criteria *Criteria, options *Options) (*StockChangeProductQtys, error) {
	scpqs := &StockChangeProductQtys{}
	if err := c.SearchRead(StockChangeProductQtyModel, criteria, options, scpqs); err != nil {
		return nil, err
	}
	return scpqs, nil
}
// FindStockChangeProductQtyIds finds records ids by querying it
// and filtering it with criteria and options.
// On error an empty (non-nil) slice is returned alongside the error.
func (c *Client) FindStockChangeProductQtyIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(StockChangeProductQtyModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindStockChangeProductQtyId finds record id by querying it with criteria.
// Only the first matching id is returned; no match is an error.
func (c *Client) FindStockChangeProductQtyId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(StockChangeProductQtyModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("stock.change.product.qty was not found with criteria %v and options %v", criteria, options)
}
|
package main
import (
"fmt"
"math"
"strconv"
)
// reverseStr returns str with its bytes in reverse order.
//
// Reversal is byte-wise, which is only safe for ASCII input (all this
// file feeds it); multi-byte UTF-8 sequences would be scrambled.
// Fix: the previous version rebuilt the string by repeated front
// concatenation, which is O(n^2); this swaps bytes in place.
func reverseStr(str string) string {
	b := []byte(str)
	for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
		b[i], b[j] = b[j], b[i]
	}
	return string(b)
}
// binaryToDecimal converts a string of '0'/'1' characters to its decimal
// value (e.g. "101" -> 5). Any character other than '1' is treated as a
// zero bit, matching the previous behavior. The float64 return type is
// kept for interface compatibility; the value is always a non-negative
// integer (exact for inputs up to 53 bits).
//
// Fix: the old version reversed the string and used math.Pow per bit;
// a single left-to-right accumulation pass is simpler and exact.
func binaryToDecimal(bits string) float64 {
	var decimal float64
	for i := 0; i < len(bits); i++ {
		decimal *= 2
		if bits[i] == '1' {
			decimal++
		}
	}
	return decimal
}
// decimalToHex converts a non-negative decimal value to an uppercase
// hexadecimal string (e.g. 2345 -> "929", 255 -> "FF").
//
// Fixes over the previous version:
//   - the digit lookup iterated the whole letter map per digit; a direct
//     index into a digit string replaces it;
//   - an input of 0 returned "" — it now yields "0" (inputs below 1,
//     including negatives, also yield "0").
func decimalToHex(decimal float64) string {
	const digits = "0123456789ABCDEF"
	d := math.Floor(decimal)
	if d <= 0 {
		return "0"
	}
	var res string
	for ; d > 0; d = math.Floor(d / 16) {
		// Prepend the least significant digit so no reversal is needed.
		res = string(digits[int(d)%16]) + res
	}
	return res
}
// hexToDecimal converts an uppercase hexadecimal string to its decimal
// value (e.g. "FF" -> 255). Digits '0'-'9' and letters 'A'-'F' are
// recognized; any other character contributes zero, matching the
// previous implementation's handling of unparseable characters
// (lowercase a-f is NOT supported, as before).
//
// Fix: the old version reversed the string, iterated a lookup map per
// character, and used math.Pow; one forward accumulation pass suffices.
func hexToDecimal(hex string) float64 {
	var decimal float64
	for i := 0; i < len(hex); i++ {
		var v int
		switch c := hex[i]; {
		case c >= '0' && c <= '9':
			v = int(c - '0')
		case c >= 'A' && c <= 'F':
			v = int(c-'A') + 10
		}
		decimal = decimal*16 + float64(v)
	}
	return decimal
}
// decimalToBinary converts a non-negative decimal value to its binary
// string representation, left-padded with zeros to a minimum width of
// 8 bits (one byte), e.g. 5 -> "00000101", 278 -> "100010110".
//
// Fix: the hand-rolled divide-and-reverse loop is replaced by
// strconv.FormatInt, which produces the same digits directly.
func decimalToBinary(decimal float64) string {
	bits := strconv.FormatInt(int64(decimal), 2)
	// Pad to a full byte, as the previous implementation did.
	for len(bits) < 8 {
		bits = "0" + bits
	}
	return bits
}
// main demonstrates the conversion helpers above and Go's bitwise and
// shift operators.
func main() {
	fmt.Println("111111111100110011000000 to Decimal:", binaryToDecimal("111111111100110011000000"))
	fmt.Println("2345 to Hexadecimal:", decimalToHex(2345))
	fmt.Println("278 to binary:", decimalToBinary(278))
	fmt.Printf("ASCII: %08b \n", []byte("FFCCC0"))
	fmt.Println("FFCCC0 to binary: ", decimalToBinary(hexToDecimal("FFCCC0")))
	fmt.Print(hexToDecimal("FF"), hexToDecimal("F0"), hexToDecimal("0F"), hexToDecimal("11"))
	fmt.Print(" = ")
	fmt.Print(decimalToBinary(hexToDecimal("FF")), " ", decimalToBinary(hexToDecimal("F0")), " ", decimalToBinary(hexToDecimal("0F")), " ", decimalToBinary(hexToDecimal("11")), "\n")
	fmt.Printf("Bitwise AND %08b \n", 0b10000001&0b10001001)
	fmt.Printf("Bitwise OR %08b \n", 0b10000001|0b10001001)
	fmt.Printf("Bitwise XOR or NOT %08b \n", 0b10000001^0b10001001)
	fmt.Printf("Bitwise AND NOT %08b \n", 0b10010001&^0b10000001) // same as AND with the complement of the right operand
	// Fix: these two labels were swapped in the original output
	// (">>" is a right shift, "<<" is a left shift).
	fmt.Printf("Right shift %08b \n", 0b10000001>>2)
	fmt.Printf("Left shift %08b \n", 0b10000001<<2)
	// see https://golang.org/pkg/fmt/ for more formatting output
	fmt.Printf("source %08b %d \n", 0b10000001, 0b10000001)
	fmt.Printf("shift left by 2 %10b %d \n", 0b10000001<<2, 0b10000001<<2)
}
|
package HashCashProject
import (
"encoding/json"
"encoding/xml"
// "fmt"
zmq4 "github.com/pebbe/zmq4"
"io/ioutil"
"os"
"strconv"
"strings"
//"image/gif"
)
// sendString is the XML wire form of an Envelope whose payload is a string.
type sendString struct {
	Pid   int
	MsgId int
	Msg   string
}
// sendint is the XML wire form of an Envelope whose payload is an int.
type sendint struct {
	Pid   int
	MsgId int
	Msg   int
}
// ServerConf holds one server entry read from the JSON config file.
type ServerConf struct {
	ID   int    `json:"ID"`
	Host string `json:"Host"`
	Port int    `json:"Port"`
}
// ServerObj is the main server object representing one cluster member.
type ServerObj struct {
	ID        int                  // id of this server
	Host      string               // host address of this server
	Port      int                  // port this server listens on
	Peers_o   map[int]ServerConf   // config of every other server, keyed by id
	In_chnl   chan *Envelope       // delivers messages received from peers
	Out_chnl  chan *Envelope       // accepts messages to be sent to peers
	myconn    *zmq4.Socket         // PULL socket this server listens on
	peer_conn map[int]*zmq4.Socket // PUSH socket to each peer, keyed by peer id
}
// Allserver holds the global list of all servers read from the config file.
type Allserver struct {
	Servers []ServerConf
}
// BROADCAST as a destination Pid sends a message to every peer.
const (
	BROADCAST = -1
)
// Envelope is the message unit exchanged between servers.
type Envelope struct {
	Pid   int         // destination server id on send (BROADCAST fans out); sender id on receive
	MsgId int         // caller-chosen message identifier
	Msg   interface{} // payload; only string and int payloads are serialized (see wrapMsg)
}
/*func (b Envelope) String() string {
return fmt.Sprintf("%b", b)
}
func (b Envelope) OtherString() string {
return fmt.Sprintf("%b", b.Msg)
}
*/
// Server lists the basic operations a server object offers.
// NOTE(review): ServerObj.Peers() returns map[int]ServerConf, not []int,
// so ServerObj does not satisfy this interface as declared — confirm
// which signature is intended.
type Server interface {
	Pid() int
	Peers() []int
	Outbox() chan *Envelope
	Inbox() chan *Envelope
}
// Pid returns the id of this server.
func (current ServerObj) Pid() int {
	return current.ID
}
// Peers returns the config of every other server in the cluster, keyed by id.
func (current ServerObj) Peers() map[int]ServerConf {
	return current.Peers_o
}
// Inbox returns the channel on which received messages are delivered.
func (current ServerObj) Inbox() chan *Envelope {
	return current.In_chnl
}
// Outbox returns the channel used to submit messages for sending.
func (current ServerObj) Outbox() chan *Envelope {
	return current.Out_chnl
}
// getAllserver reads the JSON config file at cofg and returns the parsed
// server list. The process is aborted when the file cannot be read or
// parsed, or when it describes no servers, since the cluster cannot
// start without a valid configuration.
func getAllserver(cofg string) Allserver {
	file, err := ioutil.ReadFile(cofg)
	if err != nil {
		// Previously this error was discarded, making every read failure
		// look like an empty config file.
		println("could not read config file:", err.Error())
		os.Exit(29)
	}
	var jsontype Allserver
	if err := json.Unmarshal(file, &jsontype); err != nil {
		println("could not parse config file:", err.Error())
		os.Exit(29)
	}
	if len(jsontype.Servers) < 1 {
		// No usable server info: fail loudly. (The original panic was
		// followed by an unreachable os.Exit call, now removed.)
		panic("Either Something wrong with config file or file has not valid info or check the config file path")
	}
	return jsontype
}
// Shared scratch structs reused by wrapMsg/unwrapMs for XML
// (de)serialization of envelopes sent over the network.
// NOTE(review): these are package-level and unsynchronized; concurrent
// calls to wrapMsg/unwrapMs from different goroutines (e.g. several
// servers created in one process) would race on them — confirm
// single-threaded use or move them into the functions.
var sendINT, rCVINT sendint
var sendStrings, rCVSTR sendString
// wrapMsg serializes an Envelope to its XML wire form (note: XML, not
// JSON as an earlier comment claimed) so it can be sent over the
// network. Only string and int payloads are supported; any other
// payload type yields an empty string.
func wrapMsg(msg Envelope) string {
	var send string
	switch msg.Msg.(type) {
	case string:
		sendStrings.Msg = msg.Msg.(string)
		sendStrings.MsgId = msg.MsgId
		sendStrings.Pid = msg.Pid
		x, _ := xml.Marshal(sendStrings)
		send = string(x)
		return send
	case int:
		sendINT.Msg = msg.Msg.(int)
		sendINT.MsgId = msg.MsgId
		sendINT.Pid = msg.Pid
		x, _ := xml.Marshal(sendINT)
		send = string(x)
		return send
	}
	// Unsupported payload type: empty string.
	return send
}
// unwrapMs decodes the XML wire form produced by wrapMsg back into an
// Envelope. The payload type is recognized by the XML root element name
// ("<sendint>" or "<sendString>"); unrecognized input yields a zero
// Envelope.
func unwrapMs(msg string) Envelope {
	var returnEnvelp Envelope
	binmsg := []byte(msg)
	switch {
	case strings.Contains(msg, "<sendint>"):
		xml.Unmarshal(binmsg, &rCVINT)
		returnEnvelp.MsgId = rCVINT.MsgId
		returnEnvelp.Pid = rCVINT.Pid
		returnEnvelp.Msg = rCVINT.Msg
	case strings.Contains(msg, "<sendString>"):
		xml.Unmarshal(binmsg, &rCVSTR)
		returnEnvelp.MsgId = rCVSTR.MsgId
		returnEnvelp.Pid = rCVSTR.Pid
		returnEnvelp.Msg = rCVSTR.Msg
		return returnEnvelp
	}
	return returnEnvelp
}
// New builds a ServerObj for server id using the JSON config at cofg.
// It binds a PULL socket on this server's port, connects a PUSH socket
// to every peer, and starts two goroutines: one decoding incoming
// messages onto Inbox(), and one forwarding Outbox() messages to the
// addressed peer (or to all peers when Pid is BROADCAST).
func New(id int, cofg string) ServerObj {
	// Read info about every server in the cluster.
	global_object := getAllserver(cofg)
	var newServer ServerObj
	mypeer := make(map[int]ServerConf)
	// Split the config into this server's own entry and its peers.
	for i := range global_object.Servers {
		if global_object.Servers[i].ID != id {
			// Every other entry becomes a peer.
			mypeer[global_object.Servers[i].ID] = global_object.Servers[i]
		} else {
			// This entry describes the server being constructed.
			newServer.ID = global_object.Servers[i].ID
			newServer.Host = global_object.Servers[i].Host
			newServer.Port = global_object.Servers[i].Port
		}
	}
	newServer.Peers_o = mypeer
	// Unbuffered inbox/outbox channels.
	newServer.In_chnl = make(chan *Envelope)
	newServer.Out_chnl = make(chan *Envelope)
	// Listen for peers on the port defined in the config file.
	bindConn := "tcp://*:" + strconv.Itoa(newServer.Port)
	myconn, _ := zmq4.NewSocket(zmq4.PULL)
	myconn.Bind(bindConn)
	newServer.myconn = myconn
	// One PUSH socket per peer, keyed by peer id.
	newServer.peer_conn = make(map[int]*zmq4.Socket)
	for key, srvr := range newServer.Peers_o {
		if key != newServer.ID {
			conect := "tcp://" + srvr.Host + ":" + strconv.Itoa(srvr.Port)
			conn, er01 := zmq4.NewSocket(zmq4.PUSH)
			if er01 != nil {
				panic(er01)
			}
			conn.Connect(conect)
			newServer.peer_conn[key] = conn
		}
	}
	// Receiver goroutine: decode each wire message and deliver it to Inbox.
	go func() {
		for {
			rcvmsg, er := myconn.Recv(0)
			if er != nil {
				panic(er)
			}
			var msg Envelope
			msg = unwrapMs(rcvmsg)
			newServer.Inbox() <- &msg
		}
	}()
	// Sender goroutine: forward Outbox messages to the addressed peer, or
	// to every peer when Pid is BROADCAST (the outgoing Pid is rewritten
	// to this server's id so receivers see the source).
	go func() {
		for {
			select {
			case x := <-newServer.Out_chnl:
				var msg Envelope
				msg = *x
				if msg.Pid != BROADCAST && msg.Pid != newServer.ID {
					newServer.peer_conn[msg.Pid].Send(wrapMsg(Envelope{Pid: newServer.Pid(), MsgId: msg.MsgId, Msg: msg.Msg}), 0)
				} else {
					for _, sockpeer := range newServer.peer_conn {
						sockpeer.Send(wrapMsg(Envelope{Pid: newServer.Pid(), MsgId: msg.MsgId, Msg: msg.Msg}), 0)
					}
				}
			}
		}
	}()
	return newServer
}
|
package main
import (
"log"
"net/http"
)
func homePage(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Hello World!!"))
}
func usersPage(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Load users page!!"))
}
// main registers the routes and starts the HTTP server on port 5000.
func main() {
	// HTTP is a request/response protocol: the client sends a request,
	// the server processes it and sends back a response.
	// Routes:
	//  - the URI identifies the resource
	//  - the method is one of GET, POST, PUT, DELETE
	http.HandleFunc("/home", homePage)
	http.HandleFunc("/users", usersPage)
	// ListenAndServe blocks; log.Fatal exits if the server fails.
	log.Fatal(http.ListenAndServe(":5000", nil))
}
|
package helpers
// ParsedParam describes one template parameter found while parsing,
// together with where it occurs and its default value.
// NOTE(review): encoding/json has no ",inline" option (embedded structs
// are flattened by default); the tag only matters if a yaml/k8s-style
// marshaller consumes these structs — confirm which one does.
type ParsedParam struct {
	ParamType `json:",inline"`
	Line      int    `json:"linenumber"` // line number where the parameter appears
	Loc       []int  `json:"location"`   // location span of the parameter occurrence
	Default   string `json:"default"`    // default value, if any
}
// ParamType identifies a parameter by its category and name.
type ParamType struct {
	Category string `json:"category"`
	Name     string `json:"name"`
}
// Template bundles a named YAML template with the parameters parsed out of it.
type Template struct {
	Name    string        `json:"name"`
	Yaml    string        `json:"yaml"`
	PParams []ParsedParam `json:"params"`
}
// ConfigMapData is the stored collection of templates.
// This can be used to store additional information.
type ConfigMapData struct {
	Templates []Template `json:"templates"`
}
// FinalMapping pairs a parsed parameter with the value finally chosen for it.
type FinalMapping struct {
	ParsedParam `json:",inline"`
	FinalValue  string `json:"finalValue"`
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package concurrency
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/intentresolver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/stretchr/testify/require"
)
// mockIntentResolver stubs the intent resolver used by the waiter under
// test; each behavior is injected per-test through the function fields.
type mockIntentResolver struct {
	pushTxn        func(context.Context, *enginepb.TxnMeta, roachpb.Header, roachpb.PushTxnType) (*roachpb.Transaction, *Error)
	resolveIntent  func(context.Context, roachpb.LockUpdate) *Error
	resolveIntents func(context.Context, []roachpb.LockUpdate) *Error
}
// mockIntentResolver implements the IntentResolver interface.
// PushTransaction forwards to the test-injected pushTxn callback.
func (m *mockIntentResolver) PushTransaction(
	ctx context.Context, txn *enginepb.TxnMeta, h roachpb.Header, pushType roachpb.PushTxnType,
) (*roachpb.Transaction, *Error) {
	return m.pushTxn(ctx, txn, h, pushType)
}
// ResolveIntent forwards to the test-injected resolveIntent callback;
// the ResolveOptions are ignored by the mock.
func (m *mockIntentResolver) ResolveIntent(
	ctx context.Context, intent roachpb.LockUpdate, _ intentresolver.ResolveOptions,
) *Error {
	return m.resolveIntent(ctx, intent)
}
// ResolveIntents forwards to the test-injected resolveIntents callback.
func (m *mockIntentResolver) ResolveIntents(
	ctx context.Context, intents []roachpb.LockUpdate, opts intentresolver.ResolveOptions,
) *Error {
	return m.resolveIntents(ctx, intents)
}
// mockLockTableGuard scripts the waiting states a waiter observes.
type mockLockTableGuard struct {
	state         waitingState
	signal        chan struct{} // notifies the waiter that the state changed
	stateObserved chan struct{} // if non-nil, receives after each CurState call
	toResolve     []roachpb.LockUpdate
}
// mockLockTableGuard implements the lockTableGuard interface.
// ShouldWait always reports true so WaitOn actually waits; NewStateChan
// exposes the test-controlled signal channel.
func (g *mockLockTableGuard) ShouldWait() bool            { return true }
func (g *mockLockTableGuard) NewStateChan() chan struct{} { return g.signal }
// CurState returns the scripted state and, when stateObserved is set,
// tells the test that the state has been read (used by notifyUntilDone
// to sequence state transitions).
func (g *mockLockTableGuard) CurState() waitingState {
	s := g.state
	if g.stateObserved != nil {
		g.stateObserved <- struct{}{}
	}
	return s
}
// ResolveBeforeScanning returns the scripted set of intents to resolve
// before scanning.
func (g *mockLockTableGuard) ResolveBeforeScanning() []roachpb.LockUpdate {
	return g.toResolve
}
// notify wakes the waiter by signalling a state change.
func (g *mockLockTableGuard) notify() { g.signal <- struct{}{} }
// mockLockTable overrides TransactionIsFinalized, which is the only LockTable
// method that should be called in this test.
type mockLockTable struct {
	lockTableImpl
	txnFinalizedFn func(txn *roachpb.Transaction)
}

// TransactionIsFinalized forwards to the test-injected callback.
func (lt *mockLockTable) TransactionIsFinalized(txn *roachpb.Transaction) {
	lt.txnFinalizedFn(txn)
}
// setupLockTableWaiterTest builds a lockTableWaiterImpl wired to a mock
// intent resolver and a mock lock table, plus a mock guard for tests to
// wait on. Both push delays are overridden to zero so that pushes fire
// immediately instead of after a timer.
func setupLockTableWaiterTest() (*lockTableWaiterImpl, *mockIntentResolver, *mockLockTableGuard) {
	ir := &mockIntentResolver{}
	st := cluster.MakeTestingClusterSettings()
	LockTableLivenessPushDelay.Override(&st.SV, 0)
	LockTableDeadlockDetectionPushDelay.Override(&st.SV, 0)
	manual := hlc.NewManualClock(123)
	// The buffered signal channel lets notify() run before the waiter listens.
	guard := &mockLockTableGuard{
		signal: make(chan struct{}, 1),
	}
	w := &lockTableWaiterImpl{
		st:      st,
		clock:   hlc.NewClock(manual.UnixNano, time.Nanosecond),
		stopper: stop.NewStopper(),
		ir:      ir,
		lt:      &mockLockTable{},
	}
	return w, ir, guard
}
// makeTxnProto returns a transaction named name, anchored at key "key"
// with wall time 10.
func makeTxnProto(name string) roachpb.Transaction {
	return roachpb.MakeTransaction(name, []byte("key"), 0, hlc.Timestamp{WallTime: 10}, 0)
}
// TestLockTableWaiterWithTxn tests the lockTableWaiter's behavior under
// different waiting states while a transactional request is waiting.
// Transactional requests are expected to push at their global
// uncertainty limit.
func TestLockTableWaiterWithTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	uncertaintyLimit := hlc.Timestamp{WallTime: 15}
	makeReq := func() Request {
		txn := makeTxnProto("request")
		txn.GlobalUncertaintyLimit = uncertaintyLimit
		return Request{
			Txn:       &txn,
			Timestamp: txn.ReadTimestamp,
		}
	}
	t.Run("state", func(t *testing.T) {
		t.Run("waitFor", func(t *testing.T) {
			testWaitPush(t, waitFor, makeReq, uncertaintyLimit)
		})
		t.Run("waitForDistinguished", func(t *testing.T) {
			testWaitPush(t, waitForDistinguished, makeReq, uncertaintyLimit)
		})
		t.Run("waitElsewhere", func(t *testing.T) {
			testWaitPush(t, waitElsewhere, makeReq, uncertaintyLimit)
		})
		t.Run("waitSelf", func(t *testing.T) {
			testWaitNoopUntilDone(t, waitSelf, makeReq)
		})
		t.Run("doneWaiting", func(t *testing.T) {
			// doneWaiting returns immediately without any push.
			w, _, g := setupLockTableWaiterTest()
			defer w.stopper.Stop(ctx)
			g.state = waitingState{kind: doneWaiting}
			g.notify()
			err := w.WaitOn(ctx, makeReq(), g)
			require.Nil(t, err)
		})
	})
	t.Run("ctx done", func(t *testing.T) {
		// Cancellation surfaces as context.Canceled.
		w, _, g := setupLockTableWaiterTest()
		defer w.stopper.Stop(ctx)
		ctxWithCancel, cancel := context.WithCancel(ctx)
		go cancel()
		err := w.WaitOn(ctxWithCancel, makeReq(), g)
		require.NotNil(t, err)
		require.Equal(t, context.Canceled.Error(), err.GoError().Error())
	})
	t.Run("stopper quiesce", func(t *testing.T) {
		// A quiescing stopper yields a NodeUnavailableError.
		w, _, g := setupLockTableWaiterTest()
		defer w.stopper.Stop(ctx)
		go func() {
			w.stopper.Quiesce(ctx)
		}()
		err := w.WaitOn(ctx, makeReq(), g)
		require.NotNil(t, err)
		require.IsType(t, &roachpb.NodeUnavailableError{}, err.GetDetail())
	})
}
// TestLockTableWaiterWithNonTxn tests the lockTableWaiter's behavior under
// different waiting states while a non-transactional request is waiting.
// Non-transactional requests push at their header timestamp.
func TestLockTableWaiterWithNonTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	reqHeaderTS := hlc.Timestamp{WallTime: 10}
	makeReq := func() Request {
		return Request{
			Timestamp: reqHeaderTS,
			Priority:  roachpb.NormalUserPriority,
		}
	}
	t.Run("state", func(t *testing.T) {
		t.Run("waitFor", func(t *testing.T) {
			t.Log("waitFor does not cause non-transactional requests to push")
			testWaitNoopUntilDone(t, waitFor, makeReq)
		})
		t.Run("waitForDistinguished", func(t *testing.T) {
			testWaitPush(t, waitForDistinguished, makeReq, reqHeaderTS)
		})
		t.Run("waitElsewhere", func(t *testing.T) {
			testWaitPush(t, waitElsewhere, makeReq, reqHeaderTS)
		})
		t.Run("waitSelf", func(t *testing.T) {
			t.Log("waitSelf is not possible for non-transactional request")
		})
		t.Run("doneWaiting", func(t *testing.T) {
			// doneWaiting returns immediately without any push.
			w, _, g := setupLockTableWaiterTest()
			defer w.stopper.Stop(ctx)
			g.state = waitingState{kind: doneWaiting}
			g.notify()
			err := w.WaitOn(ctx, makeReq(), g)
			require.Nil(t, err)
		})
	})
	t.Run("ctx done", func(t *testing.T) {
		// Cancellation surfaces as context.Canceled.
		w, _, g := setupLockTableWaiterTest()
		defer w.stopper.Stop(ctx)
		ctxWithCancel, cancel := context.WithCancel(ctx)
		go cancel()
		err := w.WaitOn(ctxWithCancel, makeReq(), g)
		require.NotNil(t, err)
		require.Equal(t, context.Canceled.Error(), err.GoError().Error())
	})
	t.Run("stopper quiesce", func(t *testing.T) {
		// A quiescing stopper yields a NodeUnavailableError.
		w, _, g := setupLockTableWaiterTest()
		defer w.stopper.Stop(ctx)
		go func() {
			w.stopper.Quiesce(ctx)
		}()
		err := w.WaitOn(ctx, makeReq(), g)
		require.NotNil(t, err)
		require.IsType(t, &roachpb.NodeUnavailableError{}, err.GetDetail())
	})
}
// testWaitPush drives a waiter through waiting state k and verifies the
// resulting push: the expected push timestamp expPushTS, the push type
// (ABORT vs TIMESTAMP), and — when the lock is held — the follow-up
// intent resolution once the pushee is found ABORTED. All four
// combinations of lockHeld and waitAsWrite are exercised.
func testWaitPush(t *testing.T, k waitKind, makeReq func() Request, expPushTS hlc.Timestamp) {
	ctx := context.Background()
	keyA := roachpb.Key("keyA")
	testutils.RunTrueAndFalse(t, "lockHeld", func(t *testing.T, lockHeld bool) {
		testutils.RunTrueAndFalse(t, "waitAsWrite", func(t *testing.T, waitAsWrite bool) {
			w, ir, g := setupLockTableWaiterTest()
			defer w.stopper.Stop(ctx)
			pusheeTxn := makeTxnProto("pushee")
			req := makeReq()
			g.state = waitingState{
				kind:        k,
				txn:         &pusheeTxn.TxnMeta,
				key:         keyA,
				held:        lockHeld,
				guardAccess: spanset.SpanReadOnly,
			}
			if waitAsWrite {
				g.state.guardAccess = spanset.SpanReadWrite
			}
			g.notify()
			// waitElsewhere does not cause a push if the lock is not held.
			// It returns immediately.
			if k == waitElsewhere && !lockHeld {
				err := w.WaitOn(ctx, req, g)
				require.Nil(t, err)
				return
			}
			// Non-transactional requests do not push reservations, only locks.
			// They wait for doneWaiting.
			if req.Txn == nil && !lockHeld {
				defer notifyUntilDone(t, g)()
				err := w.WaitOn(ctx, req, g)
				require.Nil(t, err)
				return
			}
			ir.pushTxn = func(
				_ context.Context,
				pusheeArg *enginepb.TxnMeta,
				h roachpb.Header,
				pushType roachpb.PushTxnType,
			) (*roachpb.Transaction, *Error) {
				require.Equal(t, &pusheeTxn.TxnMeta, pusheeArg)
				require.Equal(t, req.Txn, h.Txn)
				require.Equal(t, expPushTS, h.Timestamp)
				// Writers (and reservation pushes) abort; readers of a held
				// lock only push the timestamp.
				if waitAsWrite || !lockHeld {
					require.Equal(t, roachpb.PUSH_ABORT, pushType)
				} else {
					require.Equal(t, roachpb.PUSH_TIMESTAMP, pushType)
				}
				resp := &roachpb.Transaction{TxnMeta: *pusheeArg, Status: roachpb.ABORTED}
				// If the lock is held, we'll try to resolve it now that
				// we know the holder is ABORTED. Otherwise, immediately
				// tell the request to stop waiting.
				if lockHeld {
					w.lt.(*mockLockTable).txnFinalizedFn = func(txn *roachpb.Transaction) {
						require.Equal(t, pusheeTxn.ID, txn.ID)
						require.Equal(t, roachpb.ABORTED, txn.Status)
					}
					ir.resolveIntent = func(_ context.Context, intent roachpb.LockUpdate) *Error {
						require.Equal(t, keyA, intent.Key)
						require.Equal(t, pusheeTxn.ID, intent.Txn.ID)
						require.Equal(t, roachpb.ABORTED, intent.Status)
						g.state = waitingState{kind: doneWaiting}
						g.notify()
						return nil
					}
				} else {
					g.state = waitingState{kind: doneWaiting}
					g.notify()
				}
				return resp, nil
			}
			err := w.WaitOn(ctx, req, g)
			require.Nil(t, err)
		})
	})
}
// testWaitNoopUntilDone verifies that waiting state k causes no push at
// all: the waiter simply waits until the guard transitions to doneWaiting.
func testWaitNoopUntilDone(t *testing.T, k waitKind, makeReq func() Request) {
	ctx := context.Background()
	w, _, g := setupLockTableWaiterTest()
	defer w.stopper.Stop(ctx)
	txn := makeTxnProto("noop-wait-txn")
	g.state = waitingState{
		kind: k,
		txn:  &txn.TxnMeta,
	}
	g.notify()
	defer notifyUntilDone(t, g)()
	err := w.WaitOn(ctx, makeReq(), g)
	require.Nil(t, err)
}
// notifyUntilDone re-notifies the guard after each observed state and
// finally transitions it to doneWaiting. The returned func blocks until
// that whole sequence has been observed by the waiter.
func notifyUntilDone(t *testing.T, g *mockLockTableGuard) func() {
	// Set up an observer channel to detect when the current
	// waiting state is observed.
	g.stateObserved = make(chan struct{})
	done := make(chan struct{})
	go func() {
		<-g.stateObserved
		g.notify()
		<-g.stateObserved
		g.state = waitingState{kind: doneWaiting}
		g.notify()
		<-g.stateObserved
		close(done)
	}()
	return func() { <-done }
}
// TestLockTableWaiterWithErrorWaitPolicy exercises the lockTableWaiter for
// requests using lock.WaitPolicy_Error across each waiting state kind.
func TestLockTableWaiterWithErrorWaitPolicy(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()

	uncertaintyLimit := hlc.Timestamp{WallTime: 15}
	newReq := func() Request {
		reqTxn := makeTxnProto("request")
		reqTxn.GlobalUncertaintyLimit = uncertaintyLimit
		return Request{
			Txn:        &reqTxn,
			Timestamp:  reqTxn.ReadTimestamp,
			WaitPolicy: lock.WaitPolicy_Error,
		}
	}

	t.Run("state", func(t *testing.T) {
		t.Run("waitFor", func(t *testing.T) {
			testErrorWaitPush(t, waitFor, newReq, uncertaintyLimit)
		})
		t.Run("waitForDistinguished", func(t *testing.T) {
			testErrorWaitPush(t, waitForDistinguished, newReq, uncertaintyLimit)
		})
		t.Run("waitElsewhere", func(t *testing.T) {
			testErrorWaitPush(t, waitElsewhere, newReq, uncertaintyLimit)
		})
		t.Run("waitSelf", func(t *testing.T) {
			testWaitNoopUntilDone(t, waitSelf, newReq)
		})
		t.Run("doneWaiting", func(t *testing.T) {
			w, _, g := setupLockTableWaiterTest()
			defer w.stopper.Stop(ctx)
			g.state = waitingState{kind: doneWaiting}
			g.notify()
			require.Nil(t, w.WaitOn(ctx, newReq(), g))
		})
	})
}
// testErrorWaitPush verifies a WaitPolicy_Error request in waiting state
// kind k: if the conflicting lock is not held, the request fails fast
// (except under waitElsewhere); if it is held, the request issues a
// non-blocking PUSH_TOUCH and resolves the intent once the holder is
// found to be finalized.
func testErrorWaitPush(t *testing.T, k waitKind, makeReq func() Request, expPushTS hlc.Timestamp) {
	ctx := context.Background()
	lockKey := roachpb.Key("keyA")
	testutils.RunTrueAndFalse(t, "lockHeld", func(t *testing.T, lockHeld bool) {
		w, ir, g := setupLockTableWaiterTest()
		defer w.stopper.Stop(ctx)

		holderTxn := makeTxnProto("pushee")
		req := makeReq()
		g.state = waitingState{
			kind:        k,
			txn:         &holderTxn.TxnMeta,
			key:         lockKey,
			held:        lockHeld,
			guardAccess: spanset.SpanReadOnly,
		}
		g.notify()

		// Without a held lock, the request errors out immediately; the one
		// exception is waitElsewhere, which succeeds without error.
		if !lockHeld {
			err := w.WaitOn(ctx, req, g)
			if k == waitElsewhere {
				require.Nil(t, err)
			} else {
				require.NotNil(t, err)
				require.Regexp(t, "conflicting intents", err)
			}
			return
		}

		ir.pushTxn = func(
			_ context.Context,
			pusheeArg *enginepb.TxnMeta,
			h roachpb.Header,
			pushType roachpb.PushTxnType,
		) (*roachpb.Transaction, *Error) {
			require.Equal(t, &holderTxn.TxnMeta, pusheeArg)
			require.Equal(t, req.Txn, h.Txn)
			require.Equal(t, expPushTS, h.Timestamp)
			require.Equal(t, roachpb.PUSH_TOUCH, pushType)

			resp := &roachpb.Transaction{TxnMeta: *pusheeArg, Status: roachpb.ABORTED}
			// The holder turned out to be ABORTED, so the intent should be
			// resolved next.
			w.lt.(*mockLockTable).txnFinalizedFn = func(txn *roachpb.Transaction) {
				require.Equal(t, holderTxn.ID, txn.ID)
				require.Equal(t, roachpb.ABORTED, txn.Status)
			}
			ir.resolveIntent = func(_ context.Context, intent roachpb.LockUpdate) *Error {
				require.Equal(t, lockKey, intent.Key)
				require.Equal(t, holderTxn.ID, intent.Txn.ID)
				require.Equal(t, roachpb.ABORTED, intent.Status)
				g.state = waitingState{kind: doneWaiting}
				g.notify()
				return nil
			}
			return resp, nil
		}
		require.Nil(t, w.WaitOn(ctx, req, g))
	})
}
// TestLockTableWaiterIntentResolverError tests that the lockTableWaiter
// propagates errors from its intent resolver when it pushes transactions
// or resolves their intents.
func TestLockTableWaiterIntentResolverError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	w, ir, g := setupLockTableWaiterTest()
	defer w.stopper.Stop(ctx)

	err1 := roachpb.NewErrorf("error1")
	err2 := roachpb.NewErrorf("error2")

	reqTxn := makeTxnProto("request")
	req := Request{
		Txn:       &reqTxn,
		Timestamp: reqTxn.ReadTimestamp,
	}

	// Exercise both the synchronous and the asynchronous push paths.
	// See the comments on pushLockTxn and pushRequestTxn.
	testutils.RunTrueAndFalse(t, "sync", func(t *testing.T, sync bool) {
		lockKey := roachpb.Key("keyA")
		holderTxn := makeTxnProto("pushee")
		lockHeld := sync
		g.state = waitingState{
			kind:        waitForDistinguished,
			txn:         &holderTxn.TxnMeta,
			key:         lockKey,
			held:        lockHeld,
			guardAccess: spanset.SpanReadWrite,
		}

		// An error observed while pushing is returned to the caller.
		g.notify()
		ir.pushTxn = func(
			_ context.Context, _ *enginepb.TxnMeta, _ roachpb.Header, _ roachpb.PushTxnType,
		) (*roachpb.Transaction, *Error) {
			return nil, err1
		}
		require.Equal(t, err1, w.WaitOn(ctx, req, g))

		if lockHeld {
			// An error observed while resolving intents is returned too.
			g.notify()
			ir.pushTxn = func(
				_ context.Context, _ *enginepb.TxnMeta, _ roachpb.Header, _ roachpb.PushTxnType,
			) (*roachpb.Transaction, *Error) {
				return &holderTxn, nil
			}
			ir.resolveIntent = func(_ context.Context, _ roachpb.LockUpdate) *Error {
				return err2
			}
			require.Equal(t, err2, w.WaitOn(ctx, req, g))
		}
	})
}
// TestLockTableWaiterDeferredIntentResolverError tests that the lockTableWaiter
// propagates errors from its intent resolver when it resolves intent batches.
func TestLockTableWaiterDeferredIntentResolverError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	w, ir, g := setupLockTableWaiterTest()
	defer w.stopper.Stop(ctx)

	reqTxn := makeTxnProto("request")
	req := Request{
		Txn:       &reqTxn,
		Timestamp: reqTxn.ReadTimestamp,
	}
	keyA := roachpb.Key("keyA")

	// An ABORTED pushee lets the request skip the transaction record push
	// and defer the intent resolution into a batch.
	pusheeTxn := makeTxnProto("pushee")
	pusheeTxn.Status = roachpb.ABORTED

	g.state = waitingState{
		kind:        doneWaiting,
		guardAccess: spanset.SpanReadWrite,
	}
	g.toResolve = []roachpb.LockUpdate{
		roachpb.MakeLockUpdate(&pusheeTxn, roachpb.Span{Key: keyA}),
	}
	g.notify()

	// An error observed while resolving the deferred batch must surface.
	err1 := roachpb.NewErrorf("error1")
	ir.resolveIntents = func(_ context.Context, intents []roachpb.LockUpdate) *Error {
		require.Len(t, intents, 1)
		require.Equal(t, keyA, intents[0].Key)
		require.Equal(t, pusheeTxn.ID, intents[0].Txn.ID)
		require.Equal(t, roachpb.ABORTED, intents[0].Status)
		return err1
	}
	require.Equal(t, err1, w.WaitOn(ctx, req, g))
}
// TestTxnCache verifies the LRU behavior of txnCache: newest entries sit at
// the front, lookups refresh recency, and the oldest entries are evicted
// once capacity is exceeded.
func TestTxnCache(t *testing.T) {
	var c txnCache
	const overflow = 4
	var txns [len(c.txns) + overflow]roachpb.Transaction
	for i := range txns {
		txns[i] = makeTxnProto(fmt.Sprintf("txn %d", i))
	}

	// Insert every txn and check LRU order after each insert: slot j holds
	// the txn inserted j steps ago, and trailing slots remain empty.
	for i := range txns {
		c.add(&txns[i])
		for j, cached := range c.txns {
			if j <= i {
				require.Equal(t, &txns[i-j], cached)
			} else {
				require.Nil(t, cached)
			}
		}
	}

	// Look up each txn from newest to oldest. LRU recency updates mean the
	// cache order ends up reversed; the first `overflow` txns were evicted.
	for i := len(txns) - 1; i >= 0; i-- {
		cached, ok := c.get(txns[i].ID)
		if i < overflow {
			// Evicted by overflow.
			require.Nil(t, cached)
			require.False(t, ok)
		} else {
			// Still resident.
			require.Equal(t, &txns[i], cached)
			require.True(t, ok)
		}
	}

	// After the reverse scan, the cache should be in ascending order again.
	for i, cached := range c.txns {
		require.Equal(t, &txns[i+overflow], cached)
	}
}
// BenchmarkTxnCache measures mixed add/get traffic against a txnCache using
// a pre-generated random sequence of transactions.
func BenchmarkTxnCache(b *testing.B) {
	rng := rand.New(rand.NewSource(timeutil.Now().UnixNano()))
	var c txnCache
	var txns [len(c.txns) + 4]roachpb.Transaction
	for i := range txns {
		txns[i] = makeTxnProto(fmt.Sprintf("txn %d", i))
	}
	// Pick every operation's target up front so the timed loop measures
	// only cache operations.
	ops := make([]*roachpb.Transaction, b.N)
	for i := range ops {
		ops[i] = &txns[rng.Intn(len(txns))]
	}
	b.ResetTimer()
	for i, op := range ops {
		if i%2 == 0 {
			c.add(op)
		} else {
			_, _ = c.get(op.ID)
		}
	}
}
// TestContentionEventHelper is mostly a regression test that ensures we
// don't accidentally update tBegin when continuing to handle the same
// event. General coverage of the helper results from
// TestConcurrencyManagerBasic.
func TestContentionEventHelper(t *testing.T) {
	tr := tracing.NewTracer()
	sp := tr.StartSpan("foo", tracing.WithForceRealSpan())

	var events []*roachpb.ContentionEvent
	h := contentionEventHelper{
		sp: sp,
		onEvent: func(ev *roachpb.ContentionEvent) {
			events = append(events, ev)
		},
	}
	txn := makeTxnProto("foo")

	// The first waiting state starts the clock but emits nothing yet.
	h.emitAndInit(waitingState{
		kind: waitForDistinguished,
		key:  roachpb.Key("a"),
		txn:  &txn.TxnMeta,
	})
	require.Empty(t, events)
	require.NotZero(t, h.tBegin)
	tBegin := h.tBegin

	// A subsequent state for the same txn/key must neither reset tBegin
	// nor emit an event.
	h.emitAndInit(waitingState{
		kind: waitFor,
		key:  roachpb.Key("a"),
		txn:  &txn.TxnMeta,
	})
	require.Empty(t, events)
	require.Equal(t, tBegin, h.tBegin)

	// Moving to a different key finishes the previous contention event.
	h.emitAndInit(waitingState{
		kind: waitForDistinguished,
		key:  roachpb.Key("b"),
		txn:  &txn.TxnMeta,
	})
	require.Len(t, events, 1)
	require.Equal(t, txn.TxnMeta, events[0].TxnMeta)
	require.Equal(t, roachpb.Key("a"), events[0].Key)
	require.NotZero(t, events[0].Duration)
}
|
package linqo
import "testing"
func TestSelect(t *testing.T) {
const expected = `SELECT firstName,lastName FROM customers WHERE ((totalSpending BETWEEN 100 AND 1000) OR (totalSpending >= 10000)) ORDER BY lastName DESC,firstName DESC;`
stmt := Select("firstName", "lastName").
From("customers").
Where(Or(
Between("totalSpending", "100", "1000"),
GreaterOrEqual("totalSpending", "10000"))).
OrderBy(
SortSpec{
Key: "lastName",
Order: Descending,
},
SortSpec{
Key: "firstName",
Order: Descending,
})
actual := stmt.String()
if actual != expected {
t.Errorf("\nExpected: %s\n Actual: %s", expected, actual)
}
}
// TestSelect2 checks rendering of a bare SELECT * combined with GROUP BY.
func TestSelect2(t *testing.T) {
	const expected = `SELECT * FROM customers GROUP BY lastName;`

	stmt := Select().From("customers").GroupBy("lastName")

	if actual := stmt.String(); actual != expected {
		t.Errorf("\nExpected: %s\n Actual: %s", expected, actual)
	}
}
func TestSelectDistinct(t *testing.T) {
const expected = `SELECT DISTINCT firstName,lastName FROM customers WHERE ((totalSpending BETWEEN 100 AND 1000) OR (totalSpending >= 10000)) ORDER BY lastName COLLATE mycollate DESC,firstName DESC;`
stmt := SelectDistinct("firstName", "lastName").
From("customers").
Where(Or(
Between("totalSpending", "100", "1000"),
GreaterOrEqual("totalSpending", "10000"))).
OrderBy(
SortSpec{
Key: "lastName",
Collate: "mycollate",
Order: Descending,
},
SortSpec{
Key: "firstName",
Order: Descending,
})
actual := stmt.String()
if actual != expected {
t.Errorf("\nExpected: %s\n Actual: %s", expected, actual)
}
}
|
package v1_test
import (
"testing"
knewer "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
/*kolder "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"*/
newer "github.com/openshift/origin/pkg/build/api"
older "github.com/openshift/origin/pkg/build/api/v1"
)
// Convert is a shorthand for the Kubernetes API scheme's Convert function,
// used by the tests below to convert between API versions.
var Convert = knewer.Scheme.Convert
func TestImageChangeTriggerDefaultValueConversion(t *testing.T) {
var actual newer.BuildTriggerPolicy
oldVersion := older.BuildTriggerPolicy{
Type: older.ImageChangeBuildTriggerType,
}
err := Convert(&oldVersion, &actual)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if actual.ImageChange == nil {
t.Errorf("expected %v, actual %v", &newer.ImageChangeTrigger{}, nil)
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.