text stringlengths 11 4.05M |
|---|
package proxy
import (
"github.com/devopsfaith/krakend/config"
"github.com/devopsfaith/krakend/logging"
)
// Factory creates proxies based on the received endpoint configuration.
//
// Both, factories and backend factories, create proxies but factories are designed as a stack makers
// because they are intended to generate the complete proxy stack for a given frontend endpoint
// the app would expose and they could wrap several proxies provided by a backend factory
type Factory interface {
    // New builds the complete proxy stack for one endpoint configuration.
    New(cfg *config.EndpointConfig) (Proxy, error)
}
// DefaultFactory returns a default http proxy factory: it wires the package's
// HTTP backend proxy builder together with the injected logger.
func DefaultFactory(logger logging.Logger) Factory {
    factory := NewDefaultFactory(httpProxy, logger)
    return factory
}
// NewDefaultFactory returns a proxy factory built from an arbitrary backend
// proxy builder and the injected logger.
func NewDefaultFactory(backendFactory BackendFactory, logger logging.Logger) Factory {
    return defaultFactory{
        backendFactory: backendFactory,
        logger:         logger,
    }
}
// defaultFactory is the stock Factory implementation: it delegates per-backend
// proxy creation to backendFactory and keeps a logger for the stack it builds.
type defaultFactory struct {
    backendFactory BackendFactory
    logger         logging.Logger
}
// New implements the Factory interface: it selects the single- or
// multi-backend proxy stack depending on how many backends the endpoint
// declares, and fails with ErrNoBackends when there are none.
func (pf defaultFactory) New(cfg *config.EndpointConfig) (p Proxy, err error) {
    if len(cfg.Backend) == 0 {
        return p, ErrNoBackends
    }
    if len(cfg.Backend) == 1 {
        return pf.newSingle(cfg)
    }
    return pf.newMulti(cfg)
}
// newMulti assembles one proxy pipeline per configured backend
// (backend proxy -> round-robin LB -> optional concurrency -> request builder)
// and merges their responses with the merge-data middleware.
func (pf defaultFactory) newMulti(cfg *config.EndpointConfig) (Proxy, error) {
    pipes := make([]Proxy, 0, len(cfg.Backend))
    for _, backend := range cfg.Backend {
        next := pf.backendFactory(backend)
        next = NewRoundRobinLoadBalancedMiddleware(backend)(next)
        if backend.ConcurrentCalls > 1 {
            next = NewConcurrentMiddleware(backend)(next)
        }
        next = NewRequestBuilderMiddleware(backend)(next)
        pipes = append(pipes, next)
    }
    return NewMergeDataMiddleware(cfg)(pipes...), nil
}
// newSingle builds the proxy pipeline for an endpoint with exactly one
// backend; the middleware order mirrors newMulti, minus the merge step.
func (pf defaultFactory) newSingle(cfg *config.EndpointConfig) (Proxy, error) {
    backend := cfg.Backend[0]
    next := pf.backendFactory(backend)
    next = NewRoundRobinLoadBalancedMiddleware(backend)(next)
    if backend.ConcurrentCalls > 1 {
        next = NewConcurrentMiddleware(backend)(next)
    }
    return NewRequestBuilderMiddleware(backend)(next), nil
}
|
package constants
import (
"time"
)
// Shared constant pool: cache/context keys, network labels, pricing figures,
// and external service endpoints used across the bot.
const (
    // Blockchain network identifiers.
    EthNetwork        = "Rinkeby@Ethereum"
    TenderMintNetwork = "SENTTEST@Tendermint"
    // Per-user state keys; the *TM variants are the Tendermint counterparts
    // of the Ethereum keys.
    EthAddr     = "ETHADDR"
    Timestamp   = "TIMESTAMP"
    TimestampTM = "TIMESTAMPTM"
    Node        = "NODE"
    NodeTM      = "NODETM"
    Bandwidth   = "BANDWIDTH"
    BandwidthTM = "BANDWIDTHTM"
    NodeWallet  = "NODEWALLET"
    // NOTE(review): the lowercase "a" below looks like a typo for
    // "NODEWALLETTM"; value kept as-is because other components may already
    // match this exact string — confirm before changing.
    NodeWalletTM      = "NODEWaLLETTM"
    NodePrice         = "NODEPRICE"
    NodePriceTM       = "NODEPRICETM"
    BlockchainNetwork = "BLOCKCHAINNETWORK"
    // Tendermint address/format constraints.
    TMHashLength   = 40
    TimeLimit      = 30
    TMPrefix       = "cosmosaccaddr"
    WalletTM       = "WALLETTM"
    TMWalletLength = 52
    // Subscription durations and their display labels / prices.
    TenDays             = time.Hour * 24 * 10
    Month               = time.Hour * 24 * 30
    ThreeMonths         = time.Hour * 24 * 90
    NodeBasePrice       = "10"
    NodeMonthPrice      = "30"
    NodeThreeMonthPrice = "80"
    ThreeM              = "90 Days"
    OneM                = "30 Days"
    TenD                = "10 Days"
    // Zero-padding used when building token amounts (18+6 decimal places).
    ZFill = "000000000000000000000000"
    IPAddr            = "IPADDR"
    IPAddrTM          = "IPADDRTM"
    AssignedNodeURI   = "ASSIGNEDNODEURI"
    AssignedNodeURITM = "ASSIGNEDNODEURITM"
    IsAuth            = "ISAUTH"
    IsAuthTM          = "ISAUTHTM"
    Password          = "PASSWORD"
    PasswordTM        = "PASSWORDTM"
    // NOTE(security): this URL embeds an Etherscan API key in source; it
    // should live in configuration/environment instead of a constant.
    // NOTE(review): naming is inconsistent — TestSentURI1 vs TestSendURI2.
    TestSentURI1 = `https://api-rinkeby.etherscan.io/api?apikey=Y5BJ5VA3XZ59F63XQCQDDUWU2C29144MMM&module=logs&action=getLogs&fromBlock=0&toBlock=latest&address=0x29317B796510afC25794E511e7B10659Ca18048B&topic0=0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef&topic0_1_opr=and&topic1=`
    TestSendURI2 = `&topic1_2_opr=or&topic2=`
    TMTxnURL     = "http://localhost:1317/txs/%s"
    // Matches a 0x-prefixed 40-hex-digit Ethereum address.
    EthRegex     = "^(0x){1}[0-9a-fA-F]{40}$"
    ReplyButton  = "replyButton"
    InlineButton = "inlineButton"
    ProxyURL     = "https://t.me/socks?server=%s&port=%s&user=%s&pass=%s"
    IPLEAKURL    = "https://ipleak.net/json/"
    NodeBaseUrl  = "http://%s:30002/user"
    // NOTE(review): hard-coded service IPs below; consider configuration.
    GetTXNFromMN   = "http://35.154.179.57:8000/txes?fromAccountAddress=%s"
    PasswordLength = 12
    SentinelTONURL = "http://35.154.179.57:8000/nodes?type=OpenVPN&status=up"
    TMBalanceURL   = "http://localhost:1317/accounts/%s"
    NodeType       = "tendermint"
    EthState       = "ETHSTATE"
    TMTimeLimit    = "TMTimeLimit"
    TMState        = "TMSTATE"
    NoState        = -1
    MinBal         = 10000000
)
// Ethereum conversation-state machine values (1-based; 0 is "no state").
const (
    EthState0 = iota + 1
    EthState1
    EthState2
    EthState3
    EthState4
)
// Tendermint conversation-state machine values (1-based; 0 is "no state").
const (
    TMState0 = iota + 1
    TMState1
    TMState2
    TMState3
    TMState4
    TMState5
)
|
package models
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"errors"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"strings"
"time"
"wishCollection/utility"
"encoding/json"
"fmt"
uuid "github.com/satori/go.uuid"
)
// Random-data pools used to fabricate account identities; populated once by
// initValue via init. NOTE(review): "contrys" is a misspelling of
// "countries"; renaming would touch unseen parts of the package.
var (
    firstName []string
    lastName  []string
    emailType []string
    contrys   []string
)
// init seeds the package-level name/email/country pools at load time.
func init() {
    initValue()
}
// LoginInfo mirrors the JSON envelope returned by the wish.com signup/login
// API. Code != 0 indicates a failed request; Msg carries the server message.
type LoginInfo struct {
    Msg  string `json:"msg"`
    Code int    `json:"code"`
    Data struct {
        AlreadyHadApp        bool        `json:"already_had_app"`
        SessionToken         string      `json:"session_token"`
        NewUser              bool        `json:"new_user"`
        DeferredDeepLinkType interface{} `json:"deferred_deep_link_type"`
        SignupFlowType       string      `json:"signup_flow_type"`
        User                 string      `json:"user"` // server-side user id
        DeferredDeepLinkPid  interface{} `json:"deferred_deep_link_pid"`
    } `json:"data"`
    SweeperUUID string `json:"sweeper_uuid"`
    NotiCount   int    `json:"noti_count"`
}
// User is the ORM model for a fabricated account plus the session artifacts
// (cookies, device ids) captured during signup.
type User struct {
    Id                    int64  `orm:"auto"`
    Baid                  string `json:"baid"`            // "bsid" cookie value
    SweeperSession        string `json:"sweeper_session"` // session cookie value
    Email                 string
    Password              string
    RiskifiedSessionToken string `json:"riskified_session_token"`
    AdvertiserId          string `json:"advertiser_id"`
    AppDeviceID           string
    Country               string `json:"country"`
    FullName              string
    HasAddress            int
    Invalid               int // whether the account is banned: 0 = not banned, 1 = banned
    UserId                string
    Updated               time.Time `orm:"auto_now;type(datetime)"`
}
// RegisterUser fabricates a random identity (name + timestamped email),
// signs it up via registIdWith, and creates its initial wish list.
// Returns the created User, or an error when signup or list creation fails.
// Fixes: the original had an unreachable trailing return after an
// if/else that covered all paths; the ladder is flattened to early returns.
func RegisterUser() (user User, err error) {
    r := rand.New(rand.NewSource(time.Now().UnixNano()))
    f := firstName[r.Intn(len(firstName))]
    l := lastName[r.Intn(len(lastName))]
    // The Unix-timestamp prefix keeps generated addresses unique across runs.
    e := fmt.Sprintf("%d%s", time.Now().Unix(), emailType[r.Intn(len(emailType))])
    user, err = registIdWith(e, "1234567890", f, l)
    if err != nil {
        return user, err
    }
    if c := CreateWishList(user); c.Code != 0 {
        utility.SendLog(fmt.Sprint(c.Msg))
        // Deliberate 5-minute back-off before reporting failure (rate limiting).
        time.Sleep(time.Minute * 5)
        return user, errors.New("创建失败")
    }
    return user, nil
}
// registIdWith signs up a new account (POST https://www.wish.com/api/email-signup)
// and returns a User populated from the response body and session cookies.
//
// Fixes over the original:
//   - url.Values.Set on the repeated "_capabilities[]" key overwrote earlier
//     values, so only "9" was ever sent; Add preserves every value.
//   - http.NewRequest's error was ignored and a nil request could be used.
//   - a failed client.Do only printed the error and then dereferenced a nil
//     response; it now returns the error.
//   - the response body is now closed, and read/parse failures return instead
//     of continuing with empty data.
//   - the HTTP client gets a timeout so a stalled server cannot hang forever.
func registIdWith(email, password, firstName, lastName string) (User, error) {
    params := url.Values{}
    params.Set("_app_type", "wish")
    params.Set("_version", "3.20.0")
    params.Set("_client", "iosapp")
    params.Set("_xsrf", "1")
    params.Set("app_device_model", "iPhone9,2")
    // BUG FIX: use Add (not Set) so every capability survives.
    for _, capability := range []string{
        "11", "12", "13", "15", "2", "21", "24", "25", "28", "32",
        "35", "39", "4", "40", "43", "47", "6", "7", "8", "9",
    } {
        params.Add("_capabilities[]", capability)
    }
    AdvertiserID := strings.ToUpper(uuid.NewV4().String())
    params.Set("advertiser_id", AdvertiserID)
    riskifiedSessionToken := strings.ToUpper(uuid.NewV4().String())
    params.Set("_riskified_session_token", riskifiedSessionToken)
    // Derive a pseudo-random device id: HMAC-SHA1 of the current time under a
    // random UUID key, hex-encoded.
    key := []byte(strings.ToUpper(uuid.NewV4().String()))
    mac := hmac.New(sha1.New, key)
    mac.Write([]byte(time.Now().String()))
    appDeviceID := fmt.Sprintf("%x", mac.Sum(nil))
    params.Set("app_device_id", appDeviceID)
    params.Set("first_name", firstName)
    params.Set("last_name", lastName)
    params.Set("password", password)
    params.Set("email", email)
    body := bytes.NewBufferString(params.Encode())
    // BUG FIX: bounded client so a stalled server cannot hang the caller.
    client := &http.Client{Timeout: 30 * time.Second}
    req, err := http.NewRequest("POST", "https://www.wish.com/api/email-signup", body)
    if err != nil {
        // BUG FIX: previously ignored; a nil req would have been used below.
        return User{}, err
    }
    req.Header.Add("Cookie", "_xsrf=1; _appLocale=zh-Hans-CN; _timezone=8")
    req.Header.Add("User-Agent", "Wish/3.20.0 (iPhone; iOS 10.3.1; Scale/3.00)")
    req.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
    resp, err := client.Do(req)
    if err != nil {
        fmt.Println("Failure : ", err)
        // BUG FIX: execution previously continued and dereferenced nil resp.
        return User{}, err
    }
    // BUG FIX: the body was never closed (connection leak).
    defer resp.Body.Close()
    respBody, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        utility.Errorln(err)
        return User{}, err
    }
    var loginInfo LoginInfo
    if err := json.Unmarshal(respBody, &loginInfo); err != nil {
        utility.Errorln(err)
        if e, ok := err.(*json.SyntaxError); ok {
            utility.Errorln(e)
        }
        // BUG FIX: previously fell through and treated the zero-value
        // loginInfo (Code == 0) as a successful signup.
        return User{}, err
    }
    user := User{}
    if loginInfo.Code != 0 {
        fmt.Println(loginInfo, email)
        return user, errors.New("login error")
    }
    user.UserId = loginInfo.Data.User
    user.Email = email
    user.Password = password
    user.AppDeviceID = appDeviceID
    user.AdvertiserId = AdvertiserID
    user.RiskifiedSessionToken = riskifiedSessionToken
    user.FullName = firstName + " " + lastName
    // Capture the session cookies the server sets on successful signup.
    for _, cookie := range resp.Cookies() {
        switch cookie.Name {
        case "bsid":
            user.Baid = cookie.Value
        case "sweeper_session":
            user.SweeperSession = cookie.Value
        }
    }
    return user, nil
}
// initValue fills the package-level pools of first names, last names, email
// domains, and country codes used when fabricating accounts.
func initValue() {
    firstName = []string{
        "Aaron", "Abbott", "Abel", "Abner", "Abraham", "Adair", "Adam", "Addison",
        "Adolph", "Adonis", "Adrian", "Ahern", "Alan", "Albert", "Aldrich", "Alexander",
        "Alfred", "Alger", "Algernon", "Allen", "Alston", "Alva", "Alvin", "Alvis", "Amos", "Andre",
        "Andrew", "Andy", "Angelo", "Augus", "Ansel", "Antony", "Bevis", "Bill", "Bishop", "Blair", "Blake",
        "Bob", "Clarence", "Clark", "Claude", "Clyde", "Colin", "Dana", "Darnell", "Darcy", "Dempsey", "Dominic",
        "Edwiin", "Edward", "Elvis", "Fabian", "Frank", "Gale", "Gilbert", "Goddard", "Grover", "Hayden",
        "Hogan", "Hunter", "Isaac", "Ingram", "Isidore", "Jacob", "Jason", "Jay", "Jeff", "Jeremy", "Jesse",
        "Jerry", "Jim", "Jonathan", "Joseph", "Joshua", "Julian", "Julius", "Ken", "Kennedy", "Kent",
        "Kerr", "Kerwin", "Kevin", "Kirk", "King", "Lance", "Larry", "Leif", "Leonard", "Leopold", "Lewis",
        "Lionel", "Lucien", "Lyndon", "Magee", "Malcolm", "Mandel", "Marico", "Marsh", "Marvin", "Maximilian",
        "Meredith", "Merlin", "Mick", "Michell", "Monroe", "Montague", "Moore", "Mortimer", "Moses", "Nat",
        "Nathaniel", "Neil", "Nelson", "Newman", "Nicholas", "Nick", "Noah", "Noel", "Norton", "Ogden",
        "Oliver", "Omar", "Orville", "Osborn", "Oscar", "Osmond", "Oswald", "Otis", "Otto", "Owen", "Page", "Parker",
        "Paddy", "Patrick", "Paul", "Payne", "Perry", "Pete", "Peter", "Philip", "Phil",
        "Primo", "Quennel", "Quincy", "Quintion", "Rachel", "Ralap", "Randolph", "Robin", "Rodney", "Ron",
        "Roy", "Rupert", "Ryan", "Sampson", "Samuel", "Simon", "Stan", "Stanford", "Steward",
    }
    // NOTE: the list intentionally(?) contains duplicates ("Baker", "Smith",
    // "Turner"), which skews their selection probability — confirm if desired.
    lastName = []string{
        "Baker", "Hunter", "Carter", "Smith", "Cook", "Turner", "Baker", "Miller", "Smith", "Turner", "Hall",
        "Hill", "Lake", "Field", "Green", "Wood", "Well", "Brown", "Longman", "Short", "White", "Sharp",
        "Hard", "Yonng", "Sterling", "Hand", "Bull", "Fox", "Hawk", "Bush", "Stock", "Cotton", "Reed",
        "George", "Henry", "David", "Clinton", "Macadam", "Abbot", "Abraham", "Acheson", "Ackerman", "Adam",
        "Addison", "Adela", "Adolph", "Agnes", "Albert", "Alcott", "Aldington", "Alerander", "Alick", "Amelia",
        "Adams",
    }
    emailType = []string{
        "@gmail.com", "@qq.com", "@126.com", "@163.com", "@vip.sina.com", "@sina.com", "@tom.com", "@263.com", "@189.com", "@outlook.com",
    }
    // ISO 3166-1 alpha-2 country codes.
    contrys = []string{
        "AU",
        "GB",
        "US",
        "FR",
        "DE",
        "CA",
        "HK",
        "VN",
        "SG",
        "MY",
        "JP",
        "KR",
        "IN",
        "ID",
        "GR",
        "BR",
        "FI",
        "AT",
        "ES",
        "RU",
        "NO",
        "SE",
        "NL",
        "CH",
        "DK",
        "IT",
    }
}
|
package user
import (
"net/http"
"github.com/gorilla/mux"
"github.com/jmoiron/sqlx"
)
// Delete returns an HTTP handler that removes the user whose id is taken
// from the "id" path variable; a failed DELETE answers 400 Bad Request.
func Delete(db *sqlx.DB) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-type", "application/json")
        const query = "Delete from user where id = ?"
        if _, err := db.Exec(query, mux.Vars(r)["id"]); err != nil {
            w.WriteHeader(http.StatusBadRequest)
        }
    }
}
|
package requests
import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "strings"

    "github.com/google/go-querystring/query"

    "github.com/atomicjolt/canvasapi"
    "github.com/atomicjolt/canvasapi/models"
    "github.com/atomicjolt/string_utils"
)
// ListPagesCourses A paginated list of the wiki pages associated with a course or group
// https://canvas.instructure.com/doc/api/pages.html
//
// Path Parameters:
// # Path.CourseID (Required) ID
//
// Query Parameters:
// # Query.Sort (Optional) . Must be one of title, created_at, updated_at. Sort results by this field.
// # Query.Order (Optional) . Must be one of asc, desc. The sorting order. Defaults to 'asc'.
// # Query.SearchTerm (Optional) The partial title of the pages to match and return.
// # Query.Published (Optional) If true, include only published pages. If false, exclude published
// pages. If not present, do not filter on published status.
//
type ListPagesCourses struct {
    Path struct {
        CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required)
    } `json:"path"`

    Query struct {
        Sort       string `json:"sort" url:"sort,omitempty"`               // (Optional) . Must be one of title, created_at, updated_at
        Order      string `json:"order" url:"order,omitempty"`             // (Optional) . Must be one of asc, desc
        SearchTerm string `json:"search_term" url:"search_term,omitempty"` // (Optional)
        Published  bool   `json:"published" url:"published,omitempty"`     // (Optional)
    } `json:"query"`
}
// GetMethod returns the HTTP method for this request.
func (t *ListPagesCourses) GetMethod() string {
    return "GET"
}
// GetURLPath renders the endpoint path with the configured course ID.
func (t *ListPagesCourses) GetURLPath() string {
    return strings.ReplaceAll(
        "courses/{course_id}/pages",
        "{course_id}",
        fmt.Sprintf("%v", t.Path.CourseID),
    )
}
// GetQuery encodes the Query struct into a URL query string.
func (t *ListPagesCourses) GetQuery() (string, error) {
    values, err := query.Values(t.Query)
    if err != nil {
        return "", err
    }
    return values.Encode(), nil
}
// GetBody returns nil: this GET request carries no form body.
func (t *ListPagesCourses) GetBody() (url.Values, error) {
    return nil, nil
}
// GetJSON returns nil: this GET request carries no JSON body.
func (t *ListPagesCourses) GetJSON() ([]byte, error) {
    return nil, nil
}
// HasErrors validates the required path parameter and the enum-constrained
// query parameters, returning a single combined error or nil.
func (t *ListPagesCourses) HasErrors() error {
    errs := []string{}
    if t.Path.CourseID == "" {
        errs = append(errs, "'Path.CourseID' is required")
    }
    if t.Query.Sort != "" && !string_utils.Include([]string{"title", "created_at", "updated_at"}, t.Query.Sort) {
        errs = append(errs, "Sort must be one of title, created_at, updated_at")
    }
    if t.Query.Order != "" && !string_utils.Include([]string{"asc", "desc"}, t.Query.Order) {
        errs = append(errs, "Order must be one of asc, desc")
    }
    if len(errs) > 0 {
        // FIX: fmt.Errorf with a non-constant format string is flagged by
        // go vet and would mangle any '%' in the message; errors.New is safe.
        return errors.New(strings.Join(errs, ", "))
    }
    return nil
}
// Do executes the request (or follows a pagination link when next is given),
// decodes the response into pages, and extracts the pagination metadata.
// FIX: the original contained a duplicated, dead `if err != nil` check
// immediately after the first one; it is removed.
func (t *ListPagesCourses) Do(c *canvasapi.Canvas, next *url.URL) ([]*models.Page, *canvasapi.PagedResource, error) {
    var response *http.Response
    var err error
    if next != nil {
        // Follow a pagination URL from a previous response.
        response, err = c.Send(next, t.GetMethod(), nil)
    } else {
        response, err = c.SendRequest(t)
    }
    if err != nil {
        return nil, nil, err
    }
    body, err := ioutil.ReadAll(response.Body)
    response.Body.Close()
    if err != nil {
        return nil, nil, err
    }
    ret := []*models.Page{}
    if err := json.Unmarshal(body, &ret); err != nil {
        return nil, nil, err
    }
    pagedResource, err := canvasapi.ExtractPagedResource(response.Header)
    if err != nil {
        return nil, nil, err
    }
    return ret, pagedResource, nil
}
|
package main
import (
"os"
"os/signal"
"reflect"
"runtime/pprof"
"github.com/woobest/network"
"github.com/woobest/network/socket"
"github.com/woobest/protocol/pb/msgdef"
)
// main profiles a simple echo server: it registers a handler for
// TestEchoACK messages, accepts connections on 127.0.0.1:10086, and runs
// until interrupted, writing a CPU profile to "profile_file".
func main() {
    f, err := os.Create("profile_file")
    if err != nil {
        // FIX: the error was previously discarded; StartCPUProfile on an
        // invalid file would silently produce no profile.
        os.Stderr.WriteString("cannot create profile_file: " + err.Error() + "\n")
        return
    }
    defer f.Close() // FIX: the profile file was never closed
    pprof.StartCPUProfile(f)
    defer pprof.StopCPUProfile()
    network.RegisterMessageMeta("pb", 1, reflect.TypeOf((*msgdef.TestEchoACK)(nil)).Elem(), func(s network.Session, msg interface{}, meta *network.MessageMeta) {
        pack := msg.(*msgdef.TestEchoACK)
        pack.Content = "hi"
        //fmt.Println(pack.Content)
    })
    peer := socket.NewAcceptor(network.NewProtocol()).Start("127.0.0.1:10086")
    if peer == nil {
        return
    }
    // FIX (go vet): os.Kill (SIGKILL) cannot be trapped by signal.Notify,
    // so registering it was a no-op; only os.Interrupt is deliverable.
    osSignal := make(chan os.Signal, 2)
    signal.Notify(osSignal, os.Interrupt)
    <-osSignal
    peer.Stop()
}
|
package v2
import (
"errors"
"log"
"net/http"
"net/url"
"github.com/labstack/echo/v4"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/handler/v2/openapi"
"github.com/traPtitech/trap-collection-server/src/service"
)
// GameImage bundles the HTTP handlers for the v2 game-image endpoints,
// delegating all business logic to the injected image service.
type GameImage struct {
    gameImageService service.GameImageV2
}
// NewGameImage constructs the game-image handler with its service dependency.
func NewGameImage(gameImageService service.GameImageV2) *GameImage {
    handler := &GameImage{gameImageService: gameImageService}
    return handler
}
// GetGameImages returns the list of images registered for a game.
// (GET /games/{gameID}/images)
// An unknown game maps to 404; unexpected failures map to 500.
func (gameImage *GameImage) GetGameImages(c echo.Context, gameID openapi.GameIDInPath) error {
    images, err := gameImage.gameImageService.GetGameImages(c.Request().Context(), values.NewGameIDFromUUID(gameID))
    if errors.Is(err, service.ErrInvalidGameID) {
        return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
    }
    if err != nil {
        log.Printf("error: failed to get game images: %v\n", err)
        return echo.NewHTTPError(http.StatusInternalServerError, "failed to get game images")
    }
    resImages := make([]openapi.GameImage, 0, len(images))
    for _, image := range images {
        // Map the domain image type to its OpenAPI MIME enum; an unknown type
        // is a server-side inconsistency and aborts with 500.
        var mime openapi.GameImageMime
        switch image.GetType() {
        case values.GameImageTypeJpeg:
            mime = openapi.Imagejpeg
        case values.GameImageTypePng:
            mime = openapi.Imagepng
        case values.GameImageTypeGif:
            mime = openapi.Imagegif
        default:
            log.Printf("error: unknown game image type: %v\n", image.GetType())
            return echo.NewHTTPError(http.StatusInternalServerError, "unknown game image type")
        }
        resImages = append(resImages, openapi.GameImage{
            Id:        openapi.GameImageID(image.GetID()),
            Mime:      mime,
            CreatedAt: image.GetCreatedAt(),
        })
    }
    return c.JSON(http.StatusOK, resImages)
}
// PostGameImage uploads a new image for a game from the "content" form file.
// (POST /games/{gameID}/images)
// (Original comment said "game file creation" — presumably a copy/paste slip;
// this handler deals with images.)
func (gameImage *GameImage) PostGameImage(c echo.Context, gameID openapi.GameIDInPath) error {
    header, err := c.FormFile("content")
    if err != nil {
        return echo.NewHTTPError(http.StatusBadRequest, "invalid file")
    }
    file, err := header.Open()
    if err != nil {
        log.Printf("error: failed to open file: %v\n", err)
        return echo.NewHTTPError(http.StatusInternalServerError, "failed to open file")
    }
    defer file.Close()
    image, err := gameImage.gameImageService.SaveGameImage(c.Request().Context(), file, values.NewGameIDFromUUID(gameID))
    // Service error mapping: unknown game -> 404, unsupported format -> 400,
    // anything else -> 500.
    if errors.Is(err, service.ErrInvalidGameID) {
        return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
    }
    if errors.Is(err, service.ErrInvalidFormat) {
        return echo.NewHTTPError(http.StatusBadRequest, "invalid image type")
    }
    if err != nil {
        log.Printf("error: failed to save game image: %v\n", err)
        return echo.NewHTTPError(http.StatusInternalServerError, "failed to save game image")
    }
    // Translate the stored image type into the OpenAPI MIME enum.
    var mime openapi.GameImageMime
    switch image.GetType() {
    case values.GameImageTypeJpeg:
        mime = openapi.Imagejpeg
    case values.GameImageTypePng:
        mime = openapi.Imagepng
    case values.GameImageTypeGif:
        mime = openapi.Imagegif
    default:
        log.Printf("error: unknown game image type: %v\n", image.GetType())
        return echo.NewHTTPError(http.StatusInternalServerError, "unknown game image type")
    }
    return c.JSON(http.StatusCreated, openapi.GameImage{
        Id:        openapi.GameImageID(image.GetID()),
        Mime:      mime,
        CreatedAt: image.GetCreatedAt(),
    })
}
// GetGameImage redirects the client to a temporary URL serving the image binary.
// (GET /games/{gameID}/images/{gameImageID})
func (gameImage *GameImage) GetGameImage(c echo.Context, gameID openapi.GameIDInPath, gameImageID openapi.GameImageIDInPath) error {
    tmpURL, err := gameImage.gameImageService.GetGameImage(c.Request().Context(), values.NewGameIDFromUUID(gameID), values.GameImageIDFromUUID(gameImageID))
    switch {
    case errors.Is(err, service.ErrInvalidGameID):
        return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
    case errors.Is(err, service.ErrInvalidGameImageID):
        return echo.NewHTTPError(http.StatusNotFound, "invalid gameImageID")
    case err != nil:
        log.Printf("error: failed to get game image: %v\n", err)
        return echo.NewHTTPError(http.StatusInternalServerError, "failed to get game image")
    }
    return c.Redirect(http.StatusSeeOther, (*url.URL)(tmpURL).String())
}
// GetGameImageMeta returns the metadata (id, MIME type, creation time) of a
// single game image.
// (GET /games/{gameID}/images/{gameImageID}/meta)
func (gameImage *GameImage) GetGameImageMeta(ctx echo.Context, gameID openapi.GameIDInPath, gameImageID openapi.GameImageIDInPath) error {
    image, err := gameImage.gameImageService.GetGameImageMeta(ctx.Request().Context(), values.NewGameIDFromUUID(gameID), values.GameImageIDFromUUID(gameImageID))
    // Service error mapping: unknown game or image -> 404, else -> 500.
    if errors.Is(err, service.ErrInvalidGameID) {
        return echo.NewHTTPError(http.StatusNotFound, "invalid gameID")
    }
    if errors.Is(err, service.ErrInvalidGameImageID) {
        return echo.NewHTTPError(http.StatusNotFound, "invalid gameImageID")
    }
    if err != nil {
        log.Printf("error: failed to get game image meta: %v\n", err)
        return echo.NewHTTPError(http.StatusInternalServerError, "failed to get game image meta")
    }
    // Translate the stored image type into the OpenAPI MIME enum.
    var mime openapi.GameImageMime
    switch image.GetType() {
    case values.GameImageTypeJpeg:
        mime = openapi.Imagejpeg
    case values.GameImageTypePng:
        mime = openapi.Imagepng
    case values.GameImageTypeGif:
        mime = openapi.Imagegif
    default:
        log.Printf("error: unknown game image type: %v\n", image.GetType())
        return echo.NewHTTPError(http.StatusInternalServerError, "unknown game image type")
    }
    return ctx.JSON(http.StatusOK, openapi.GameImage{
        Id:        openapi.GameImageID(image.GetID()),
        Mime:      mime,
        CreatedAt: image.GetCreatedAt(),
    })
}
|
// while copying of maps, impacts both the value
// so inshort maps are not copied
package main
import "fmt"
// main demonstrates that assigning a map copies only the map header: both
// names share the same underlying storage, so a write or delete through one
// is visible through the other.
func main() {
    planets := map[string]string{
        "Earth": "Sector ZZ9",
        "Mars":  "Sector ZZ9",
    }
    alias := planets // not a copy — same underlying map
    planets["Earth"] = "whoops"
    fmt.Println(planets)
    fmt.Println(alias)
    // Removing a key is likewise visible through both names.
    delete(planets, "Earth")
    fmt.Println(planets)
}
// map[Earth:whoops Mars:Sector ZZ9]
// map[Earth:whoops Mars:Sector ZZ9]
// map[Mars:Sector ZZ9]
|
/*
Copyright 2022 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"regexp"
"github.com/blang/semver"
shell "github.com/kballard/go-shellquote"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
)
var (
    // VersionRegex extracts version from "helm version --client", for instance: "2.14.0-rc.2"
    VersionRegex = regexp.MustCompile(`v?(\d[\w.\-]+)`)

    // OSExecutable allows for replacing the skaffold binary for testing purposes
    OSExecutable = os.Executable

    // WriteBuildArtifacts is meant to be reassigned for testing
    WriteBuildArtifacts = writeBuildArtifacts
)
// Client supplies the deploy-time configuration the helm integration needs:
// kube context/config selection, labels and overrides to inject, and flags to
// forward to every helm invocation.
type Client interface {
    EnableDebug() bool
    OverrideProtocols() []string
    ConfigFile() string
    KubeConfig() string
    KubeContext() string
    Labels() map[string]string
    GlobalFlags() []string
    ManifestOverrides() map[string]string
}
// BinVer returns the version of the helm binary found in PATH.
func BinVer(ctx context.Context) (semver.Version, error) {
    out, err := util.RunCmdOut(ctx, exec.Command("helm", "version", "--client"))
    if err != nil {
        return semver.Version{}, fmt.Errorf("helm version command failed %q: %w", string(out), err)
    }
    versionOutput := string(out)
    match := VersionRegex.FindStringSubmatch(versionOutput)
    if len(match) == 0 {
        return semver.Version{}, fmt.Errorf("unable to parse output: %q", versionOutput)
    }
    // ParseTolerant accepts versions missing minor/patch components.
    return semver.ParseTolerant(match[1])
}
// PrepareSkaffoldFilter locates the running Skaffold binary, writes the build
// artifacts file (when any builds exist), and returns the environment
// variables helm hooks need to invoke "skaffold filter". cleanup removes the
// artifacts file and is nil when no builds were written.
func PrepareSkaffoldFilter(h Client, builds []graph.Artifact) (skaffoldBinary string, env []string, cleanup func(), err error) {
    skaffoldBinary, err = OSExecutable()
    if err != nil {
        return "", nil, nil, fmt.Errorf("cannot locate this Skaffold binary: %w", err)
    }
    var buildsFile string
    if len(builds) > 0 {
        buildsFile, cleanup, err = WriteBuildArtifacts(builds)
        if err != nil {
            return "", nil, nil, fmt.Errorf("could not write build-artifacts: %w", err)
        }
    }
    filterCmd := generateSkaffoldFilter(h, buildsFile)
    env = append(env,
        fmt.Sprintf("SKAFFOLD_CMDLINE=%s", shell.Join(filterCmd...)),
        fmt.Sprintf("SKAFFOLD_FILENAME=%s", h.ConfigFile()),
    )
    return skaffoldBinary, env, cleanup, nil
}
// generateSkaffoldFilter creates a "skaffold filter" command-line for applying
// the various Skaffold manifest filters, such as debugging, image replacement,
// and applying labels.
func generateSkaffoldFilter(h Client, buildsFile string) []string {
    cmdLine := []string{"filter", "--kube-context", h.KubeContext()}
    if h.EnableDebug() {
        cmdLine = append(cmdLine, "--debugging")
        for _, proto := range h.OverrideProtocols() {
            cmdLine = append(cmdLine, fmt.Sprintf("--protocols=%s", proto))
        }
    }
    for key, value := range h.Labels() {
        cmdLine = append(cmdLine, fmt.Sprintf("--label=%s=%s", key, value))
    }
    for key, value := range h.ManifestOverrides() {
        cmdLine = append(cmdLine, fmt.Sprintf("--set=%s=%s", key, value))
    }
    if len(buildsFile) > 0 {
        cmdLine = append(cmdLine, "--build-artifacts", buildsFile)
    }
    cmdLine = append(cmdLine, h.GlobalFlags()...)
    if kubeConfig := h.KubeConfig(); kubeConfig != "" {
        cmdLine = append(cmdLine, "--kubeconfig", kubeConfig)
    }
    return cmdLine
}
// generateHelmCommand assembles the final helm invocation: the kube context
// is prepended, global flags and optional kubeconfig appended, and the whole
// command is routed through "helm secrets" when secret support is requested.
func generateHelmCommand(ctx context.Context, h Client, useSecrets bool, env []string, args ...string) *exec.Cmd {
    full := []string{"--kube-context", h.KubeContext()}
    full = append(full, args...)
    full = append(full, h.GlobalFlags()...)
    if kubeConfig := h.KubeConfig(); kubeConfig != "" {
        full = append(full, "--kubeconfig", kubeConfig)
    }
    if useSecrets {
        full = append([]string{"secrets"}, full...)
    }
    cmd := exec.CommandContext(ctx, "helm", full...)
    if len(env) > 0 {
        cmd.Env = env
    }
    return cmd
}
// Exec executes the helm command, writing combined stdout/stderr to the provided writer
func Exec(ctx context.Context, h Client, out io.Writer, useSecrets bool, env []string, args ...string) error {
    cmd := generateHelmCommand(ctx, h, useSecrets, env, args...)
    cmd.Stdout, cmd.Stderr = out, out
    return util.RunCmd(ctx, cmd)
}
// ExecWithStdoutAndStderr executes the helm command, writing stdout and
// stderr to their respective writers.
func ExecWithStdoutAndStderr(ctx context.Context, h Client, stdout io.Writer, stderr io.Writer, useSecrets bool, env []string, args ...string) error {
    cmd := generateHelmCommand(ctx, h, useSecrets, env, args...)
    cmd.Stdout, cmd.Stderr = stdout, stderr
    return util.RunCmd(ctx, cmd)
}
|
// Package color provides color convention and useful functions
package color
// CSS/X11 named colors, one exported value per name.
// FIX: "LimegreAliceblueen" is a garbled identifier (apparently "Limegreen"
// with "Aliceblue" pasted into the middle); a correctly named Limegreen is
// added as a backward-compatible alias and the old name is deprecated.
var Aliceblue = NewFromHEX(0xf0f8ff)
var Antiquewhite = NewFromHEX(0xfaebd7)
var Aqua = NewFromHEX(0x00ffff)
var Aquamarine = NewFromHEX(0x7fffd4)
var Azure = NewFromHEX(0xf0ffff)
var Beige = NewFromHEX(0xf5f5dc)
var Bisque = NewFromHEX(0xffe4c4)
var Black = NewFromHEX(0x000000)
var Blanchedalmond = NewFromHEX(0xffebcd)
var Blue = NewFromHEX(0x0000ff)
var Blueviolet = NewFromHEX(0x8a2be2)
var Brown = NewFromHEX(0xa52a2a)
var Burlywood = NewFromHEX(0xdeb887)
var Cadetblue = NewFromHEX(0x5f9ea0)
var Chartreuse = NewFromHEX(0x7fff00)
var Chocolate = NewFromHEX(0xd2691e)
var Coral = NewFromHEX(0xff7f50)
var Cornflowerblue = NewFromHEX(0x6495ed)
var Cornsilk = NewFromHEX(0xfff8dc)
var Crimson = NewFromHEX(0xdc143c)
var Cyan = NewFromHEX(0x00ffff)
var Darkblue = NewFromHEX(0x00008b)
var Darkcyan = NewFromHEX(0x008b8b)
var Darkgoldenrod = NewFromHEX(0xb8860b)
var Darkgray = NewFromHEX(0xa9a9a9)
var Darkgreen = NewFromHEX(0x006400)
var Darkkhaki = NewFromHEX(0xbdb76b)
var Darkmagenta = NewFromHEX(0x8b008b)
var Darkolivegreen = NewFromHEX(0x556b2f)
var Darkorange = NewFromHEX(0xff8c00)
var Darkorchid = NewFromHEX(0x9932cc)
var Darkred = NewFromHEX(0x8b0000)
var Darksalmon = NewFromHEX(0xe9967a)
var Darkseagreen = NewFromHEX(0x8fbc8f)
var Darkslateblue = NewFromHEX(0x483d8b)
var Darkslategray = NewFromHEX(0x2f4f4f)
var Darkturquoise = NewFromHEX(0x00ced1)
var Darkviolet = NewFromHEX(0x9400d3)
var Deeppink = NewFromHEX(0xff1493)
var Deepskyblue = NewFromHEX(0x00bfff)
var Dimgray = NewFromHEX(0x696969)
var Dodgerblue = NewFromHEX(0x1e90ff)
var Firebrick = NewFromHEX(0xb22222)
var Floralwhite = NewFromHEX(0xfffaf0)
var Forestgreen = NewFromHEX(0x228b22)
var Fuchsia = NewFromHEX(0xff00ff)
var Gainsboro = NewFromHEX(0xdcdcdc)
var Ghostwhite = NewFromHEX(0xf8f8ff)
var Gold = NewFromHEX(0xffd700)
var Goldenrod = NewFromHEX(0xdaa520)
var Gray = NewFromHEX(0x808080)
var Green = NewFromHEX(0x008000)
var Greenyellow = NewFromHEX(0xadff2f)
var Honeydew = NewFromHEX(0xf0fff0)
var Hotpink = NewFromHEX(0xff69b4)
var Indianred = NewFromHEX(0xcd5c5c)
var Indigo = NewFromHEX(0x4b0082)
var Ivory = NewFromHEX(0xfffff0)
var Khaki = NewFromHEX(0xf0e68c)
var Lavender = NewFromHEX(0xe6e6fa)
var Lavenderblush = NewFromHEX(0xfff0f5)
var Lawngreen = NewFromHEX(0x7cfc00)
var Lemonchiffon = NewFromHEX(0xfffacd)
var Lightblue = NewFromHEX(0xadd8e6)
var Lightcoral = NewFromHEX(0xf08080)
var Lightcyan = NewFromHEX(0xe0ffff)
var Lightgoldenrodyellow = NewFromHEX(0xfafad2)
var Lightgray = NewFromHEX(0xd3d3d3)
var Lightgreen = NewFromHEX(0x90ee90)
var Lightpink = NewFromHEX(0xffb6c1)
var Lightsalmon = NewFromHEX(0xffa07a)
var Lightseagreen = NewFromHEX(0x20b2aa)
var Lightskyblue = NewFromHEX(0x87cefa)
var Lightslategray = NewFromHEX(0x778899)
var Lightsteelblue = NewFromHEX(0xb0c4de)
var Lightyellow = NewFromHEX(0xffffe0)
var Lime = NewFromHEX(0x00ff00)

// Deprecated: garbled name kept for backward compatibility; use Limegreen.
var LimegreAliceblueen = NewFromHEX(0x32cd32)

// Limegreen is the correctly spelled alias for the CSS "limegreen" color.
var Limegreen = NewFromHEX(0x32cd32)

var Linen = NewFromHEX(0xfaf0e6)
var Magenta = NewFromHEX(0xff00ff)
var Maroon = NewFromHEX(0x800000)
var Mediumaquamarine = NewFromHEX(0x66cdaa)
var Mediumblue = NewFromHEX(0x0000cd)
var Mediumorchid = NewFromHEX(0xba55d3)

// NOTE(review): CSS defines mediumpurple as 0x9370db; 0xd8 here may be a
// typo — value kept as-is pending confirmation.
var Mediumpurple = NewFromHEX(0x9370d8)

var Mediumseagreen = NewFromHEX(0x3cb371)
var Mediumslateblue = NewFromHEX(0x7b68ee)
var Mediumspringgreen = NewFromHEX(0x00fa9a)
var Mediumturquoise = NewFromHEX(0x48d1cc)
var Mediumvioletred = NewFromHEX(0xc71585)
var Midnightblue = NewFromHEX(0x191970)
var Mintcream = NewFromHEX(0xf5fffa)
var Mistyrose = NewFromHEX(0xffe4e1)
var Moccasin = NewFromHEX(0xffe4b5)
var Navajowhite = NewFromHEX(0xffdead)
var Navy = NewFromHEX(0x000080)
var Oldlace = NewFromHEX(0xfdf5e6)
var Olive = NewFromHEX(0x808000)
var Olivedrab = NewFromHEX(0x6b8e23)
var Orange = NewFromHEX(0xffa500)
var Orangered = NewFromHEX(0xff4500)
var Orchid = NewFromHEX(0xda70d6)
var Palegoldenrod = NewFromHEX(0xeee8aa)
var Palegreen = NewFromHEX(0x98fb98)
var Paleturquoise = NewFromHEX(0xafeeee)

// NOTE(review): CSS defines palevioletred as 0xdb7093; 0xd87093 here may be a
// typo — value kept as-is pending confirmation.
var Palevioletred = NewFromHEX(0xd87093)

var Papayawhip = NewFromHEX(0xffefd5)
var Peachpuff = NewFromHEX(0xffdab9)
var Peru = NewFromHEX(0xcd853f)
var Pink = NewFromHEX(0xffc0cb)
var Plum = NewFromHEX(0xdda0dd)
var Powderblue = NewFromHEX(0xb0e0e6)
var Purple = NewFromHEX(0x800080)
var Red = NewFromHEX(0xff0000)
var Rosybrown = NewFromHEX(0xbc8f8f)
var Royalblue = NewFromHEX(0x4169e1)
var Saddlebrown = NewFromHEX(0x8b4513)
var Salmon = NewFromHEX(0xfa8072)
var Sandybrown = NewFromHEX(0xf4a460)
var Seagreen = NewFromHEX(0x2e8b57)
var Seashell = NewFromHEX(0xfff5ee)
var Sienna = NewFromHEX(0xa0522d)
var Silver = NewFromHEX(0xc0c0c0)
var Skyblue = NewFromHEX(0x87ceeb)
var Slateblue = NewFromHEX(0x6a5acd)
var Slategray = NewFromHEX(0x708090)
var Snow = NewFromHEX(0xfffafa)
var Springgreen = NewFromHEX(0x00ff7f)
var Steelblue = NewFromHEX(0x4682b4)
var Tan = NewFromHEX(0xd2b48c)
var Teal = NewFromHEX(0x008080)
var Thistle = NewFromHEX(0xd8bfd8)
var Tomato = NewFromHEX(0xff6347)
var Turquoise = NewFromHEX(0x40e0d0)
var Violet = NewFromHEX(0xee82ee)
var Wheat = NewFromHEX(0xf5deb3)
var White = NewFromHEX(0xffffff)
var Whitesmoke = NewFromHEX(0xf5f5f5)
var Yellow = NewFromHEX(0xffff00)
var Yellowgreen = NewFromHEX(0x9acd32)
|
package api
import (
"fmt"
"go.rock.com/rock-platform/rock/server/database"
"go.rock.com/rock-platform/rock/server/database/models"
"go.rock.com/rock-platform/rock/server/utils"
)
// CreateDeployment inserts the current deployment info into the database and
// returns the persisted record. The record name is derived from the chart
// name and target namespace.
func CreateDeployment(appId, envId int64, chartName, chartVersion, description, namespace string) (*models.Deployment, error) {
    deployment := models.Deployment{
        Description:  description,
        ChartName:    chartName,
        ChartVersion: chartVersion,
        AppId:        appId,
        EnvId:        envId,
    }
    deployment.Name = utils.GenerateChartName(chartName, namespace)
    // app_id and env_id were already validated before this function is
    // called, so they are not re-checked here.
    db := database.GetDBEngine()
    if err := db.Create(&deployment).Error; err != nil {
        return nil, err
    }
    return &deployment, nil
}
// GetDeployments returns one page of deployments ordered by most recent
// update, together with pagination metadata (total row count and page count).
func GetDeployments(pageNum, pageSize int64) (*models.DeploymentPagination, error) {
	db := database.GetDBEngine()
	deployments := make([]*models.Deployment, 0)
	var count int64
	// Count all rows in a dedicated query. The previous code chained
	// Offset(...).Find(...).Count(...): it fetched every row past the offset
	// (no Limit) just to count, and the offset could skew the total.
	if err := db.Model(&models.Deployment{}).Count(&count).Error; err != nil {
		return nil, err
	}
	if err := db.Order("updated_at desc").
		Offset((pageNum - 1) * pageSize).
		Limit(pageSize).
		Find(&deployments).Error; err != nil {
		return nil, err
	}
	deploymentPagination := &models.DeploymentPagination{
		PageNum:  pageNum,
		PageSize: pageSize,
		Total:    count,
		Pages:    utils.CalcPages(count, pageSize),
		Items:    deployments,
	}
	return deploymentPagination, nil
}
// GetDeploymentById looks up a single deployment record by primary key.
// A missing row is translated into a 404 RockError (code 40400011); any
// other database failure is returned unchanged.
func GetDeploymentById(id int64) (*models.Deployment, error) {
	db := database.GetDBEngine()
	result := new(models.Deployment)
	err := db.First(result, id).Error
	if err == nil {
		return result, nil
	}
	if err.Error() == "record not found" {
		return nil, utils.NewRockError(404, 40400011, fmt.Sprintf("Deployment with id(%v) was not found", id))
	}
	return nil, err
}
// DeleteDeploymentById removes the deployment with the given id.
// The lookup runs first so that a missing id surfaces as the same 404
// error GetDeploymentById produces.
func DeleteDeploymentById(id int64) error {
	record, err := GetDeploymentById(id)
	if err != nil {
		return err
	}
	return database.GetDBEngine().Delete(record, id).Error
}
// UpdateDeploymentById updates the app_id, env_id, chart_name,
// chart_version and description columns of an existing deployment.
// The deployment, app and env must all exist; each lookup's error
// (including the 404-style RockErrors) is propagated unchanged.
func UpdateDeploymentById(id, appId, envId int64, chartName, chartVersion, description string) (*models.Deployment, error) {
	deployment, err := GetDeploymentById(id)
	if err != nil {
		return nil, err
	}
	// Validate the foreign keys before writing; the fetched records are
	// only needed for their existence check.
	_, err = GetAppById(appId)
	if err != nil {
		return nil, err
	}
	_, err = GetEnvById(envId)
	if err != nil {
		return nil, err
	}
	db := database.GetDBEngine()
	// NOTE(review): assumes gorm's Update with a map writes all listed
	// columns (including zero values) in one statement — confirm against
	// the gorm version in use.
	if err := db.Model(deployment).Update(map[string]interface{}{"app_id": appId, "env_id": envId, "chart_name": chartName, "chart_version": chartVersion, "description": description}).Error; err != nil {
		return nil, err
	}
	return deployment, nil
}
// GetDeploymentsByName returns one page of deployments whose instance name
// (deployment name) matches exactly, newest-updated first, plus pagination
// metadata.
func GetDeploymentsByName(name string, pageNum, pageSize int64) (*models.DeploymentPagination, error) {
	db := database.GetDBEngine()
	deployments := make([]*models.Deployment, 0)
	var count int64
	// Count matching rows in a dedicated query. The previous code chained
	// Offset(...).Find(...).Count(...): it fetched every matching row past
	// the offset (no Limit) just to count, and the offset could skew the total.
	if err := db.Model(&models.Deployment{}).
		Where("name = ?", name).
		Count(&count).Error; err != nil {
		return nil, err
	}
	if err := db.Order("updated_at desc").
		Offset((pageNum-1)*pageSize).
		Where("name = ?", name).
		Limit(pageSize).
		Find(&deployments).Error; err != nil {
		return nil, err
	}
	deploymentPagination := &models.DeploymentPagination{
		PageNum:  pageNum,
		PageSize: pageSize,
		Total:    count,
		Pages:    utils.CalcPages(count, pageSize),
		Items:    deployments,
	}
	return deploymentPagination, nil
}
|
package routers
import (
"github.com/apulis/AIArtsBackend/models"
"github.com/apulis/AIArtsBackend/services"
"github.com/gin-gonic/gin"
)
// AddGroupUpdatePlatform registers the platform version / upgrade endpoints
// under /ai_arts/api/version. Every route requires authentication.
func AddGroupUpdatePlatform(r *gin.Engine) {
	version := r.Group("/ai_arts/api/version")
	version.Use(Auth())
	version.GET("/info", wrapper(getVersionInfo))
	version.GET("/detail/:id", wrapper(getVersionDetailByID))
	version.GET("/upgradeProgress", wrapper(getLocalUpgradeProgress))
	version.GET("/upgradeLog", wrapper(getLocalUpgradeLog))
	version.GET("/env/local", wrapper(checkLocalEnv))
	version.POST("/upgrade/online", wrapper(upgradeOnline))
	version.POST("/upgrade/local", wrapper(upgradeLocal))
}
// getVersionInfoResp is the payload for GET /info: the running version,
// the version changelog, and whether an upgrade is in progress.
type getVersionInfoResp struct {
	CurrentVersion models.VersionInfoSet `json:"versionInfo"`
	VersionInfo []models.VersionInfoSet `json:"versionLogs"`
	IsUpgrading bool `json:"isUpgrading"`
}

// getVersionInfoReq models the ?limit query parameter.
// NOTE(review): queryLimit is unexported, so gin's reflection-based form
// binding cannot populate it — confirm whether this struct is bound anywhere.
type getVersionInfoReq struct {
	queryLimit int `form:"limit,default=10"`
}

// getLocalEnvResp is the payload for GET /env/local.
type getLocalEnvResp struct {
	CanUpgrade bool `json:"canUpgrade"`
	IsLower bool `json:"isLower"`
}

// getLocalUpgradeProgressResp is the payload for GET /upgradeProgress.
type getLocalUpgradeProgressResp struct {
	Status string `json:"status"`
	Percent int `json:"percent"`
}

// getLocalUpgradeLogResp is the payload for GET /upgradeLog.
type getLocalUpgradeLogResp struct {
	Status string `json:"status"`
	LogString string `json:"logString"`
}
// @Summary get version infomation
// @Produce json
// @Success 200 {object} getVersionInfoResp "success"
// @Failure 400 {object} APIException "error"
// @Failure 404 {object} APIException "not found"
// @Router /ai_arts/api/version/info [get]
// getVersionInfo reports the current version, the version changelog and
// whether the platform is mid-upgrade.
func getVersionInfo(c *gin.Context) error {
	current, err := services.GetCurrentVersion()
	if err != nil {
		return AppError(APP_ERROR_CODE, err.Error())
	}
	logs, err := services.GetVersionLogs()
	if err != nil {
		return AppError(APP_ERROR_CODE, err.Error())
	}
	resp := getVersionInfoResp{
		CurrentVersion: current,
		VersionInfo:    logs,
		IsUpgrading:    services.GetUpgradeStatus() == "upgrading",
	}
	return SuccessResp(c, resp)
}
// getVersionDetailByID is a stub handler: it currently ignores the :id
// path parameter and always returns the placeholder payload "test".
func getVersionDetailByID(c *gin.Context) error {
	data := "test"
	return SuccessResp(c, data)
}
// @Summary get local upgrade process
// @Produce json
// @Success 200 {object} getLocalUpgradeProgressResp
// @Failure 400 {object} APIException "error"
// @Failure 404 {object} APIException "not found"
// @Router /ai_arts/api/version/upgradeProgress [get]
// getLocalUpgradeProgress reports the current upgrade status string and
// its completion percentage.
func getLocalUpgradeProgress(c *gin.Context) error {
	status, percent := services.GetUpgradeProgress()
	return SuccessResp(c, getLocalUpgradeProgressResp{Status: status, Percent: percent})
}
// @Summary get local upgrade log
// @Produce json
// @Success 200 {object} getLocalUpgradeLogResp
// @Failure 400 {object} APIException "error"
// @Failure 404 {object} APIException "not found"
// @Router /ai_arts/api/version/upgradeLog [get]
// getLocalUpgradeLog returns the upgrade status together with the raw
// upgrade log text.
func getLocalUpgradeLog(c *gin.Context) error {
	status, logText, err := services.GetUpgradeLog()
	if err != nil {
		return AppError(APP_ERROR_CODE, err.Error())
	}
	return SuccessResp(c, getLocalUpgradeLogResp{Status: status, LogString: logText})
}
// @Summary get local upgrade environment info
// @Produce json
// @Success 200 {object} getLocalEnvResp
// @Failure 400 {object} APIException "error"
// @Failure 404 {object} APIException "not found"
// @Router /ai_arts/api/version/env/local [get]
// checkLocalEnv reports whether a local upgrade is possible and whether
// the local package version is lower than the running one.
func checkLocalEnv(c *gin.Context) error {
	canUpgrade, isLower, err := services.GetLocalUpgradeEnv()
	if err != nil {
		return AppError(APP_ERROR_CODE, err.Error())
	}
	return SuccessResp(c, getLocalEnvResp{CanUpgrade: canUpgrade, IsLower: isLower})
}
// upgradeOnline is a stub handler: online upgrade is not implemented yet
// and the placeholder payload "test" is returned.
func upgradeOnline(c *gin.Context) error {
	data := "test"
	return SuccessResp(c, data)
}
// @Summary upgrade through local package
// @Produce json
// @Success 200 {object} APISuccessResp "success"
// @Failure 400 {object} APIException "error"
// @Failure 404 {object} APIException "not found"
// @Router /ai_arts/api/version/upgrade/local [post]
// upgradeLocal kicks off a platform upgrade from a local package on behalf
// of the authenticated user; a second concurrent attempt is rejected.
func upgradeLocal(c *gin.Context) error {
	if err := services.UpgradePlatformByLocal(getUsername(c)); err != nil {
		return AppError(ALREADY_UPGRADING_CODE, err.Error())
	}
	return SuccessResp(c, gin.H{})
}
|
package master
import (
"bufio"
"net"
"strings"
"time"
)
// ServerList maps "ip:port" strings to their resolved UDP addresses.
type ServerList map[string]*net.UDPAddr

// Server is a client for a master server reachable at addr. timeout bounds
// the TCP dial to the master.
type Server struct {
	addr string
	timeout time.Duration
	cache ServerList // to re-use resolved UDP addresses
}
// New builds a master-server client for the given address with an empty
// resolution cache.
func New(addr string, timeout time.Duration) *Server {
	s := &Server{addr: addr, timeout: timeout}
	s.cache = ServerList{}
	return s
}
// ServerList connects to the master server, issues the "list" command and
// returns every advertised game server as a resolved UDP address, keyed by
// "ip:port". Resolved addresses are cached on the Server to avoid repeated
// DNS/resolution work across calls.
func (s *Server) ServerList() (servers ServerList, err error) {
	conn, err := net.DialTimeout("tcp", s.addr, s.timeout)
	if err != nil {
		return
	}
	defer conn.Close()
	// DialTimeout only bounds connection set-up; also bound the request
	// write and response read so a stalled master cannot block forever.
	if err = conn.SetDeadline(time.Now().Add(s.timeout)); err != nil {
		return
	}
	in := bufio.NewScanner(conn)
	out := bufio.NewWriter(conn)
	// request list
	_, err = out.WriteString("list\n")
	if err != nil {
		return
	}
	err = out.Flush()
	if err != nil {
		return
	}
	// receive list; only "addserver ..." lines are meaningful
	servers = ServerList{}
	for in.Scan() {
		msg := in.Text()
		if !strings.HasPrefix(msg, "addserver ") || msg == "\x00" {
			continue
		}
		msg = strings.TrimPrefix(msg, "addserver ")
		msg = strings.TrimSpace(msg)
		// 12.23.34.45 28785 → 12.23.34.45:28785
		msg = strings.Replace(msg, " ", ":", -1)
		addr, ok := s.cache[msg]
		if !ok {
			addr, err = net.ResolveUDPAddr("udp", msg)
			if err != nil {
				return
			}
			s.cache[msg] = addr // cache resolved address
		}
		servers[addr.String()] = addr
	}
	err = in.Err()
	return
}
func (s *Server) Address() string { return s.addr }
|
package ess
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
esssdk "github.com/aliyun/alibaba-cloud-sdk-go/services/ess"
)
// LifecycleHook struct is mapped to lifecycle hook template.
// It mirrors the fields of the ESS SDK's lifecycle-hook response items.
type LifecycleHook struct {
	LifecycleHookName string
	LifecycleHookID string
	LifecycleTransition string
	DefaultResult string
	// HeartbeatTimeout is in seconds per the ESS API — TODO confirm.
	HeartbeatTimeout int
}
// GetLifecycleHooks returns every lifecycle hook attached to the given
// scaling group, following the API's pagination (50 hooks per page) until
// a short page signals the end.
func (c *Client) GetLifecycleHooks(scalingGroupID string) ([]LifecycleHook, error) {
	req := esssdk.CreateDescribeLifecycleHooksRequest()
	req.PageSize = requests.NewInteger(50)
	req.ScalingGroupId = scalingGroupID
	hooks := []LifecycleHook{}
	for {
		resp, err := c.DescribeLifecycleHooks(req)
		if err != nil {
			return nil, err
		}
		for _, lh := range resp.LifecycleHooks.LifecycleHook {
			hooks = append(hooks, LifecycleHook{
				LifecycleHookName:   lh.LifecycleHookName,
				LifecycleHookID:     lh.LifecycleHookId,
				LifecycleTransition: lh.LifecycleTransition,
				HeartbeatTimeout:    lh.HeartbeatTimeout,
				DefaultResult:       lh.DefaultResult,
			})
		}
		// A page smaller than PageSize means there is nothing left.
		if requests.NewInteger(len(resp.LifecycleHooks.LifecycleHook)) != req.PageSize {
			break
		}
		req.PageNumber = requests.NewInteger(resp.PageNumber + 1)
	}
	return hooks, nil
}
|
package main
// Leetcode 1287. (easy)
// findSpecialInteger returns the element of the sorted array arr that
// occurs more than 25% of the time, or -1 when there is none.
//
// Any element occupying more than a quarter of the array must appear at one
// of the positions 0, n/4, 2n/4, 3n/4, so sampling every (n/4)-th element
// and measuring that value's run length via binary search suffices.
func findSpecialInteger(arr []int) int {
	scan := len(arr) / 4
	if scan == 0 {
		// Fewer than 4 elements: a zero step would loop forever on i += scan;
		// step by 1 so every element is examined (empty input returns -1).
		scan = 1
	}
	for i := 0; i < len(arr); i += scan {
		left := leftBound(arr, arr[i])
		right := rightBound(arr, arr[i])
		if right-left+1 > len(arr)/4 {
			return arr[i]
		}
	}
	return -1
}
|
package diagnostic
import (
"context"
"go.uber.org/zap"
)
// DiagnosticService implements the diagnostic gRPC service; unimplemented
// RPCs fall through to the embedded UnimplementedDiagnosticServiceServer.
type DiagnosticService struct {
	log *zap.SugaredLogger
	UnimplementedDiagnosticServiceServer
}
// NewDiagnosticService wires a sugared zap logger into a DiagnosticService.
func NewDiagnosticService(log *zap.SugaredLogger) DiagnosticService {
	svc := DiagnosticService{}
	svc.log = log
	return svc
}
// Ping is a trivial liveness RPC: every request is answered with "pong".
func (s DiagnosticService) Ping(ctx context.Context, request *PingRequest) (*PingResponse, error) {
	resp := &PingResponse{}
	resp.Answer = "pong"
	return resp, nil
}
|
package main
import "database/sql"
// HereResult mirrors the relevant part of a HERE geocoding API response:
// only the first view/result location coordinates are of interest.
type HereResult struct {
	Response struct {
		View []struct {
			Result []struct {
				Location struct {
					Latitude float64 `json:"latitude"`
					Longitude float64 `json:"longitude"`
				} `json:"location"`
			} `json:"Result"`
		} `json:"View"`
	} `json:"Response"`
}

// Config is the service configuration loaded from JSON.
// NOTE(review): PushWorkers uses a snake_case json tag while the other
// fields are camelCase — confirm which style the config file actually uses.
type Config struct {
	Port string `json:"port"`
	DBUrl string `json:"dbUrl"`
	AccessToken string `json:"accessToken"`
	PushWorkers int `json:"push_workers"`
}

// Location is an ATM/branch point of interest as stored in the database.
type Location struct {
	ID int `json:"id"`
	Name string `json:"name"`
	Latitude float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
	BankName string `json:"bank_name"`
	Type string `json:"type"`
	WorkTime string `json:"work_time"`
	Currency string `json:"currency"`
	Cashless bool `json:"cashless"`
	IsMerchant bool `json:"is_merchant"`
	Address string `json:"address"`
	BankID int `json:"bank_id"`
}

// Bank is a bank reference record.
type Bank struct {
	ID int `json:"id"`
	Name string `json:"name"`
	OfficialName string `json:"official_name"`
}

// SimpleRequest carries plain limit/offset pagination parameters.
type SimpleRequest struct {
	Limit int `json:"limit"`
	Offset int `json:"offset"`
}

// FilterATMRequest filters ATMs by currency and/or bank.
type FilterATMRequest struct {
	Currency string `json:"currency"`
	Bank string `json:"bank"`
}

// AddPartnerRequest links a partner bank to a bank.
type AddPartnerRequest struct {
	BankID int `json:"bank_id"`
	PartnerID int `json:"partner_id"`
}

// ATMNearbyRequest asks for ATMs near a coordinate, optionally filtered.
type ATMNearbyRequest struct {
	Bank string `json:"bank"`
	Currency string `json:"currency"`
	Latitude float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
}

// accessToDB is the subset of *sql.DB used by this service; having it as
// an interface allows tests to substitute a fake.
type accessToDB interface {
	QueryRow(query string, argv ...interface{}) *sql.Row
	Query(query string, argv ...interface{}) (*sql.Rows, error)
	Exec(query string, args ...interface{}) (sql.Result, error)
	Prepare(query string) (*sql.Stmt, error)
}
|
// Copyright 2022 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
// Package gib metrics.go implements accuracy metrics for rating detection on
// several test cases.
package gib
// Labels : positive class is gibberish and negative class is correct string
// True Positive : A gibberish string labeled by the score function as gibberish
// False Positive : A correct (non-gibberish) string labeled by the score function as gibberish
// True Negative : A correct string labeled by the score function as non-gibberish
// False Negative : A gibberish string labeled by the score function as non-gibberish
// Accuracy is the fraction of all predictions gib made correctly:
// (TP + TN) / (TP + FP + TN + FN).
func Accuracy(truePositiveCount, falsePositiveCount, trueNegativeCount,
	falseNegativeCount int) float64 {
	// Integer counts convert exactly to float64.
	correct := float64(truePositiveCount) + float64(trueNegativeCount)
	wrong := float64(falsePositiveCount) + float64(falseNegativeCount)
	return correct / (correct + wrong)
}
// Precision is a metric to answer the question:
// "What proportion of positive identifications was actually correct" —
// TP / (TP + FP). Note that when both counts are zero the result is NaN
// (0/0), not 1.
func Precision(truePositiveCount, falsePositiveCount int) float64 {
	predictedPositive := float64(truePositiveCount) + float64(falsePositiveCount)
	return float64(truePositiveCount) / predictedPositive
}
// Recall is a metric to answer the question:
// "What proportion of actual positives was identified correctly?" —
// TP / (TP + FN).
func Recall(truePositiveCount, falseNegativeCount int) float64 {
	actualPositive := float64(truePositiveCount) + float64(falseNegativeCount)
	return float64(truePositiveCount) / actualPositive
}
|
package expandurl
import "net/http"
import "net/url"
// Expand resolves a (possibly percent-encoded) shortened URL by following
// its redirects and returning the final URL the request settled on.
func Expand(uri string) (string, error) {
	decodedURL, urlError := url.QueryUnescape(uri)
	if urlError != nil {
		return "", urlError
	}
	resp, err := http.Get(decodedURL)
	if err != nil {
		return "", err
	}
	// The body content is never read, but it must still be closed so the
	// underlying connection is released instead of leaking.
	defer resp.Body.Close()
	return resp.Request.URL.String(), nil
}
|
package discovery
import (
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseResponse feeds a recorded WS-Discovery probe-match document to
// parseResponse and checks the extracted device address.
func TestParseResponse(t *testing.T) {
	xml, err := ioutil.ReadFile("./probe_match_example.xml")
	if err != nil {
		t.Fatalf("Cannot read xml: %s", err)
	}
	messageID := "uuid:0a6dc791-2be6-4991-9af1-454778a1917a"
	device, err := parseResponse(messageID, xml)
	// The parse error was previously ignored, which would have made the
	// assertion below panic on a nil/zero device instead of failing cleanly.
	if err != nil {
		t.Fatalf("parseResponse failed: %s", err)
	}
	assert.Equal(t, "http://prn-example/PRN42/b42-1668-a", device.Address)
}
|
package serix_test
import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/iotaledger/hive.go/serializer/v2"
"github.com/iotaledger/hive.go/serializer/v2/serix"
)
// TestDecode_Slice decodes a length-prefixed slice type (Bools).
func TestDecode_Slice(t *testing.T) {
	t.Parallel()
	testObj := Bools{true, false, true, true}
	ts := serix.TypeSettings{}.WithLengthPrefixType(boolsLenType)
	testDecode(t, testObj, serix.WithTypeSettings(ts))
}
// TestDecode_Struct decodes a plain struct value.
func TestDecode_Struct(t *testing.T) {
	t.Parallel()
	testObj := NewSimpleStruct()
	testDecode(t, testObj)
}
// TestDecode_Interface decodes a struct whose field is typed as an
// interface holding a concrete implementation.
func TestDecode_Interface(t *testing.T) {
	t.Parallel()
	testObj := StructWithInterface{
		Interface: &InterfaceImpl{
			interfaceImpl{
				A: 1,
				B: 2,
			},
		},
	}
	testDecode(t, testObj)
}
// TestDecode_Pointer decodes through a pointer to a struct.
func TestDecode_Pointer(t *testing.T) {
	t.Parallel()
	ss := NewSimpleStruct()
	testObj := &ss
	testDecode(t, testObj)
}
// TestDecode_Optional decodes a struct with an absent optional field.
func TestDecode_Optional(t *testing.T) {
	t.Parallel()
	testObj := StructWithOptionalField{Optional: nil}
	testDecode(t, testObj)
}
// TestDecode_EmbeddedStructs decodes a struct with both exported and
// unexported embedded structs.
func TestDecode_EmbeddedStructs(t *testing.T) {
	t.Parallel()
	testObj := StructWithEmbeddedStructs{
		unexportedStruct: unexportedStruct{Foo: 1},
		ExportedStruct:   ExportedStruct{Bar: 2},
	}
	testDecode(t, testObj)
}
// TestDecode_Map decodes a length-prefixed map type.
func TestDecode_Map(t *testing.T) {
	t.Parallel()
	testObj := Map{
		0: 2,
		1: 4,
	}
	testDecode(t, testObj, serix.WithTypeSettings(serix.TypeSettings{}.WithLengthPrefixType(mapLenType)))
}
// TestDecode_Deserializable decodes a type with custom deserialization.
func TestDecode_Deserializable(t *testing.T) {
	t.Parallel()
	testObject := CustomSerializable(2)
	testDecode(t, testObject)
}
// TestDecode_DeserializablePointer decodes a pointer to a type with custom
// deserialization.
func TestDecode_DeserializablePointer(t *testing.T) {
	t.Parallel()
	cs := CustomSerializable(2)
	testObject := &cs
	testDecode(t, testObject)
}
// TestDecode_SyntacticValidation expects decoding with validation enabled
// to surface the object's syntactic-validation error and consume no bytes.
func TestDecode_SyntacticValidation(t *testing.T) {
	t.Parallel()
	testObj := &ObjectForSyntacticValidation{}
	bytesRead, err := testAPI.Decode(ctx, nil, testObj, serix.WithValidation())
	require.Zero(t, bytesRead)
	assert.ErrorIs(t, err, errSyntacticValidation)
}
// TestDecode_BytesValidation expects decoding with validation enabled to
// surface the object's bytes-validation error and consume no bytes.
func TestDecode_BytesValidation(t *testing.T) {
	t.Parallel()
	testObj := &ObjectForBytesValidation{}
	bytesRead, err := testAPI.Decode(ctx, nil, testObj, serix.WithValidation())
	require.Zero(t, bytesRead)
	assert.ErrorIs(t, err, errBytesValidation)
}
// TestDecode_ArrayRules serializes 4 elements but imposes Min: 5 on decode,
// expecting the array-rules violation to abort decoding with zero bytes read.
func TestDecode_ArrayRules(t *testing.T) {
	t.Parallel()
	testObj := &Bools{true, false, true, true}
	bytes, err := testObj.Serialize(defaultSeriMode, nil)
	require.NoError(t, err)
	rules := &serix.ArrayRules{Min: 5}
	ts := serix.TypeSettings{}.WithLengthPrefixType(boolsLenType).WithArrayRules(rules)
	bytesRead, err := testAPI.Decode(ctx, bytes, testObj, serix.WithValidation(), serix.WithTypeSettings(ts))
	require.Zero(t, bytesRead)
	assert.Contains(t, err.Error(), "min count of elements within the array not reached")
}
// testDecode round-trips expected through Serialize → testAPI.Decode and
// checks both the decoded value and the number of bytes consumed.
func testDecode(t testing.TB, expected serializer.Serializable, opts ...serix.Option) {
	encoded, err := expected.Serialize(defaultSeriMode, nil)
	require.NoError(t, err)
	target := reflect.New(reflect.TypeOf(expected)).Elem()
	consumed, err := testAPI.Decode(ctx, encoded, target.Addr().Interface(), opts...)
	require.NoError(t, err)
	assert.Equal(t, expected, target.Interface())
	assert.Equal(t, len(encoded), consumed)
}
|
package model
import (
"os"
"github.com/go-ini/ini"
"github.com/vinipis/project-go/showglobalstatus/structs"
)
// MyCnf reads the [client] section of ~/.my.cnf and merges it with the
// values supplied on the terminal. For each of host, port, user and
// password the file value wins, unless it is empty or the matching
// terminal flag (1) forces the terminal-provided value. When the file
// cannot be loaded the terminal values are returned as-is.
// The result order is always {host, port, user, password}.
func MyCnf() (valueCnf []string) {
	valueParameters, validaflag := structs.ParametersTerminal()
	cfg, err := ini.Load(os.Getenv("HOME") + "/.my.cnf")
	// The same lookup was previously written out four times; fold it into
	// one loop keyed by the [client] option name.
	for i, name := range []string{"host", "port", "user", "password"} {
		value := valueParameters[i]
		flag := validaflag[i]
		if err == nil {
			value = cfg.Section("client").Key(name).Validate(func(in string) string {
				// Empty file value, or an explicit terminal flag, keeps the
				// terminal-provided fallback.
				if in == "" || flag == 1 {
					return value
				}
				return in
			})
		}
		valueCnf = append(valueCnf, value)
	}
	return valueCnf
}
|
package config
import (
"bytes"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"testing"
)
// tomlMask is the higher-priority layer: its values override tomlBase.
var tomlMask = []byte(`
A=1
C=1
[table]
name = "Mask"`)

// tomlBase supplies defaults for keys the mask does not define (here B).
var tomlBase = []byte(`
A=2
B=1
[table]
name = "Base"`)
// config layers the two vipers: index 0 (mask) is consulted before
// index 1 (base).
var config Config

func init() {
	arr := [2]*viper.Viper{Build(tomlMask), Build(tomlBase)}
	config = NewFromVipers(arr[:])
}
// TestTopLevel: A is defined in both layers (mask wins), B only in base,
// C only in mask — all three must resolve to 1.
func TestTopLevel(t *testing.T) {
	assert.Equal(t, 1, config.GetInt("A"))
	assert.Equal(t, 1, config.GetInt("B"))
	assert.Equal(t, 1, config.GetInt("C"))
}
// TestNestedLevel: nested keys follow the same precedence — mask wins.
func TestNestedLevel(t *testing.T) {
	assert.Equal(t, "Mask", config.GetString("table.name"))
}
// Build parses a TOML document into a fresh viper instance.
// A parse failure would previously be silently ignored, letting the tests
// run against an empty config; for a test helper it is fatal instead.
func Build(config []byte) *viper.Viper {
	v := viper.New()
	v.SetConfigType("toml")
	if err := v.ReadConfig(bytes.NewReader(config)); err != nil {
		panic(err)
	}
	return v
}
|
package odoo
import (
"fmt"
)
// MailMassMailingTag represents mail.mass_mailing.tag model.
// NOTE(review): every tag option reads "omptempty" rather than the usual
// "omitempty" — confirm whether the xmlrpc encoder in use matches the
// misspelled option before "fixing" it, since the whole generated client
// uses the same spelling.
type MailMassMailingTag struct {
	LastUpdate *Time `xmlrpc:"__last_update,omptempty"`
	Color *Int `xmlrpc:"color,omptempty"`
	CreateDate *Time `xmlrpc:"create_date,omptempty"`
	CreateUid *Many2One `xmlrpc:"create_uid,omptempty"`
	DisplayName *String `xmlrpc:"display_name,omptempty"`
	Id *Int `xmlrpc:"id,omptempty"`
	Name *String `xmlrpc:"name,omptempty"`
	WriteDate *Time `xmlrpc:"write_date,omptempty"`
	WriteUid *Many2One `xmlrpc:"write_uid,omptempty"`
}
// MailMassMailingTags represents array of mail.mass_mailing.tag model.
type MailMassMailingTags []MailMassMailingTag

// MailMassMailingTagModel is the odoo model name.
const MailMassMailingTagModel = "mail.mass_mailing.tag"
// Many2One converts a MailMassMailingTag record into a *Many2One
// reference (id only, empty display name).
func (mmt *MailMassMailingTag) Many2One() *Many2One {
	id := mmt.Id.Get()
	return NewMany2One(id, "")
}
// CreateMailMassMailingTag creates a new mail.mass_mailing.tag model and
// returns its id; -1 is returned on error or when the server reports no id.
func (c *Client) CreateMailMassMailingTag(mmt *MailMassMailingTag) (int64, error) {
	ids, err := c.CreateMailMassMailingTags([]*MailMassMailingTag{mmt})
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, nil
	}
}
// CreateMailMassMailingTags creates new mail.mass_mailing.tag models and
// returns their ids. (The comment previously named the singular function.)
func (c *Client) CreateMailMassMailingTags(mmts []*MailMassMailingTag) ([]int64, error) {
	var vv []interface{}
	for _, v := range mmts {
		vv = append(vv, v)
	}
	return c.Create(MailMassMailingTagModel, vv)
}
// UpdateMailMassMailingTag updates an existing mail.mass_mailing.tag
// record, identified by mmt.Id, with mmt's values.
func (c *Client) UpdateMailMassMailingTag(mmt *MailMassMailingTag) error {
	return c.UpdateMailMassMailingTags([]int64{mmt.Id.Get()}, mmt)
}
// UpdateMailMassMailingTags updates existing mail.mass_mailing.tag records.
// All records (represented by ids) will be updated by mmt values.
func (c *Client) UpdateMailMassMailingTags(ids []int64, mmt *MailMassMailingTag) error {
	return c.Update(MailMassMailingTagModel, ids, mmt)
}
// DeleteMailMassMailingTag deletes an existing mail.mass_mailing.tag record.
func (c *Client) DeleteMailMassMailingTag(id int64) error {
	return c.DeleteMailMassMailingTags([]int64{id})
}
// DeleteMailMassMailingTags deletes existing mail.mass_mailing.tag records
// in one call.
func (c *Client) DeleteMailMassMailingTags(ids []int64) error {
	return c.Delete(MailMassMailingTagModel, ids)
}
// GetMailMassMailingTag gets an existing mail.mass_mailing.tag record by
// id; a missing record yields a descriptive error.
func (c *Client) GetMailMassMailingTag(id int64) (*MailMassMailingTag, error) {
	mmts, err := c.GetMailMassMailingTags([]int64{id})
	if err != nil {
		return nil, err
	}
	if mmts == nil || len(*mmts) == 0 {
		return nil, fmt.Errorf("id %v of mail.mass_mailing.tag not found", id)
	}
	return &((*mmts)[0]), nil
}
// GetMailMassMailingTags gets existing mail.mass_mailing.tag records for
// the given ids.
func (c *Client) GetMailMassMailingTags(ids []int64) (*MailMassMailingTags, error) {
	result := &MailMassMailingTags{}
	err := c.Read(MailMassMailingTagModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindMailMassMailingTag finds the first mail.mass_mailing.tag record
// matching the criteria; no match yields a descriptive error.
func (c *Client) FindMailMassMailingTag(criteria *Criteria) (*MailMassMailingTag, error) {
	mmts := &MailMassMailingTags{}
	err := c.SearchRead(MailMassMailingTagModel, criteria, NewOptions().Limit(1), mmts)
	if err != nil {
		return nil, err
	}
	if mmts == nil || len(*mmts) == 0 {
		return nil, fmt.Errorf("mail.mass_mailing.tag was not found with criteria %v", criteria)
	}
	return &((*mmts)[0]), nil
}
// FindMailMassMailingTags finds mail.mass_mailing.tag records matching the
// criteria, filtered/paged by options.
func (c *Client) FindMailMassMailingTags(criteria *Criteria, options *Options) (*MailMassMailingTags, error) {
	result := &MailMassMailingTags{}
	err := c.SearchRead(MailMassMailingTagModel, criteria, options, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindMailMassMailingTagIds finds record ids matching the criteria,
// filtered by options; on error an empty (non-nil) slice is returned.
func (c *Client) FindMailMassMailingTagIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(MailMassMailingTagModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindMailMassMailingTagId finds the first record id matching the
// criteria/options; no match yields -1 and a descriptive error.
func (c *Client) FindMailMassMailingTagId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(MailMassMailingTagModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, fmt.Errorf("mail.mass_mailing.tag was not found with criteria %v and options %v", criteria, options)
	}
	return ids[0], nil
}
|
package cockroachdb
import (
"database/sql"
"fmt"
"strconv"
nurl "net/url"
"regexp"
"strings"
"hash/crc32"
"context"
"errors"
"github.com/db-journey/migrate/direction"
"github.com/db-journey/migrate/driver"
"github.com/db-journey/migrate/file"
"github.com/lib/pq"
"github.com/cockroachdb/cockroach-go/crdb"
)
// advisoryLockIdSalt is folded into the generated lock id so ids from this
// package are unlikely to collide with other users of the lock table.
const advisoryLockIdSalt uint = 1486364155

// DefaultMigrationsTable and DefaultLockTable are used unless overridden
// via the x-migrations-table / x-lock-table URL query parameters.
var DefaultMigrationsTable = "schema_migrations"
var DefaultLockTable = "schema_lock"

var (
	ErrNilConfig      = fmt.Errorf("no config")
	ErrNoDatabaseName = fmt.Errorf("no database name")
)

// Compile-time check that *Driver satisfies driver.Driver.
// (This assertion previously appeared twice; once is enough.)
var _ driver.Driver = (*Driver)(nil)

// Config carries the driver's runtime settings.
type Config struct {
	MigrationsTable string
	LockTable       string
	ForceLock       bool
	DatabaseName    string
}

// Driver implements migrations against CockroachDB.
type Driver struct {
	db     *sql.DB
	config *Config
	lockId string
}

// txDisabledOption, when present on the first comment line of a migration
// file ("-- disable_ddl_transaction"), makes Migrate run the script
// outside a transaction.
const txDisabledOption = "disable_ddl_transaction" // this is a neat feature
// == INTERFACE METHODS: START =================================================
// Initialize opens and verifies the database handle.
// The URL scheme (cockroach://, cockroachdb://, crdb-postgres://) is
// rewritten to postgres:// for lib/pq, optional x-migrations-table,
// x-lock-table and x-force-lock query parameters are honored, and the
// migration lock is acquired before returning.
func (driver *Driver) Initialize(url string) error {
	// fmt.Println("Initialize:", url)
	purl, err := nurl.Parse(url)
	if err != nil {
		return err
	}
	re := regexp.MustCompile("^(cockroach(db)?|crdb-postgres)")
	connectString := re.ReplaceAllString(FilterCustomQuery(purl).String(), "postgres")
	db, err := sql.Open("postgres", connectString)
	if err != nil {
		return err
	}
	migrationsTable := purl.Query().Get("x-migrations-table")
	if len(migrationsTable) == 0 {
		migrationsTable = DefaultMigrationsTable
	}
	lockTable := purl.Query().Get("x-lock-table")
	if len(lockTable) == 0 {
		lockTable = DefaultLockTable
	}
	forceLockQuery := purl.Query().Get("x-force-lock")
	forceLock, err := strconv.ParseBool(forceLockQuery)
	if err != nil {
		// Absent or malformed x-force-lock means "do not force".
		forceLock = false
	}
	// PopulateConfig stores db and the config on the driver — TODO confirm
	// it also runs the ensure*Table bootstrap.
	err = PopulateConfig(driver, db, &Config{
		DatabaseName:    purl.Path,
		MigrationsTable: migrationsTable,
		LockTable:       lockTable,
		ForceLock:       forceLock,
	})
	if err != nil {
		return err
	}
	return lock(driver)
}
// SetDB is part of the driver interface but is intentionally a no-op here:
// the handle opened by Initialize is kept and the argument is ignored.
// (The original assignment is preserved below, commented out.)
func (driver *Driver) SetDB(db *sql.DB) {
	// fmt.Println("SetDB")
	// driver.db = db
}
// Close releases the migration lock (when one was taken) and closes the
// underlying database handle in all cases.
func (driver *Driver) Close() error {
	if driver.lockId != "" {
		defer driver.db.Close()
		return unlock(driver)
	}
	return driver.db.Close()
}
// FilenameExtension reports that migration files for this driver carry the
// "sql" extension.
func (driver *Driver) FilenameExtension() string {
	return "sql"
}
// lock acquires the migration lock inside a CockroachDB retry-aware
// transaction: it checks the lock table for an existing row and, unless
// ForceLock is set, refuses to proceed when one is found; otherwise it
// inserts its own advisory lock id and remembers it on the driver so
// unlock can remove it later.
func lock(driver *Driver) error {
	// fmt.Println("Initialize: acquire lock, start")
	return crdb.ExecuteTx(context.Background(), driver.db, nil, func(tx *sql.Tx) error {
		aid, err := GenerateAdvisoryLockId(driver.config.DatabaseName)
		if err != nil {
			return err
		}
		query := `SELECT * FROM "` + driver.config.LockTable + `" WHERE lock_id = $1`
		rows, err := tx.Query(query, aid)
		if err != nil {
			return Error{OrigErr: err, Err: "Failed to fetch migration lock", Query: []byte(query)}
		}
		defer rows.Close()
		// If row exists at all, lock is present
		locked := rows.Next()
		if locked && !driver.config.ForceLock {
			return Error{Err: "Lock could not be acquired; already locked", Query: []byte(query)}
		}
		// With ForceLock a second row is inserted alongside the stale one —
		// TODO confirm whether the stale row should be cleaned up here.
		query = `INSERT INTO "` + driver.config.LockTable + `" (lock_id) VALUES ($1)`
		if _, err := tx.Exec(query, aid) ; err != nil {
			return Error{OrigErr: err, Err: "Failed to set migration lock", Query: []byte(query)}
		}
		driver.lockId = aid
		// fmt.Println("Initialize: acquire lock, finish")
		return nil
	})
}
// unlock removes this driver's row from the lock table. A missing lock id
// (lock never acquired) and a missing lock table (dropped database) are
// both treated as already-unlocked, not as errors.
func unlock(driver *Driver) error {
	if driver.lockId == "" {
		return nil
	}
	// fmt.Println("Close: release lock, start:", driver.lockId)
	query := `DELETE FROM "` + driver.config.LockTable + `" WHERE lock_id = $1`
	if _, err := driver.db.Exec(query, driver.lockId); err != nil {
		if e, ok := err.(*pq.Error); ok {
			// 42P01 is "UndefinedTableError" in CockroachDB
			// https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/pgerror/codes.go
			if e.Code == "42P01" {
				// On drops, the lock table is fully removed; This is fine, and is a valid "unlocked" state for the schema
				// fmt.Println("Migrate: release lock, finish with expected error")
				return nil
			}
		}
		return Error{OrigErr: err, Err: "failed to release migration lock", Query: []byte(query)}
	}
	// fmt.Println("Close: release lock, finish")
	return nil
}
// Migrate performs the migration of any one file: the script is applied
// first (in a transaction unless the file opts out via
// disable_ddl_transaction), then the version table is updated in a
// separate transaction — CockroachDB v1 does not allow schema changes and
// other writes in the same transaction. All progress and errors are
// reported on pipe, which is closed on return.
func (driver *Driver) Migrate(f file.File, pipe chan interface{}) {
	defer close(pipe)
	pipe <- f
	if driver.lockId == "" {
		pipe <- errors.New("Lock has not been acquired, cannot perform migration: " + f.FileName)
		return
	}
	// (1) read file, (2) apply script, (3) insert version
	if err := f.ReadContent(); err != nil {
		pipe <- err
		return
	}
	if txDisabled(fileOptions(f.Content)) {
		if _, err := driver.db.Exec(string(f.Content)); err != nil {
			pipeMigrationError(pipe, f, err)
			return
		}
	} else {
		err := crdb.ExecuteTx(context.Background(), driver.db, nil, func(tx *sql.Tx) error {
			_, execErr := tx.Exec(string(f.Content))
			return execErr
		})
		if err != nil {
			pipeMigrationError(pipe, f, err)
			return
		}
	}
	// Record (or erase) the version in its own transaction.
	err := crdb.ExecuteTx(context.Background(), driver.db, nil, func(tx *sql.Tx) error {
		if f.Direction == direction.Up {
			if _, err := tx.Exec(`INSERT INTO "`+driver.config.MigrationsTable+`" (version, name) VALUES ($1, $2)`, f.Version, f.Name); err != nil {
				return err
			}
		} else if f.Direction == direction.Down {
			if _, err := tx.Exec(`DELETE FROM "`+driver.config.MigrationsTable+`" WHERE version=$1`, f.Version); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		pipe <- err
	}
}

// pipeMigrationError reports a script-execution failure on pipe. For pq
// errors carrying a statement position it adds the offending line/column
// plus surrounding context; any other error is forwarded unchanged.
// (The original code asserted err.(*pq.Error) unconditionally, which would
// panic on non-pq errors such as a dropped connection.)
func pipeMigrationError(pipe chan interface{}, f file.File, err error) {
	pqErr, ok := err.(*pq.Error)
	if !ok {
		pipe <- err
		return
	}
	offset, convErr := strconv.Atoi(pqErr.Position)
	if convErr == nil && offset >= 0 {
		lineNo, columnNo := file.LineColumnFromOffset(f.Content, offset-1)
		errorPart := file.LinesBeforeAndAfter(f.Content, lineNo, 5, 5, true)
		pipe <- fmt.Errorf("%s %v: %s in line %v, column %v:\n\n%s", pqErr.Severity, pqErr.Code, pqErr.Message, lineNo, columnNo, string(errorPart))
	} else {
		pipe <- fmt.Errorf("%s %v: %s", pqErr.Severity, pqErr.Code, pqErr.Message)
	}
}
// Version returns the highest applied migration version; an empty
// migrations table yields the zero version with no error.
func (driver *Driver) Version() (file.Version, error) {
	var version file.Version
	err := driver.db.QueryRow(`SELECT version FROM "` + driver.config.MigrationsTable + `" ORDER BY version DESC LIMIT 1`).Scan(&version)
	if err == sql.ErrNoRows {
		// No migrations recorded yet is not an error.
		err = nil
	}
	return version, err
}
// Versions returns every applied migration version, newest first.
func (driver *Driver) Versions() (file.Versions, error) {
	rows, err := driver.db.Query(`SELECT version FROM "` + driver.config.MigrationsTable + `" ORDER BY version DESC`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	applied := file.Versions{}
	for rows.Next() {
		var v file.Version
		if scanErr := rows.Scan(&v); scanErr != nil {
			return nil, scanErr
		}
		applied = append(applied, v)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return applied, nil
}
// Execute runs an arbitrary SQL statement. It is part of the driver
// interface but intentionally unsupported by this driver.
func (driver *Driver) Execute(statement string) error {
	// Lowercase, no punctuation, per Go error-string convention (ST1005).
	return errors.New("execute method not supported")
}
// == INTERFACE METHODS: END ===================================================
// fileOptions extracts migration options from the first line of a script's
// content. The expected format is: "-- <option1> <option2> <...>". A file
// whose first line does not begin with the "-- " marker yields an empty
// (non-nil) slice.
func fileOptions(content []byte) []string {
	text := string(content)
	// Only the first line may carry options.
	if i := strings.IndexByte(text, '\n'); i >= 0 {
		text = text[:i]
	}
	const marker = "-- "
	if !strings.HasPrefix(text, marker) {
		return []string{}
	}
	return strings.Split(text[len(marker):], " ")
}
// txDisabled reports whether the transaction-disabling option appears in
// the parsed file options.
func txDisabled(opts []string) bool {
	for _, opt := range opts {
		if opt == txDisabledOption {
			return true
		}
	}
	return false
}
// ensureVersionTable creates the migrations bookkeeping table in the
// current schema if it does not already exist.
func (driver *Driver) ensureVersionTable() error {
	// check if migration table exists
	var count int
	query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1`
	if err := driver.db.QueryRow(query, driver.config.MigrationsTable).Scan(&count); err != nil {
		return &Error{OrigErr: err, Query: []byte(query)}
	}
	if count == 1 {
		return nil
	}
	// if not, create the empty migration table
	// (STRING and CURRENT_TIMESTAMP() are CockroachDB-flavored SQL)
	query = `CREATE TABLE IF NOT EXISTS "` + driver.config.MigrationsTable + `" (version BIGINT NOT NULL PRIMARY KEY, name STRING(255) NOT NULL, applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP())`
	if _, err := driver.db.Exec(query); err != nil {
		return &Error{OrigErr: err, Query: []byte(query)}
	}
	return nil
}
// ensureLockTable creates the advisory-lock table in the current schema if
// it does not already exist.
func (driver *Driver) ensureLockTable() error {
	// check if lock table exists
	var count int
	query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = $1 AND table_schema = (SELECT current_schema()) LIMIT 1`
	if err := driver.db.QueryRow(query, driver.config.LockTable).Scan(&count); err != nil {
		return &Error{OrigErr: err, Query: []byte(query)}
	}
	if count == 1 {
		return nil
	}
	// if not, create the empty lock table
	query = `CREATE TABLE IF NOT EXISTS "` + driver.config.LockTable + `" (lock_id INT NOT NULL PRIMARY KEY)`
	if _, err := driver.db.Exec(query); err != nil {
		return &Error{OrigErr: err, Query: []byte(query)}
	}
	return nil
}
// FilterCustomQuery filters all query values starting with `x-`
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
ux := *u
vx := make(nurl.Values)
for k, v := range ux.Query() {
if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") {
vx[k] = v
}
}
ux.RawQuery = vx.Encode()
return &ux
}
// PopulateConfig wires instance and config into driver: it verifies the
// connection, resolves the current database name, applies table-name
// defaults, and makes sure the migrations and lock tables exist.
func PopulateConfig(driver *Driver, instance *sql.DB, config *Config) error {
	if config == nil {
		return ErrNilConfig
	}
	// Fail fast if the database is unreachable.
	if err := instance.Ping(); err != nil {
		return err
	}
	query := `SELECT current_database()`
	var databaseName string
	if err := instance.QueryRow(query).Scan(&databaseName); err != nil {
		return &Error{OrigErr: err, Query: []byte(query)}
	}
	if len(databaseName) == 0 {
		return ErrNoDatabaseName
	}
	config.DatabaseName = databaseName
	// Fall back to package defaults for unset table names.
	if len(config.MigrationsTable) == 0 {
		config.MigrationsTable = DefaultMigrationsTable
	}
	if len(config.LockTable) == 0 {
		config.LockTable = DefaultLockTable
	}
	driver.db = instance
	driver.config = config
	// Table creation must happen after db/config are attached to driver.
	if err := driver.ensureVersionTable(); err != nil {
		return err
	}
	if err := driver.ensureLockTable(); err != nil {
		return err
	}
	return nil
}
// Error should be used for errors involving queries ran against the database.
type Error struct {
	// Line is the (optional) line number within the query.
	Line uint
	// Query is a query excerpt.
	Query []byte
	// Err is a human-friendly summary of what went wrong.
	Err string
	// OrigErr is the underlying error.
	OrigErr error
}

// Error renders the error as "<cause> in line <n>: <query>", appending the
// underlying error as a detail suffix when a summary message is present.
func (e Error) Error() string {
	location := fmt.Sprintf("in line %v: %s", e.Line, e.Query)
	if e.Err == "" {
		return fmt.Sprintf("%v %s", e.OrigErr, location)
	}
	return fmt.Sprintf("%v %s (details: %v)", e.Err, location, e.OrigErr)
}
// GenerateAdvisoryLockId derives a deterministic advisory-lock identifier
// for the given database name by salting its CRC-32 (IEEE) checksum and
// rendering the result in decimal.
func GenerateAdvisoryLockId(databaseName string) (string, error) {
	checksum := crc32.ChecksumIEEE([]byte(databaseName)) * uint32(advisoryLockIdSalt)
	return strconv.FormatUint(uint64(checksum), 10), nil
}
// init registers one shared Driver instance under every URL scheme alias
// served by the CockroachDB implementation.
func init() {
	drv := Driver{}
	driver.RegisterDriver("cockroach", &drv)
	driver.RegisterDriver("cockroachdb", &drv)
	driver.RegisterDriver("crdb-postgres", &drv)
}
|
package storage
import "github.com/suaas21/grapgql-demo/books-authors-query/model"
// ListBook is the in-memory store of all books (no persistence).
var ListBook = make([]model.Book, 0)
// ListAuthor is the in-memory store of all authors (no persistence).
var ListAuthor = make([]model.Author, 0)
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gardensetup_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/gardener/test-infra/pkg/common"
"github.com/gardener/test-infra/pkg/util/gardensetup"
)
// Specs for the gardensetup extensions utilities: MergeExtensions (merging
// two GSExtensions maps) and ParseFlag (parsing the CLI extensions flag).
var _ = Describe("gardensetup extensions util", func() {
	// MergeExtensions: the new value wins per key; unmentioned base keys survive.
	Context("Merge", func() {
		It("should merge two extensions with the same keys", func() {
			base := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "a1"},
				"key2": common.GSExtensionConfig{Repository: "a2"},
			}
			newVal := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "b1"},
				"key2": common.GSExtensionConfig{Repository: "b2"},
			}
			expected := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "b1"},
				"key2": common.GSExtensionConfig{Repository: "b2"},
			}
			res := gardensetup.MergeExtensions(base, newVal)
			Expect(res).To(Equal(expected))
		})
		It("should keep base key if their are not defined by the new value", func() {
			base := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "a1"},
				"key2": common.GSExtensionConfig{Repository: "a2"},
			}
			newVal := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "b1"},
			}
			expected := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "b1"},
				"key2": common.GSExtensionConfig{Repository: "a2"},
			}
			res := gardensetup.MergeExtensions(base, newVal)
			Expect(res).To(Equal(expected))
		})
		It("should add additional values defined by the new key to existing base keys", func() {
			base := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "a1"},
				"key2": common.GSExtensionConfig{Repository: "a2"},
			}
			newVal := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "b1"},
				"key3": common.GSExtensionConfig{Repository: "b1"},
			}
			expected := common.GSExtensions{
				"key1": common.GSExtensionConfig{Repository: "b1"},
				"key2": common.GSExtensionConfig{Repository: "a2"},
				"key3": common.GSExtensionConfig{Repository: "b1"},
			}
			res := gardensetup.MergeExtensions(base, newVal)
			Expect(res).To(Equal(expected))
		})
	})
	// ParseFlag: "name=repo::rev" entries, comma-separated; the revision is
	// classified as tag, commit, or branch.
	Context("parse flag", func() {
		It("should parse one extensions definition with a valid version", func() {
			f := "ext1=repo1::0.0.1"
			res, err := gardensetup.ParseFlag(f)
			Expect(err).ToNot(HaveOccurred())
			Expect(res).To(Equal(common.GSExtensions{
				"ext1": common.GSExtensionConfig{Repository: "repo1", Tag: "0.0.1"},
			}))
		})
		It("should parse one extensions definition with a commit", func() {
			f := "ext1=repo1::000000000a000000b0000000000000c000000000"
			res, err := gardensetup.ParseFlag(f)
			Expect(err).ToNot(HaveOccurred())
			Expect(res).To(Equal(common.GSExtensions{
				"ext1": common.GSExtensionConfig{Repository: "repo1", Commit: "000000000a000000b0000000000000c000000000", ImageTag: "000000000a000000b0000000000000c000000000"},
			}))
		})
		It("should parse one extensions definition with a branch", func() {
			f := "ext1=repo1::patch-1"
			res, err := gardensetup.ParseFlag(f)
			Expect(err).ToNot(HaveOccurred())
			Expect(res).To(Equal(common.GSExtensions{
				"ext1": common.GSExtensionConfig{Repository: "repo1", Branch: "patch-1"},
			}))
		})
		It("should parse multiple extension definitions", func() {
			f := "ext1=repo1::0.0.1,ext2=repo2::0.0.0"
			res, err := gardensetup.ParseFlag(f)
			Expect(err).ToNot(HaveOccurred())
			Expect(res).To(Equal(common.GSExtensions{
				"ext1": common.GSExtensionConfig{Repository: "repo1", Tag: "0.0.1"},
				"ext2": common.GSExtensionConfig{Repository: "repo2", Tag: "0.0.0"},
			}))
		})
	})
})
|
package channel
import "testing"
// BenchmarkStart measures Start by handing it the iteration count directly.
// NOTE(review): Start consumes b.N itself — confirm it performs b.N units of
// work so the reported ns/op scales correctly.
func BenchmarkStart(b *testing.B) {
	Start(b.N)
}
|
package acrostic
// Part : a part of speech (品詞).
// Suffixes are not strictly parts of speech, but separating them out is
// inconvenient for processing, so they are treated as one here.
type Part int
const (
	// UnknownPart : unknown part of speech
	UnknownPart Part = iota
	// VerbPart : verb (動詞)
	VerbPart
	// AdjectivePart : adjective (形容詞)
	AdjectivePart
	// AdjectiveVerbPart : adjectival verb / na-adjective (形容動詞)
	AdjectiveVerbPart
	// NounPart : noun (名詞)
	NounPart
	// AdnominalPart : adnominal (連体詞)
	AdnominalPart
	// AdverbPart : adverb (副詞)
	AdverbPart
	// ConjunctionPart : conjunction (接続詞)
	ConjunctionPart
	// EmotiveVerbPart : interjection (感動詞)
	EmotiveVerbPart
	// AuxiliaryVerbPart : auxiliary verb (助動詞)
	AuxiliaryVerbPart
	// ParticlePart : particle (助詞)
	ParticlePart
	// SuffixPart : suffix (接尾辞; not a true part of speech)
	SuffixPart
	// PrefixPart : prefix (接頭辞; not a true part of speech)
	PrefixPart
	// SpecialPart : special tokens such as punctuation (特殊; not a part of speech)
	SpecialPart
	// DeterminePart : copula "da" (判定詞)
	DeterminePart
	// DemonstrativePart : demonstrative (指示詞)
	DemonstrativePart
)
// String returns the Japanese label for the part of speech; values without
// a label (including UnknownPart) render as "不明" (unknown).
func (p Part) String() string {
	labels := map[Part]string{
		VerbPart:          "動詞",
		AdjectivePart:     "形容詞",
		AdjectiveVerbPart: "形容動詞",
		NounPart:          "名詞",
		AdnominalPart:     "連体詞",
		AdverbPart:        "副詞",
		ConjunctionPart:   "接続詞",
		EmotiveVerbPart:   "感動詞",
		AuxiliaryVerbPart: "助動詞",
		ParticlePart:      "助詞",
		SuffixPart:        "接尾辞",
		PrefixPart:        "接頭辞",
		SpecialPart:       "特殊",
		DeterminePart:     "判定詞",
		DemonstrativePart: "指示詞",
	}
	if label, ok := labels[p]; ok {
		return label
	}
	return "不明"
}
// Rune returns the part-of-speech label as a rune slice.
func (p Part) Rune() []rune {
	return []rune(p.String())
}
// NewPart maps a Japanese part-of-speech label (given as runes) back to its
// Part constant. Unrecognized labels yield UnknownPart. It is the inverse
// of Part.String.
func NewPart(s []rune) Part {
	parts := map[string]Part{
		"動詞":   VerbPart,
		"形容詞":  AdjectivePart,
		"形容動詞": AdjectiveVerbPart,
		"名詞":   NounPart,
		"連体詞":  AdnominalPart,
		"副詞":   AdverbPart,
		"接続詞":  ConjunctionPart,
		"感動詞":  EmotiveVerbPart,
		"助動詞":  AuxiliaryVerbPart,
		"助詞":   ParticlePart,
		"接尾辞":  SuffixPart,
		"接頭辞":  PrefixPart,
		"特殊":   SpecialPart,
		"判定詞":  DeterminePart,
		"指示詞":  DemonstrativePart,
	}
	if p, ok := parts[string(s)]; ok {
		return p
	}
	return UnknownPart
}
// IsIndependent reports whether the part of speech is an independent word:
// true unless it is an adjunct, a suffix, special, or unknown.
func (p Part) IsIndependent() bool {
	return !p.IsAdjunct() && !p.IsSuffix() && !p.IsSpecial() && p != UnknownPart
}
// IsAdjunct reports whether the part of speech is an adjunct word
// (auxiliary verb or particle).
func (p Part) IsAdjunct() bool {
	return (p == AuxiliaryVerbPart || p == ParticlePart)
}
// IsSuffix reports whether the part of speech is a suffix.
func (p Part) IsSuffix() bool {
	return p == SuffixPart
}
// IsSpecial reports whether the part of speech is the special category
// (punctuation and similar tokens).
func (p Part) IsSpecial() bool {
	return p == SpecialPart
}
// IsFlection reports whether the part of speech inflects (verbs and
// auxiliary verbs). NOTE(review): the original comment called this
// "Inflection" while the method is named IsFlection.
func (p Part) IsFlection() bool {
	return p == VerbPart || p == AuxiliaryVerbPart
}
|
package passport
import "testing"
// TestPresentValidator checks that PresentValidator accepts passports
// containing the configured field and rejects those without it.
func TestPresentValidator(t *testing.T) {
	validator := PresentValidator("foo")
	cases := []struct {
		passport Passport
		expected bool
	}{
		{Passport{fields: map[string]string{}}, false},
		{Passport{fields: map[string]string{"foo": "bar"}}, true},
		{Passport{fields: map[string]string{"foo": "bar", "baz": "123"}}, true},
		{Passport{fields: map[string]string{"baz": "123"}}, false},
	}
	for _, c := range cases {
		// Errorf (not bare Error) so a failure identifies the failing case.
		if got := validator(c.passport); got != c.expected {
			t.Errorf("PresentValidator(%v) = %v, want %v", c.passport, got, c.expected)
		}
	}
}
// TestIntValidator checks that IntValidator accepts only integer field
// values within the configured inclusive range.
func TestIntValidator(t *testing.T) {
	validator := IntValidator("foo", 2, 4)
	cases := []struct {
		passport Passport
		expected bool
	}{
		{Passport{fields: map[string]string{}}, false},
		{Passport{fields: map[string]string{"foo": "bar"}}, false},
		{Passport{fields: map[string]string{"foo": "2"}}, true},
		{Passport{fields: map[string]string{"foo": "3", "baz": "123"}}, true},
		{Passport{fields: map[string]string{"foo": "5"}}, false},
		{Passport{fields: map[string]string{"baz": "123"}}, false},
	}
	for _, c := range cases {
		// Errorf (not bare Error) so a failure identifies the failing case.
		if got := validator(c.passport); got != c.expected {
			t.Errorf("IntValidator(%v) = %v, want %v", c.passport, got, c.expected)
		}
	}
}
// TestSizeValidator checks that SizeValidator requires the configured unit
// suffix and an in-range numeric value.
func TestSizeValidator(t *testing.T) {
	validator := SizeValidator("foo", "px", 2, 4)
	cases := []struct {
		passport Passport
		expected bool
	}{
		{Passport{fields: map[string]string{}}, false},
		{Passport{fields: map[string]string{"foo": "bar"}}, false},
		{Passport{fields: map[string]string{"foo": "2"}}, false},
		{Passport{fields: map[string]string{"foo": "2px"}}, true},
		{Passport{fields: map[string]string{"foo": "3px", "baz": "123"}}, true},
		{Passport{fields: map[string]string{"foo": "2cm"}}, false},
		{Passport{fields: map[string]string{"baz": "123"}}, false},
	}
	for _, c := range cases {
		// Errorf (not bare Error) so a failure identifies the failing case.
		if got := validator(c.passport); got != c.expected {
			t.Errorf("SizeValidator(%v) = %v, want %v", c.passport, got, c.expected)
		}
	}
}
// TestPatternValidator checks that PatternValidator matches the field value
// against the configured regular expression.
func TestPatternValidator(t *testing.T) {
	validator := PatternValidator("foo", `^[0-4a-c]{3}$`)
	cases := []struct {
		passport Passport
		expected bool
	}{
		{Passport{fields: map[string]string{}}, false},
		{Passport{fields: map[string]string{"foo": "bar"}}, false},
		{Passport{fields: map[string]string{"foo": "abc"}}, true},
		{Passport{fields: map[string]string{"foo": "abcd"}}, false},
		{Passport{fields: map[string]string{"foo": "abb", "baz": "123"}}, true},
		{Passport{fields: map[string]string{"baz": "123"}}, false},
	}
	for _, c := range cases {
		// Errorf (not bare Error) so a failure identifies the failing case.
		if got := validator(c.passport); got != c.expected {
			t.Errorf("PatternValidator(%v) = %v, want %v", c.passport, got, c.expected)
		}
	}
}
|
package main
import (
"math/rand"
"time"
)
// main demonstrates basic buffered-channel usage: seed the global PRNG,
// send one random int into a buffered channel, receive it back, and print it.
func main() {
	// Buffer of 1 so the send below does not block this goroutine.
	ci := make(chan int, 1) // gofmt: spaces around :=
	rand.Seed(time.Now().UnixNano())
	ci <- rand.Intn(100) // send to channel
	b := <-ci            // receive from channel
	println(b)
}
|
package runtime
// EvalFnCall processes the elements of a function call form: it evaluates
// each argument in env and packages the callable, the evaluated arguments,
// and the environment into a Call value. The first argument-evaluation
// error aborts the call and is returned.
func EvalFnCall(env Env, fn Callable, args Sequence) (Value, error) {
	args, err := EvalEach(env, args)
	if err != nil {
		return nil, err
	}
	return &Call{
		Fn: fn,
		Args: args,
		Env: env,
	}, nil
}
|
package usecases
import (
"math"
"regexp"
"sort"
"strings"
)
// SearchServiceImpl is an implementation of the SearchService
type SearchServiceImpl struct {
	wordRegexp *regexp.Regexp // tokenizer: matches runs of word characters
	termCounts map[string]float64 // per-term count of indexed documents containing it
	corpus map[string]SearchObject // indexed documents keyed by object ID
}
// NewSearchServiceImpl returns a new instance of SearchServiceImpl with an
// empty corpus and a \w+ tokenizer.
func NewSearchServiceImpl() *SearchServiceImpl {
	return &SearchServiceImpl{
		wordRegexp: regexp.MustCompile(`\w+`),
		termCounts: make(map[string]float64),
		corpus: make(map[string]SearchObject),
	}
}
// Index adds the given object to the search index.
//
// NOTE(review): indexing the same obj.ID twice overwrites the corpus entry
// but increments termCounts again, so document-frequency counts drift on
// re-index — confirm callers index each ID at most once.
func (s *SearchServiceImpl) Index(obj SearchObject) {
	// get term counts and add them to term counts list
	terms := s.getTermCounts(obj.Content)
	for term := range terms {
		s.termCounts[term] += 1.0
	}
	// add search object to document corpus
	obj.Terms = terms
	s.corpus[obj.ID] = obj
}
// Search returns the list of results of the given object type relevant to
// the free-text query, ranked by descending TF-IDF score.
func (s *SearchServiceImpl) Search(objtype string, query string) []SearchObject {
	queryTerms := s.wordRegexp.FindAllString(strings.ToLower(query), -1)
	results := make([]SearchObject, 0)
	// An empty query matches nothing; bail out before the score division
	// below (0/0 would produce NaN, which the cutoff rejects anyway, but
	// the guard makes the intent explicit and skips the corpus scan).
	if len(queryTerms) == 0 {
		return results
	}
	// A term's IDF is independent of the candidate document, so compute it
	// once per distinct query term instead of once per (document, term) pair.
	idf := make(map[string]float64, len(queryTerms))
	for _, queryTerm := range queryTerms {
		if _, ok := idf[queryTerm]; !ok {
			idf[queryTerm] = s.getIDF(queryTerm)
		}
	}
	for _, doc := range s.corpus {
		if doc.Type != objtype {
			continue
		}
		score := 0.0
		for _, queryTerm := range queryTerms {
			if docTermFreq, ok := doc.Terms[queryTerm]; ok {
				score += docTermFreq * idf[queryTerm]
			}
		}
		// Normalize by query length; keep only sufficiently relevant docs.
		score /= float64(len(queryTerms))
		if score > ScoreCutoff {
			doc.Score = score
			results = append(results, doc)
		}
	}
	// sort search results by score
	sort.Slice(results, func(i, j int) bool {
		return results[i].Score > results[j].Score
	})
	return results
}
// getTermCounts tokenizes query into lowercase word terms and returns each
// term's count normalized by the total number of terms (relative frequency).
func (s *SearchServiceImpl) getTermCounts(query string) map[string]float64 {
	words := s.wordRegexp.FindAllString(strings.ToLower(query), -1)
	// Tally raw occurrences per term.
	counts := make(map[string]float64, len(words))
	for _, w := range words {
		counts[w]++
	}
	// Scale each count by the total term count.
	total := float64(len(words))
	for w := range counts {
		counts[w] /= total
	}
	return counts
}
// getIDF gets the inverse document frequency of term: log(N / (df + 1)),
// where N is the corpus size and df the number of documents containing it.
//
// NOTE(review): this rescans the whole corpus per call even though Index
// maintains s.termCounts with the same per-document tallies; the two only
// diverge if a document ID is ever re-indexed.
func (s *SearchServiceImpl) getIDF(term string) float64 {
	docCount := 0.0
	for _, doc := range s.corpus {
		for docTerm := range doc.Terms {
			if term == docTerm {
				docCount += 1.0
			}
		}
	}
	return math.Log(float64(len(s.corpus)) / (docCount + 1.0))
}
|
package router
// chiRouter adapts the chi mux to this package's Router interface.
type chiRouter struct{}

// chiDispatcher is the shared chi mux behind every chiRouter.
// (Fixes the "chiDispatcjer" typo: every other reference in this file uses
// chiDispatcher, so the file did not compile as written.)
var chiDispatcher = chi.NewRouter()
// NewChiRouter creates a new chi router wired with request-ID, real-IP,
// logging, and panic-recovery middleware on the shared dispatcher.
func NewChiRouter() Router {
	chiDispatcher.Use(middleware.RequestID)
	chiDispatcher.Use(middleware.RealIP)
	chiDispatcher.Use(middleware.Logger)
	chiDispatcher.Use(middleware.Recoverer)
	return &chiRouter{}
}
// POST registers f as the handler for POST requests to uri.
func (*chiRouter) POST(uri string, f func(w http.ResponseWriter, r *http.Request)) {
	chiDispatcher.Post(uri, f)
}
// GET registers f as the handler for GET requests to uri.
func (*chiRouter) GET(uri string, f func(w http.ResponseWriter, r *http.Request)) {
	chiDispatcher.Get(uri, f)
}
// SERVE starts the HTTP server on the given port and blocks until it stops.
func (*chiRouter) SERVE(port string) {
	fmt.Printf("Chi HTTP server running on port %v\n", port)
	// ListenAndServe only returns on failure; report the error instead of
	// dropping it silently.
	if err := http.ListenAndServe(port, chiDispatcher); err != nil {
		fmt.Printf("Chi HTTP server stopped: %v\n", err)
	}
}
|
package server
// HTTPErrorResponse is the JSON error payload returned by the server.
type HTTPErrorResponse struct {
	Err bool `json:"err"` // true when this response describes an error
	Reason string `json:"reason"` // human-readable description of the failure
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
import (
"fmt"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/usermem"
)
// We unconditionally report a single NUMA node. This also means that our
// "nodemask_t" is a single unsigned long (uint64).
const (
	// maxNodes is the number of NUMA nodes reported to applications.
	maxNodes = 1
	// allowedNodemask has a bit set for each reportable node (only node 0).
	allowedNodemask = (1 << maxNodes) - 1
)
// copyInNodemask copies a nodemask of up to maxnode bits in from addr and
// returns the first unsigned long's worth of bits. It fails with EINVAL if
// any bit outside allowedNodemask — or any bit beyond the first 64 — is set.
func copyInNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32) (uint64, error) {
	// "nodemask points to a bit mask of node IDs that contains up to maxnode
	// bits. The bit mask size is rounded to the next multiple of
	// sizeof(unsigned long), but the kernel will use bits only up to maxnode.
	// A NULL value of nodemask or a maxnode value of zero specifies the empty
	// set of nodes. If the value of maxnode is zero, the nodemask argument is
	// ignored." - set_mempolicy(2). Unfortunately, most of this is inaccurate
	// because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses
	// maxnode-1, not maxnode, as the number of bits.
	bits := maxnode - 1
	if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
		return 0, linuxerr.EINVAL
	}
	if bits == 0 {
		return 0, nil
	}
	// Copy in the whole nodemask.
	numUint64 := (bits + 63) / 64
	buf := t.CopyScratchBuffer(int(numUint64) * 8)
	if _, err := t.CopyInBytes(addr, buf); err != nil {
		return 0, err
	}
	val := hostarch.ByteOrder.Uint64(buf)
	// Check that only allowed bits in the first unsigned long in the nodemask
	// are set.
	if val&^allowedNodemask != 0 {
		return 0, linuxerr.EINVAL
	}
	// Check that all remaining bits in the nodemask are 0.
	for i := 8; i < len(buf); i++ {
		if buf[i] != 0 {
			return 0, linuxerr.EINVAL
		}
	}
	return val, nil
}
// copyOutNodemask writes val as the first unsigned long of a maxnode-bit
// nodemask at addr, zero-filling any remaining unsigned longs.
func copyOutNodemask(t *kernel.Task, addr hostarch.Addr, maxnode uint32, val uint64) error {
	// mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of
	// bits.
	bits := maxnode - 1
	if bits > hostarch.PageSize*8 { // also handles overflow from maxnode == 0
		return linuxerr.EINVAL
	}
	if bits == 0 {
		return nil
	}
	// Copy out the first unsigned long in the nodemask.
	buf := t.CopyScratchBuffer(8)
	hostarch.ByteOrder.PutUint64(buf, val)
	if _, err := t.CopyOutBytes(addr, buf); err != nil {
		return err
	}
	// Zero out remaining unsigned longs in the nodemask.
	if bits > 64 {
		remAddr, ok := addr.AddLength(8)
		if !ok {
			return linuxerr.EFAULT
		}
		remUint64 := (bits - 1) / 64
		if _, err := t.MemoryManager().ZeroOut(t, remAddr, int64(remUint64)*8, usermem.IOOpts{
			AddressSpaceActive: true,
		}); err != nil {
			return err
		}
	}
	return nil
}
// GetMempolicy implements the syscall get_mempolicy(2).
//
// Because the sentry reports a single NUMA node, several queries reduce to
// constants: the allowed node set is always allowedNodemask, and node-ID
// queries answer via MPOL_DEFAULT (node 0).
func GetMempolicy(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	mode := args[0].Pointer()
	nodemask := args[1].Pointer()
	maxnode := args[2].Uint()
	addr := args[3].Pointer()
	flags := args[4].Uint()
	// Reject any flag outside the three documented ones.
	if flags&^(linux.MPOL_F_NODE|linux.MPOL_F_ADDR|linux.MPOL_F_MEMS_ALLOWED) != 0 {
		return 0, nil, linuxerr.EINVAL
	}
	nodeFlag := flags&linux.MPOL_F_NODE != 0
	addrFlag := flags&linux.MPOL_F_ADDR != 0
	memsAllowed := flags&linux.MPOL_F_MEMS_ALLOWED != 0
	// "EINVAL: The value specified by maxnode is less than the number of node
	// IDs supported by the system." - get_mempolicy(2)
	if nodemask != 0 && maxnode < maxNodes {
		return 0, nil, linuxerr.EINVAL
	}
	// "If flags specifies MPOL_F_MEMS_ALLOWED [...], the mode argument is
	// ignored and the set of nodes (memories) that the thread is allowed to
	// specify in subsequent calls to mbind(2) or set_mempolicy(2) (in the
	// absence of any mode flags) is returned in nodemask."
	if memsAllowed {
		// "It is not permitted to combine MPOL_F_MEMS_ALLOWED with either
		// MPOL_F_ADDR or MPOL_F_NODE."
		if nodeFlag || addrFlag {
			return 0, nil, linuxerr.EINVAL
		}
		if err := copyOutNodemask(t, nodemask, maxnode, allowedNodemask); err != nil {
			return 0, nil, err
		}
		return 0, nil, nil
	}
	// "If flags specifies MPOL_F_ADDR, then information is returned about the
	// policy governing the memory address given in addr. ... If the mode
	// argument is not NULL, then get_mempolicy() will store the policy mode
	// and any optional mode flags of the requested NUMA policy in the location
	// pointed to by this argument. If nodemask is not NULL, then the nodemask
	// associated with the policy will be stored in the location pointed to by
	// this argument."
	if addrFlag {
		policy, nodemaskVal, err := t.MemoryManager().NumaPolicy(addr)
		if err != nil {
			return 0, nil, err
		}
		if nodeFlag {
			// "If flags specifies both MPOL_F_NODE and MPOL_F_ADDR,
			// get_mempolicy() will return the node ID of the node on which the
			// address addr is allocated into the location pointed to by mode.
			// If no page has yet been allocated for the specified address,
			// get_mempolicy() will allocate a page as if the thread had
			// performed a read (load) access to that address, and return the
			// ID of the node where that page was allocated."
			// Read one byte to fault the page in, matching that behavior.
			buf := t.CopyScratchBuffer(1)
			_, err := t.CopyInBytes(addr, buf)
			if err != nil {
				return 0, nil, err
			}
			policy = linux.MPOL_DEFAULT // maxNodes == 1
		}
		if mode != 0 {
			if _, err := policy.CopyOut(t, mode); err != nil {
				return 0, nil, err
			}
		}
		if nodemask != 0 {
			if err := copyOutNodemask(t, nodemask, maxnode, nodemaskVal); err != nil {
				return 0, nil, err
			}
		}
		return 0, nil, nil
	}
	// "EINVAL: ... flags specified MPOL_F_ADDR and addr is NULL, or flags did
	// not specify MPOL_F_ADDR and addr is not NULL." This is partially
	// inaccurate: if flags specifies MPOL_F_ADDR,
	// mm/mempolicy.c:do_get_mempolicy() doesn't special-case NULL; it will
	// just (usually) fail to find a VMA at address 0 and return EFAULT.
	if addr != 0 {
		return 0, nil, linuxerr.EINVAL
	}
	// "If flags is specified as 0, then information about the calling thread's
	// default policy (as set by set_mempolicy(2)) is returned, in the buffers
	// pointed to by mode and nodemask. ... If flags specifies MPOL_F_NODE, but
	// not MPOL_F_ADDR, and the thread's current policy is MPOL_INTERLEAVE,
	// then get_mempolicy() will return in the location pointed to by a
	// non-NULL mode argument, the node ID of the next node that will be used
	// for interleaving of internal kernel pages allocated on behalf of the
	// thread."
	policy, nodemaskVal := t.NumaPolicy()
	if nodeFlag {
		// MPOL_F_NODE without MPOL_F_ADDR is only valid for MPOL_INTERLEAVE.
		if policy&^linux.MPOL_MODE_FLAGS != linux.MPOL_INTERLEAVE {
			return 0, nil, linuxerr.EINVAL
		}
		policy = linux.MPOL_DEFAULT // maxNodes == 1
	}
	if mode != 0 {
		if _, err := policy.CopyOut(t, mode); err != nil {
			return 0, nil, err
		}
	}
	if nodemask != 0 {
		if err := copyOutNodemask(t, nodemask, maxnode, nodemaskVal); err != nil {
			return 0, nil, err
		}
	}
	return 0, nil, nil
}
// SetMempolicy implements the syscall set_mempolicy(2): it validates the
// requested mode/flags/nodemask combination and installs it as the calling
// thread's NUMA policy.
func SetMempolicy(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	modeWithFlags := linux.NumaPolicy(args[0].Int())
	nodemask := args[1].Pointer()
	maxnode := args[2].Uint()
	modeWithFlags, nodemaskVal, err := copyInMempolicyNodemask(t, modeWithFlags, nodemask, maxnode)
	if err != nil {
		return 0, nil, err
	}
	t.SetNumaPolicy(modeWithFlags, nodemaskVal)
	return 0, nil, nil
}
// Mbind implements the syscall mbind(2): it validates flags and the
// mode/nodemask pair, then applies the policy to the [addr, addr+length)
// range of the calling task's address space.
func Mbind(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	length := args[1].Uint64()
	mode := linux.NumaPolicy(args[2].Int())
	nodemask := args[3].Pointer()
	maxnode := args[4].Uint()
	flags := args[5].Uint()
	if flags&^linux.MPOL_MF_VALID != 0 {
		return 0, nil, linuxerr.EINVAL
	}
	// "If MPOL_MF_MOVE_ALL is passed in flags ... [the] calling thread must be
	// privileged (CAP_SYS_NICE) to use this flag." - mbind(2)
	if flags&linux.MPOL_MF_MOVE_ALL != 0 && !t.HasCapability(linux.CAP_SYS_NICE) {
		return 0, nil, linuxerr.EPERM
	}
	mode, nodemaskVal, err := copyInMempolicyNodemask(t, mode, nodemask, maxnode)
	if err != nil {
		return 0, nil, err
	}
	// Since we claim to have only a single node, all flags can be ignored
	// (since all pages must already be on that single node).
	err = t.MemoryManager().SetNumaPolicy(addr, length, mode, nodemaskVal)
	return 0, nil, err
}
// copyInMempolicyNodemask validates and decodes the (mode|flags, nodemask)
// pair shared by set_mempolicy(2) and mbind(2), returning the effective
// policy (mode with flags re-applied) and the nodemask value.
func copyInMempolicyNodemask(t *kernel.Task, modeWithFlags linux.NumaPolicy, nodemask hostarch.Addr, maxnode uint32) (linux.NumaPolicy, uint64, error) {
	flags := linux.NumaPolicy(modeWithFlags & linux.MPOL_MODE_FLAGS)
	mode := linux.NumaPolicy(modeWithFlags &^ linux.MPOL_MODE_FLAGS)
	if flags == linux.MPOL_MODE_FLAGS {
		// Can't specify both mode flags simultaneously.
		return 0, 0, linuxerr.EINVAL
	}
	if mode < 0 || mode >= linux.MPOL_MAX {
		// Must specify a valid mode.
		return 0, 0, linuxerr.EINVAL
	}
	var nodemaskVal uint64
	if nodemask != 0 {
		var err error
		nodemaskVal, err = copyInNodemask(t, nodemask, maxnode)
		if err != nil {
			return 0, 0, err
		}
	}
	// Each mode constrains which nodemask/flag combinations are legal.
	switch mode {
	case linux.MPOL_DEFAULT:
		// "nodemask must be specified as NULL." - set_mempolicy(2). This is inaccurate;
		// Linux allows a nodemask to be specified, as long as it is empty.
		if nodemaskVal != 0 {
			return 0, 0, linuxerr.EINVAL
		}
	case linux.MPOL_BIND, linux.MPOL_INTERLEAVE:
		// These require a non-empty nodemask.
		if nodemaskVal == 0 {
			return 0, 0, linuxerr.EINVAL
		}
	case linux.MPOL_PREFERRED:
		// This permits an empty nodemask, as long as no flags are set.
		if nodemaskVal == 0 {
			if flags != 0 {
				return 0, 0, linuxerr.EINVAL
			}
			// On newer Linux versions, MPOL_PREFERRED is implemented as MPOL_LOCAL
			// when node set is empty. See 7858d7bca7fb ("mm/mempolicy: don't handle
			// MPOL_LOCAL like a fake MPOL_PREFERRED policy").
			mode = linux.MPOL_LOCAL
		}
	case linux.MPOL_LOCAL:
		// This requires an empty nodemask and no flags set.
		if nodemaskVal != 0 || flags != 0 {
			return 0, 0, linuxerr.EINVAL
		}
	default:
		// Unknown mode, which we should have rejected above.
		panic(fmt.Sprintf("unknown mode: %v", mode))
	}
	return mode | flags, nodemaskVal, nil
}
|
package uweb
import (
"compress/gzip"
"net/http"
"strings"
"sync"
)
// GZIP_THRESHOLD is the minimum response body size, in bytes, that gets
// compressed; smaller bodies are sent as-is.
var (
	GZIP_THRESHOLD = 150
)
// MdCompress returns the response-compression middleware.
// Only gzip is supported.
func MdCompress() Middleware {
	return NewGzip()
}
// gzipWriter wraps an http.ResponseWriter so that body writes are routed
// through a gzip.Writer (implements http.ResponseWriter).
type gzipWriter struct {
	http.ResponseWriter
	w *gzip.Writer
}
// Write hides the embedded ResponseWriter's Write, compressing data into
// the underlying gzip stream instead.
func (g *gzipWriter) Write(data []byte) (int, error) {
	return g.w.Write(data)
}
// Gzip is the gzip-compression middleware. It keeps a pool of gzip.Writer
// values to avoid allocating one per response.
type Gzip struct {
	pool sync.Pool // cache gzip.Writer
}
// NewGzip creates the gzip middleware with a lazily-populated writer pool.
func NewGzip() *Gzip {
	g := new(Gzip)
	// Pool entries are created unbound; Handle calls Reset before use.
	g.pool.New = func() interface{} {
		return gzip.NewWriter(nil)
	}
	return g
}
// Name identifies this middleware as "compress".
func (g *Gzip) Name() string {
	return "compress"
}
// Handle implements Middleware: it gzip-compresses the buffered response
// body when the client accepts gzip and the response is worth compressing.
// It always returns NEXT_CONTINUE so the chain proceeds.
func (g *Gzip) Handle(c *Context) int {
	// bypass some files
	if g.bypass(c.Req) {
		return NEXT_CONTINUE
	}
	// next to get response data
	c.Next()
	// if error
	if c.Res.Err != nil {
		return NEXT_CONTINUE
	}
	// small body: compression overhead outweighs the gain
	if len(c.Res.Body) < GZIP_THRESHOLD {
		return NEXT_CONTINUE
	}
	// statuses that carry no body
	switch c.Res.Status {
	case 204, 205, 304:
		return NEXT_CONTINUE
	}
	// already compressed
	// NOTE(review): this inspects the *request* Content-Encoding header;
	// verify it should not be checking the response headers instead.
	if len(c.Req.Header.Get("Content-Encoding")) > 0 {
		return NEXT_CONTINUE
	}
	// set headers
	h := c.Res.Header()
	h.Set("Vary", "Accept-Encoding")
	h.Set("Content-Encoding", "gzip")
	h.Del("Content-Length")
	// write and close
	rw := c.Res.ResponseWriter
	gw := g.pool.Get().(*gzip.Writer)
	gw.Reset(rw)
	c.Res.ResponseWriter = &gzipWriter{rw, gw}
	c.Res.Close = func() {
		// Close, not Flush: only Close writes the gzip footer (CRC-32 and
		// length); a Flush-only stream is truncated from the client's view.
		gw.Close()
		g.pool.Put(gw)
	}
	// ok
	return NEXT_CONTINUE
}
// bypass reports whether compression should be skipped for req: clients
// that do not accept gzip, HEAD/OPTIONS requests, and websocket upgrades.
func (g *Gzip) bypass(req *Request) bool {
	switch {
	case !strings.Contains(req.Header.Get("Accept-Encoding"), "gzip"):
		// client does not accept gzip
		return true
	case req.Method == "HEAD", req.Method == "OPTIONS":
		// bodiless / preflight methods
		return true
	case req.Header.Get("Sec-WebSocket-Key") != "":
		// websocket handshake
		return true
	}
	return false
}
|
package quic
import (
"fmt"
capnp "zombiezen.com/go/capnproto2"
"zombiezen.com/go/capnproto2/pogs"
"github.com/cloudflare/cloudflared/quic/schema"
)
// ConnectionType indicates the type of underlying connection proxied within the QUIC stream.
type ConnectionType uint16
const (
	// ConnectionTypeHTTP identifies an HTTP connection ("http").
	ConnectionTypeHTTP ConnectionType = iota
	// ConnectionTypeWebsocket identifies a WebSocket connection ("ws").
	ConnectionTypeWebsocket
	// ConnectionTypeTCP identifies a raw TCP connection ("tcp").
	ConnectionTypeTCP
)
// String returns the short protocol name for c. It panics on a value
// outside the declared constants, which indicates a programming error.
func (c ConnectionType) String() string {
	switch c {
	case ConnectionTypeHTTP:
		return "http"
	case ConnectionTypeWebsocket:
		return "ws"
	case ConnectionTypeTCP:
		return "tcp"
	default:
		panic(fmt.Sprintf("invalid ConnectionType: %d", c))
	}
}
// ConnectRequest is the representation of metadata sent at the start of a QUIC application handshake.
type ConnectRequest struct {
	Dest string `capnp:"dest"` // destination address of the proxied connection
	Type ConnectionType `capnp:"type"` // protocol carried by the stream
	Metadata []Metadata `capnp:"metadata"` // additional key/value pairs
}
// Metadata is a representation of key value based data sent via RequestMeta.
type Metadata struct {
	Key string `capnp:"key"`
	Val string `capnp:"val"`
}
// MetadataMap flattens r's []Metadata into a key→value map. Later entries
// with a duplicate Key overwrite earlier ones.
func (r *ConnectRequest) MetadataMap() map[string]string {
	m := make(map[string]string, len(r.Metadata))
	for _, md := range r.Metadata {
		m[md.Key] = md.Val
	}
	return m
}
// fromPogs decodes a capnproto message into r via the generated schema.
func (r *ConnectRequest) fromPogs(msg *capnp.Message) error {
	metadata, err := schema.ReadRootConnectRequest(msg)
	if err != nil {
		return err
	}
	return pogs.Extract(r, schema.ConnectRequest_TypeID, metadata.Struct)
}
// toPogs serializes r into a new single-segment capnproto message.
func (r *ConnectRequest) toPogs() (*capnp.Message, error) {
	msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}
	root, err := schema.NewRootConnectRequest(seg)
	if err != nil {
		return nil, err
	}
	if err := pogs.Insert(schema.ConnectRequest_TypeID, root.Struct, r); err != nil {
		return nil, err
	}
	return msg, nil
}
// ConnectResponse is a representation of metadata sent as a response to a QUIC application handshake.
type ConnectResponse struct {
	Error string `capnp:"error"` // empty on success
	Metadata []Metadata `capnp:"metadata"` // additional key/value pairs
}
// fromPogs decodes a capnproto message into r via the generated schema.
func (r *ConnectResponse) fromPogs(msg *capnp.Message) error {
	metadata, err := schema.ReadRootConnectResponse(msg)
	if err != nil {
		return err
	}
	return pogs.Extract(r, schema.ConnectResponse_TypeID, metadata.Struct)
}
// toPogs encodes r into a new single-segment Cap'n Proto message, the inverse
// of fromPogs.
func (r *ConnectResponse) toPogs() (*capnp.Message, error) {
	msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}
	root, err := schema.NewRootConnectResponse(seg)
	if err != nil {
		return nil, err
	}
	if err := pogs.Insert(schema.ConnectResponse_TypeID, root.Struct, r); err != nil {
		return nil, err
	}
	return msg, nil
}
|
package models_test
import (
. "github.com/gravida/work/models"
"github.com/gravida/work/pkg/settings"
_ "github.com/mattn/go-sqlite3"
. "github.com/smartystreets/goconvey/convey"
"os"
"testing"
)
// TestWork exercises the Work model helpers (create, exists-by-name, list)
// against a throwaway sqlite3 database created by Setup() and removed when
// the test finishes.
func TestWork(t *testing.T) {
	settings.DatabaseCfg.Type = "sqlite3"
	settings.DatabaseCfg.Path = "data.db"
	Setup()
	defer func() {
		os.Remove("./data.db")
	}()
	Convey("添加work", t, func() {
		w := Work{Uid: 1, Name: "测试"}
		err := AddWork(&w)
		So(err, ShouldBeNil)
		w = Work{Uid: 2, Name: "测试"}
		err = AddWork(&w)
		So(err, ShouldBeNil)
	})
	Convey("判断work名称", t, func() {
		exist, err := ExistWorkByName(1, "测试")
		So(err, ShouldBeNil)
		So(exist, ShouldEqual, true)
		exist, err = ExistWorkByName(1, "测试1")
		So(err, ShouldBeNil)
		So(exist, ShouldEqual, false)
		exist, err = ExistWorkByName(3, "测试")
		So(err, ShouldBeNil)
		So(exist, ShouldEqual, false)
	})
	Convey("获取works", t, func() {
		works, err := QueryAllWorks(1, 20)
		// Assert the error before using the result; previously the slice was
		// iterated first and the error only checked afterwards.
		So(err, ShouldBeNil)
		for i := 0; i < len(works); i++ {
			t.Log(works[i])
		}
	})
}
|
package config
// CloudStorageConfig holds the settings needed to reach an S3-compatible
// object store (AWS S3 or Baidu BOS), plus a local disk cache location.
type CloudStorageConfig struct {
	Bucket        string `yaml:"bucket"`        // bucket name of s3 or bos
	Path          string `yaml:"path"`          // path in the bucket
	Ak            string `yaml:"ak"`            // access key
	Sk            string `yaml:"sk"`            // secret key
	Region        string `yaml:"region"`        // region, eg. bj
	Endpoint      string `yaml:"endpoint"`      // endpoint, eg. s3.bj.bcebos.com
	LocalCacheDir string `yaml:"localCacheDir"` // cache directory on local disk
}

// NewCloudStorageConfig returns a config pre-populated with the defaults.
func NewCloudStorageConfig() *CloudStorageConfig {
	c := &CloudStorageConfig{}
	c.defaultCloudStorageConfig()
	return c
}

// defaultCloudStorageConfig resets every field to its default value.
func (c *CloudStorageConfig) defaultCloudStorageConfig() {
	*c = CloudStorageConfig{
		Bucket:        "xchain-cloud-test",
		Path:          "node1",
		Ak:            "",
		Sk:            "",
		Region:        "bj",
		Endpoint:      "s3.bj.bcebos.com",
		LocalCacheDir: "./data/cache",
	}
}
|
package lex
import (
"fmt"
"github.com/felixangell/goof/cc/unit"
"strings"
"unicode"
)
var ErrorToken *unit.Token = unit.NewToken("error", unit.Invalid)
// Lexer turns a SourceFile's contents into a stream of tokens.
type Lexer struct {
	// file is the source file currently being lexed.
	file *unit.SourceFile
	// position is the index, in runes, of the next rune to consume.
	position uint
}
// ExecutePhase lexes file into a token stream, stores it on file.Tokens and
// terminates the stream with a synthetic EOF token. Comment/error tokens are
// discarded.
//
// The receiver is renamed from "lexer" to "l" for consistency with every
// other method on Lexer, and the position is reset so the lexer can be
// reused across files (implementing the existing TODO below).
func (l *Lexer) ExecutePhase(file *unit.SourceFile) {
	// TODO(Felix): resets in each phase execution
	l.file = file
	l.position = 0
	tokens := []*unit.Token{}
	for !l.eof() {
		l.consumeWhile(isJunk, unit.Invalid)
		token := l.getToken()
		if token != ErrorToken {
			tokens = append(tokens, token)
		}
		l.consumeWhile(isJunk, unit.Invalid)
	}
	// TODO dump this per file
	fmt.Println("Token Stream")
	for _, tok := range tokens {
		fmt.Println(tok.String())
	}
	// put our EOF token on the end
	// this is just a little hack for
	// parsing
	tokens = append(tokens, unit.NewToken("", unit.EOF))
	l.file.Tokens = tokens
}
// eof reports whether the final rune of the file has been reached.
//
// Written as position+1 >= Length rather than position >= Length-1: Length
// is unsigned, so on an empty file (Length == 0) the old form underflowed to
// the maximum uint, making eof() permanently false and letting peek/consume
// index past the end. For non-empty files the two forms are equivalent.
func (l *Lexer) eof() bool {
	return l.position+1 >= l.file.Length
}
// peek returns the rune offset runes past the current position without
// consuming it. It panics if that index is past the end of the contents.
// NOTE(review): converting the whole file to []rune on every call makes
// lexing O(n^2); consider caching the rune slice on the Lexer.
func (l *Lexer) peek(offset uint) rune {
	return []rune(l.file.Contents)[l.position+offset]
}
// consume returns the rune at the current position and advances past it.
// It panics if the input is already exhausted.
// NOTE(review): re-converts the file to []rune on each call (see peek).
func (l *Lexer) consume() rune {
	consumed := []rune(l.file.Contents)[l.position]
	l.position++
	return consumed
}
// consumeWhile consumes runes while condition holds and input remains, and
// returns the consumed lexeme as a token of the given kind.
//
// eof() is checked BEFORE peeking: the original order evaluated
// condition(l.peek(0)) first, which indexes past the end of the contents
// (and panics) when a previous token consumed the final rune.
func (l *Lexer) consumeWhile(condition func(rune) bool, kind unit.TokenType) *unit.Token {
	lexeme := []rune{}
	for !l.eof() && condition(l.peek(0)) {
		lexeme = append(lexeme, l.consume())
	}
	return unit.NewToken(string(lexeme), kind)
}
// consumeSymbol consumes a single rune and wraps it in a Symbol token.
func (l *Lexer) consumeSymbol() *unit.Token {
	lexeme := string(l.consume())
	return unit.NewToken(lexeme, unit.Symbol)
}
// consumePair consumes a single bracketing rune ({}[]()<>) as a Pair token.
func (l *Lexer) consumePair() *unit.Token {
	lexeme := string(l.consume())
	return unit.NewToken(lexeme, unit.Pair)
}
// consumeString consumes a double-quoted string literal, quotes included,
// and returns it as a String token.
//
// On a quote mismatch it now returns ErrorToken: the old code panicked with
// an expletive and left an unreachable `return ErrorToken` behind it, which
// (per that return) was the apparent intent.
func (l *Lexer) consumeString() *unit.Token {
	opening := string(l.consume())
	tok := l.consumeWhile(func(r rune) bool {
		return r != '"'
	}, unit.String)
	closing := string(l.consume())
	if opening != closing {
		return ErrorToken
	}
	tok.SetLexeme(opening + tok.GetLexeme() + closing)
	return tok
}
// consumeCharacter consumes a single-quoted character literal, quotes
// included, and returns it as a Character token.
//
// On a quote mismatch it now returns ErrorToken instead of panicking; the
// old panic was followed by an unreachable `return ErrorToken`, which was
// the apparent intent.
func (l *Lexer) consumeCharacter() *unit.Token {
	opening := string(l.consume())
	tok := l.consumeWhile(func(r rune) bool {
		return r != '\''
	}, unit.Character)
	closing := string(l.consume())
	if opening != closing {
		return ErrorToken
	}
	tok.SetLexeme(opening + tok.GetLexeme() + closing)
	return tok
}
// getToken lexes one token starting at the current position. Comments
// (';' to end of line) are consumed and reported as ErrorToken so the caller
// drops them.
//
// NOTE(review): a rune matching no branch (e.g. '%', which is absent from
// the symbol set) returns ErrorToken WITHOUT advancing position; since the
// caller only skips whitespace between calls, such input loops forever —
// confirm whether that is possible in practice.
func (l *Lexer) getToken() *unit.Token {
	c := l.peek(0)
	if unicode.IsLetter(c) || c == '_' {
		return l.consumeWhile(isIdentifier, unit.Identifier)
	} else if isNumber(c) {
		start := l.consumeWhile(isNumber, unit.WholeNumber)
		// decimal places
		if l.peek(0) == '.' {
			l.consume()
			part := l.consumeWhile(isNumber, unit.WholeNumber)
			start.SetLexeme(start.GetLexeme() + ".") // add the dot back in
			return start.JoinToken(part, unit.FloatingNumber)
		} else {
			// whole number
			return start
		}
	} else if c == ';' { // dump comments completely
		// ';' must be handled before isSymbol, which also matches it.
		l.consume()
		l.consumeWhile(func(r rune) bool {
			return r != '\n'
		}, unit.Invalid)
		return ErrorToken
	} else if isSymbol(c) {
		return l.consumeSymbol()
	} else if isPair(c) {
		return l.consumePair()
	} else if c == '"' {
		return l.consumeString()
	} else if c == '\'' {
		return l.consumeCharacter()
	}
	return ErrorToken
}
// isIdentifier reports whether r may appear in an identifier: any Unicode
// letter or digit, or an underscore.
func isIdentifier(r rune) bool {
	if r == '_' {
		return true
	}
	return unicode.IsLetter(r) || unicode.IsDigit(r)
}
// isNumber reports whether r is a decimal digit. Note unicode.IsDigit also
// accepts non-ASCII decimal digits, not only '0'-'9'.
func isNumber(r rune) bool {
	return unicode.IsDigit(r)
}
// isJunk reports whether r is skippable filler between tokens: any rune at
// or below ASCII space (whitespace and control characters, including NUL).
//
// The original expression `r <= ' ' || r == '\n' || r == '\r' || r == '\t' && r != 0`
// was misleading: `&&` binds tighter than `||`, so the `r != 0` guard only
// applied to the tab comparison and every clause was already subsumed by
// r <= ' '. This form is behavior-identical. NUL intentionally remains junk:
// otherwise getToken would return ErrorToken without advancing and loop.
func isJunk(r rune) bool {
	return r <= ' '
}
// isSymbol reports whether r is one of the single-rune operator/punctuation
// symbols recognised by the lexer.
func isSymbol(r rune) bool {
	const symbols = "?+-/*&^$!|:;@#,.'=~`¬\\"
	return strings.ContainsRune(symbols, r)
}
// isPair reports whether r is a bracketing rune: {} [] () <>.
func isPair(r rune) bool {
	return strings.IndexRune("{}[]()<>", r) >= 0
}
|
package altrudos
import (
"errors"
"github.com/jmoiron/sqlx"
)
// Sentinel errors for failures while persisting a NewDrive submission.
// NOTE(review): neither error is referenced in this file — confirm they are
// returned by the helpers called below.
var (
	ErrDonationNotCreated = errors.New("donation on new drive not created")
	ErrDriveNotCreated = errors.New("drive on new drive not created")
)
// A new drive is the result of a user submitting the New Drive form
// on the home page.
// This form is for both creating a new drive and creating a donation for that
// drive all in one.
type NewDrive struct {
	// SourceUrl is the URL the user submitted as the drive's source.
	SourceUrl string
	Name string
	// SubmittedDonation holds the donation details from the form.
	SubmittedDonation *SubmittedDonation
	// Drive and Donation are populated by Process.
	Drive *Drive
	Donation *Donation
}
// Process persists the submission: it finds or creates the drive for the
// submitted source, then creates the donation against that drive.
func (nd *NewDrive) Process(ext sqlx.Ext) error {
	_, err := nd.FetchOrCreateDrive(ext)
	if err != nil {
		return err
	}
	return nd.CreateDonation(ext)
}
// FetchOrCreateDrive resolves the drive for the submitted source URL: an
// existing drive for the same source is reused, otherwise a new drive is
// created. On success the drive is also cached on nd.Drive.
func (nd *NewDrive) FetchOrCreateDrive(ext sqlx.Ext) (*Drive, error) {
	source, err := ParseSourceURL(nd.SourceUrl)
	if err != nil {
		return nil, err
	}
	existing, err := GetDriveBySource(ext, source)
	if err != nil {
		return nil, err
	}
	if existing != nil {
		nd.Drive = existing
		return existing, nil
	}
	created, err := CreatedDriveBySourceUrl(ext, nd.SourceUrl)
	if err != nil {
		return created, err
	}
	nd.Drive = created
	return created, nil
}
// CreateDonation creates the donation record against the previously resolved
// drive (nd.Drive must be set) and caches it on nd.Donation.
func (nd *NewDrive) CreateDonation(ext sqlx.Ext) error {
	dono, err := CreateDonation(ext, nd.Drive.Id, nd.SubmittedDonation)
	if err == nil {
		nd.Donation = dono
	}
	return err
}
|
package main
import (
"io"
"log"
"os"
"strconv"
)
var (
	// valid_events is the whitelist of event names accepted by Log; it is
	// populated by setupLog.
	valid_events []string
)
// check reports whether el is present in valid.
func check(valid []string, el string) bool {
	for i := range valid {
		if valid[i] == el {
			return true
		}
	}
	return false
}
// Log writes a "term:event:msg" record via the standard logger.
// event must be one of the names registered by setupLog ("vote", "accept",
// "confirm", "broadcast", "connection", "send", "put"); any other value
// panics.
// msg is general text, which helps debugging.
func Log(term int32, event, msg string) {
	if !check(valid_events, event) {
		panic("INVALID EVENTS!")
	}
	// Pass the record as arguments rather than as the format string itself:
	// previously any '%' in event/msg was interpreted as a format verb and
	// garbled the output (go vet printf). The trailing "\n" is dropped since
	// the log package appends a newline when one is missing.
	log.Printf("%s:%s:%s", strconv.Itoa(int(term)), event, msg)
}
// setupLog configures the standard logger to write microsecond-stamped
// records to both stderr and the named file, and registers the event names
// that Log accepts.
func setupLog(name string) {
	valid_events = []string{"vote", "accept", "confirm", "broadcast", "connection", "send", "put"}
	// Open in append mode: the previous O_RDWR (without O_APPEND/O_TRUNC)
	// overwrote an existing file from offset 0, leaving stale bytes from a
	// prior run beyond the new end of the log.
	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Fatalf("Open log file error: %v\n", err)
	}
	// set up the logging package to write to stderr and the file
	mw := io.MultiWriter(os.Stderr, f)
	log.SetOutput(mw)
	log.SetFlags(log.Lmicroseconds)
}
|
package cmd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/fatih/color"
"github.com/porter-dev/porter/cli/cmd/docker"
"github.com/porter-dev/porter/cli/cmd/github"
"github.com/spf13/cobra"
)
// startOps holds the flag values for "porter server start".
// NOTE(review): the `form:` tags suggest validation is intended elsewhere;
// none is visible in this file — confirm (an unknown --db value reaches
// startDocker unchecked).
type startOps struct {
	imageTag string `form:"required"`
	db string `form:"oneof=sqlite postgres"`
	driver string `form:"required"`
	port *int `form:"required"`
}

// opts is populated by the flag definitions in init().
var opts = &startOps{}
// serverCmd is the parent command grouping the server start/stop subcommands.
var serverCmd = &cobra.Command{
	Use: "server",
	Aliases: []string{"svr"},
	Short: "Commands to control a local Porter server",
}
// startCmd represents the start command.
// It starts the Porter server either inside Docker or as a local process,
// depending on the configured driver.
var startCmd = &cobra.Command{
	Use:   "start",
	Short: "Starts a Porter server instance on the host",
	Run: func(cmd *cobra.Command, args []string) {
		if config.Driver == "docker" {
			config.SetDriver("docker")
			err := startDocker(
				opts.imageTag,
				opts.db,
				*opts.port,
			)
			if err != nil {
				red := color.New(color.FgRed)
				red.Println("Error running start:", err.Error())
				red.Println("Shutting down...")
				// Best-effort cleanup of containers that did start.
				err = stopDocker()
				if err != nil {
					red.Println("Shutdown unsuccessful:", err.Error())
				}
				os.Exit(1)
			}
		} else {
			// Any driver other than "docker" falls back to the local driver.
			config.SetDriver("local")
			err := startLocal(
				opts.db,
				*opts.port,
			)
			if err != nil {
				red := color.New(color.FgRed)
				red.Println("Error running start:", err.Error())
				os.Exit(1)
			}
		}
	},
}
// stopCmd stops a Porter server previously started with the docker driver.
// With any other driver it is silently a no-op.
var stopCmd = &cobra.Command{
	Use:   "stop",
	Short: "Stops a Porter instance running on the Docker engine",
	Run: func(cmd *cobra.Command, args []string) {
		if config.Driver == "docker" {
			if err := stopDocker(); err != nil {
				color.New(color.FgRed).Println("Shutdown unsuccessful:", err.Error())
				os.Exit(1)
			}
		}
	},
}
// init wires the server commands into the root command and registers the
// persistent flags (--db, --image-tag, -p/--port) they share.
func init() {
	rootCmd.AddCommand(serverCmd)
	serverCmd.AddCommand(startCmd)
	serverCmd.AddCommand(stopCmd)
	// driverFlagSet (defined elsewhere in the package) provides --driver.
	serverCmd.PersistentFlags().AddFlagSet(driverFlagSet)
	startCmd.PersistentFlags().StringVar(
		&opts.db,
		"db",
		"sqlite",
		"the db to use, one of sqlite or postgres",
	)
	startCmd.PersistentFlags().StringVar(
		&opts.imageTag,
		"image-tag",
		"latest",
		"the Porter image tag to use (if using docker driver)",
	)
	opts.port = startCmd.PersistentFlags().IntP(
		"port",
		"p",
		8080,
		"the host port to run the server on",
	)
}
// startDocker launches the Porter server as a Docker container using the
// given image tag, backing database ("sqlite" or "postgres") and host port,
// then records the server host in the CLI config.
func startDocker(
	imageTag string,
	db string,
	port int,
) error {
	env := []string{
		"NODE_ENV=production",
		"FULLSTORY_ORG_ID=VXNSS",
	}
	var porterDB docker.PorterDB
	switch db {
	case "postgres":
		porterDB = docker.Postgres
	case "sqlite":
		porterDB = docker.SQLite
	default:
		// Previously an unrecognized value fell through silently with the
		// zero-value PorterDB; fail fast instead.
		return fmt.Errorf("unsupported db %q: must be one of sqlite, postgres", db)
	}
	startOpts := &docker.PorterStartOpts{
		ProcessID:      "main",
		ServerImageTag: imageTag,
		ServerPort:     port,
		DB:             porterDB,
		Env:            env,
	}
	_, _, err := docker.StartPorter(startOpts)
	if err != nil {
		return err
	}
	green := color.New(color.FgGreen)
	green.Printf("Server ready: listening on localhost:%d\n", port)
	return config.SetHost(fmt.Sprintf("http://localhost:%d", port))
}
// startLocal runs the Porter server binary directly on the host, downloading
// a release matching this CLI's version if the binary is missing or out of
// date. Only sqlite is supported with the local driver. The server runs in
// the foreground until it exits.
func startLocal(
	db string,
	port int,
) error {
	if db == "postgres" {
		return fmt.Errorf("postgres not available for local driver, run \"porter server start --db postgres --driver docker\"")
	}
	// Record the host so subsequent CLI commands target this server.
	// (Previously the error returned by SetHost was silently dropped,
	// unlike the docker path which returns it.)
	if err := config.SetHost(fmt.Sprintf("http://localhost:%d", port)); err != nil {
		return err
	}
	porterDir := filepath.Join(home, ".porter")
	cmdPath := filepath.Join(home, ".porter", "portersvr")
	sqlLitePath := filepath.Join(home, ".porter", "porter.db")
	staticFilePath := filepath.Join(home, ".porter", "static")
	// Download the server binary if it has never been installed.
	if _, err := os.Stat(cmdPath); os.IsNotExist(err) {
		err := downloadMatchingRelease(porterDir)
		if err != nil {
			color.New(color.FgRed).Println("Failed to download server binary:", err.Error())
			os.Exit(1)
		}
	}
	// otherwise, check the version flag of the binary and re-download when
	// it does not match this CLI's version
	cmdVersionPorter := exec.Command(cmdPath, "--version")
	writer := &versionWriter{}
	cmdVersionPorter.Stdout = writer
	err := cmdVersionPorter.Run()
	if err != nil || writer.Version != Version {
		err := downloadMatchingRelease(porterDir)
		if err != nil {
			color.New(color.FgRed).Println("Failed to download server binary:", err.Error())
			os.Exit(1)
		}
	}
	// Run the server in the foreground with a local sqlite/no-redis setup.
	cmdPorter := exec.Command(cmdPath)
	cmdPorter.Env = os.Environ()
	cmdPorter.Env = append(cmdPorter.Env, []string{
		"IS_LOCAL=true",
		"SQL_LITE=true",
		"SQL_LITE_PATH=" + sqlLitePath,
		"STATIC_FILE_PATH=" + staticFilePath,
		fmt.Sprintf("SERVER_PORT=%d", port),
		"REDIS_ENABLED=false",
	}...)
	// Let the user's environment override these feature toggles.
	if _, found := os.LookupEnv("GITHUB_ENABLED"); !found {
		cmdPorter.Env = append(cmdPorter.Env, "GITHUB_ENABLED=false")
	}
	if _, found := os.LookupEnv("PROVISIONER_ENABLED"); !found {
		cmdPorter.Env = append(cmdPorter.Env, "PROVISIONER_ENABLED=false")
	}
	cmdPorter.Stdout = os.Stdout
	cmdPorter.Stderr = os.Stderr
	err = cmdPorter.Run()
	if err != nil {
		color.New(color.FgRed).Println("Failed:", err.Error())
		os.Exit(1)
	}
	return nil
}
// stopDocker stops the Porter containers labelled with the "main" process ID
// via a Docker agent built from the environment.
func stopDocker() error {
	agent, err := docker.NewAgentFromEnv()
	if err != nil {
		return err
	}
	if err := agent.StopPorterContainersWithProcessID("main", false); err != nil {
		return err
	}
	color.New(color.FgGreen).Println("Successfully stopped the Porter server.")
	return nil
}
// downloadMatchingRelease fetches the "portersvr" binary and the "static"
// asset bundle for the CLI's Version from the porter-dev/porter GitHub
// releases, unpacking both under porterDir.
func downloadMatchingRelease(porterDir string) error {
	// The server binary is platform-dependent (per-OS/arch asset).
	z := &github.ZIPReleaseGetter{
		AssetName:           "portersvr",
		AssetFolderDest:     porterDir,
		ZipFolderDest:       porterDir,
		ZipName:             "portersvr_latest.zip",
		EntityID:            "porter-dev",
		RepoName:            "porter",
		IsPlatformDependent: true,
		Downloader: &github.ZIPDownloader{
			ZipFolderDest:   porterDir,
			AssetFolderDest: porterDir,
			ZipName:         "portersvr_latest.zip",
		},
	}
	err := z.GetRelease(Version)
	if err != nil {
		return err
	}
	// The static assets are platform-independent and land in porterDir/static.
	zStatic := &github.ZIPReleaseGetter{
		AssetName:           "static",
		AssetFolderDest:     filepath.Join(porterDir, "static"),
		ZipFolderDest:       porterDir,
		ZipName:             "static_latest.zip",
		EntityID:            "porter-dev",
		RepoName:            "porter",
		IsPlatformDependent: false,
		Downloader: &github.ZIPDownloader{
			ZipFolderDest:   porterDir,
			AssetFolderDest: filepath.Join(porterDir, "static"),
			ZipName:         "static_latest.zip",
		},
	}
	return zStatic.GetRelease(Version)
}
// versionWriter captures a child process's stdout as a version string.
type versionWriter struct {
	Version string
}
// Write implements io.Writer by storing the written bytes, whitespace-trimmed,
// as the captured version.
// NOTE(review): each call overwrites Version, so if the process emits its
// version across multiple writes only the last chunk is kept — confirm the
// version is always printed in a single write.
func (v *versionWriter) Write(p []byte) (n int, err error) {
	v.Version = strings.TrimSpace(string(p))
	return len(p), nil
}
|
package main
import (
"os"
"github.com/codegangsta/cli"
)
var (
	// Version is the CLI version reported by the app's --version output.
	Version = "0.0.1"
)
// main runs the CLI and exits non-zero on failure.
// Previously the error returned by Run was silently discarded.
func main() {
	if err := newApp().Run(os.Args); err != nil {
		os.Stderr.WriteString(err.Error() + "\n")
		os.Exit(1)
	}
}
// newApp builds the pold CLI application with its global metadata and the
// package-level Commands (defined elsewhere in this package).
func newApp() *cli.App {
	app := cli.NewApp()
	app.Name = "pold"
	app.Usage = "markdown based blog tool"
	app.Version = Version
	app.Author = "Konboi"
	app.Email = "ryosuke.yabuki+pold@gmail.com"
	app.Commands = Commands
	return app
}
|
package model // import "model"
import (
"github.com/dgrijalva/jwt-go"
nullable "gopkg.in/guregu/null.v3"
)
// JwtCustomClaims carries the application-specific JWT payload fields
// alongside the registered standard claims.
type JwtCustomClaims struct {
	Idx int `json:"J_Idx"`
	Name string `json:"J_Name"`
	Email string `json:"J_Email"`
	jwt.StandardClaims
}

// Person is a nullable representation of a person record.
type Person struct {
	Idx nullable.Int `json:"P_Idx"`
	Name nullable.String `json:"P_Name"`
	Email nullable.String `json:"P_Email"`
}

// Paging describes limit/offset pagination parameters for list queries.
type Paging struct {
	Limit int `json:"limit"`
	Offset int `json:"offset"`
	Page int `json:"page"`
}
|
package shape
import (
"math"
"testing"
"github.com/lukeshiner/raytrace/colour"
"github.com/lukeshiner/raytrace/comparison"
"github.com/lukeshiner/raytrace/material"
"github.com/lukeshiner/raytrace/matrix"
"github.com/lukeshiner/raytrace/ray"
"github.com/lukeshiner/raytrace/vector"
)
// TestShapeDefaultTransform verifies a new shape's transform defaults to the
// 4x4 identity matrix.
func TestShapeDefaultTransform(t *testing.T) {
	s := newShape()
	// Idiomatic negation instead of comparing a bool against true.
	if !matrix.Equal(s.Transform(), matrix.IdentityMatrix(4)) {
		t.Error("Sphere default transform was not the identity matrix.")
	}
}
// TestShapeDefaultMaterial verifies a new shape's material equals the
// package default from material.New().
func TestShapeDefaultMaterial(t *testing.T) {
	s := newShape()
	if s.Material() != material.New() {
		t.Error("Sphere default material was not correct.")
	}
}
// TestShapeSetMaterial verifies SetMaterial stores a custom material that
// Material() then returns unchanged.
func TestShapeSetMaterial(t *testing.T) {
	m := material.New()
	m.Colour = colour.New(0.5, 0.5, 0.5)
	m.Ambient = 0.5
	m.Diffuse = 0.3
	m.Specular = 0.8
	m.Shininess = 150.0
	s := newShape()
	s.SetMaterial(m)
	if s.Material() != m {
		t.Error("Could not set Sphere material.")
	}
}
// TestShapeSetTransform verifies SetTransform stores a transform that
// Transform() then returns unchanged.
func TestShapeSetTransform(t *testing.T) {
	s := newShape()
	transform := matrix.TranslationMatrix(2, 3, 4)
	s.SetTransform(transform)
	// Idiomatic negation instead of comparing a bool against true.
	if !matrix.Equal(s.Transform(), transform) {
		t.Error("Did not set transform on sphere.")
	}
}
// TestIntersetTransformedShapeWithRay checks that Intersect transforms the
// incoming ray into the shape's object space (via the inverse transform)
// before delegating, by inspecting the ray the shape saved.
// NOTE(review): the name has a typo ("Interset"); left unchanged so test
// reports keep their historical name.
func TestIntersetTransformedShapeWithRay(t *testing.T) {
	var tests = []struct {
		ray                               ray.Ray
		transform                         matrix.Matrix
		expectedOrigin, expectedDirection vector.Vector
	}{
		{
			// Intersecting a scaled shape with a ray.
			ray:               ray.New(vector.NewPoint(0, 0, -5), vector.NewVector(0, 0, 1)),
			transform:         matrix.ScalingMatrix(2, 2, 2),
			expectedOrigin:    vector.NewPoint(0, 0, -2.5),
			expectedDirection: vector.NewVector(0, 0, 0.5),
		},
		{
			// Intersecting a translated shape with a ray.
			ray:               ray.New(vector.NewPoint(0, 0, -5), vector.NewVector(0, 0, 1)),
			transform:         matrix.TranslationMatrix(5, 0, 0),
			expectedOrigin:    vector.NewPoint(-5, 0, -5),
			expectedDirection: vector.NewVector(0, 0, 1),
		},
	}
	for _, test := range tests {
		s := newShape()
		s.SetTransform(test.transform)
		// The intersections themselves are irrelevant here; discard the
		// result directly instead of binding it to an unused variable.
		Intersect(&s, test.ray)
		result := s.SavedRay()
		if !vector.Equal(result.Origin, test.expectedOrigin) ||
			!vector.Equal(result.Direction, test.expectedDirection) {
			t.Errorf(
				"Local Intersect produced %+v, expected (%v, %v).",
				result, test.expectedOrigin, test.expectedDirection,
			)
		}
	}
}
// TestNormalAt checks NormalAt against translated and scaled/rotated shapes,
// using hand-computed expected normals.
func TestNormalAt(t *testing.T) {
	var tests = []struct {
		transform matrix.Matrix
		point, expected vector.Vector
	}{
		{
			// Computing the normal on a translated shape.
			transform: matrix.TranslationMatrix(0, 1, 0),
			point: vector.NewPoint(0, 1.70711, -0.70711),
			expected: vector.NewVector(0, 0.70711, -0.70711),
		},
		{
			// Computing the normal on a transformed shape.
			transform: matrix.Multiply(
				matrix.ScalingMatrix(1, 0.5, 1), matrix.RotationZMatrix(math.Pi/5)),
			point: vector.NewPoint(0, math.Sqrt(2)/2, -math.Sqrt(2)/2),
			expected: vector.NewVector(0, 0.97014, -0.24254),
		},
	}
	for _, test := range tests {
		s := newShape()
		s.SetTransform(test.transform)
		result := NormalAt(&s, test.point)
		if !vector.Equal(result, test.expected) {
			t.Errorf("NormalAt(%v) was %v, expected %v.", test.point, result, test.expected)
		}
	}
}
// TestNormalAtOnSphere checks sphere normals on the axes, at an arbitrary
// surface point, and under translation and scale/rotation transforms, and
// also asserts every returned normal is already normalized.
func TestNormalAtOnSphere(t *testing.T) {
	var tests = []struct {
		transform matrix.Matrix
		point     vector.Vector
		expected  vector.Vector
	}{
		{
			transform: matrix.IdentityMatrix(4),
			point:     vector.NewPoint(1, 0, 0),
			expected:  vector.NewVector(1, 0, 0),
		},
		{
			transform: matrix.IdentityMatrix(4),
			point:     vector.NewPoint(0, 1, 0),
			expected:  vector.NewVector(0, 1, 0),
		},
		{
			transform: matrix.IdentityMatrix(4),
			point:     vector.NewPoint(0, 0, 1),
			expected:  vector.NewVector(0, 0, 1),
		},
		{
			transform: matrix.IdentityMatrix(4),
			point:     vector.NewPoint(math.Sqrt(3)/3, math.Sqrt(3)/3, math.Sqrt(3)/3),
			expected:  vector.NewVector(math.Sqrt(3)/3, math.Sqrt(3)/3, math.Sqrt(3)/3),
		},
		{
			transform: matrix.TranslationMatrix(0, 1, 0),
			point:     vector.NewPoint(0, 1.70711, -0.70711),
			expected:  vector.NewVector(0, 0.70711, -0.70711),
		},
		{
			transform: matrix.Multiply(
				matrix.ScalingMatrix(1, 0.5, 1),
				matrix.RotationZMatrix(math.Pi/5),
			),
			point:    vector.NewPoint(0, math.Sqrt(2)/2, -math.Sqrt(2)/2),
			expected: vector.NewVector(0, 0.97014, -0.24254),
		},
	}
	for _, test := range tests {
		s := NewSphere()
		s.SetTransform(test.transform)
		result := NormalAt(s, test.point)
		// Idiomatic negation instead of comparing bools against true.
		if !vector.Equal(result, test.expected) {
			t.Errorf(
				"The normal of sphere %+v at point %+v was %+v, expected %+v.",
				s, test.point, result, test.expected,
			)
		}
		if !vector.Equal(result, result.Normalize()) {
			t.Errorf(
				"The normal of sphere %+v at point %+v was not normalized.",
				s, test.point,
			)
		}
	}
}
// TestGetID verifies that shape IDs are allocated sequentially from the
// package-level nextID counter, which the test resets first.
func TestGetID(t *testing.T) {
	expected := 0
	nextID = expected
	s := newShape()
	if s.ID() != expected {
		t.Errorf("First ID was %d, expected %d.", s.ID(), expected)
	}
	s = newShape()
	expected++
	if s.ID() != expected {
		t.Errorf("Second ID was %d, expected %d.", s.ID(), expected)
	}
	s = newShape()
	expected++
	if s.ID() != expected {
		// The original message said "Second ID" here — copy/paste slip.
		t.Errorf("Third ID was %d, expected %d.", s.ID(), expected)
	}
}
// TestPlaneNormalAt verifies a plane's local normal is the constant +Y
// vector regardless of the queried point.
func TestPlaneNormalAt(t *testing.T) {
	var tests = []struct {
		plane Shape
		point, expected vector.Vector
	}{
		{
			plane: NewPlane(),
			point: vector.NewPoint(0, 0, 0),
			expected: vector.NewVector(0, 1, 0),
		},
		{
			plane: NewPlane(),
			point: vector.NewPoint(10, 0, -10),
			expected: vector.NewVector(0, 1, 0),
		},
		{
			plane: NewPlane(),
			point: vector.NewPoint(-5, 0, 150),
			expected: vector.NewVector(0, 1, 0),
		},
	}
	for _, test := range tests {
		result := test.plane.LocalNormalAt(test.point)
		if !vector.Equal(result, test.expected) {
			t.Errorf(
				"Plane local normal at %v was %v, expected %v.",
				test.point, result, test.expected,
			)
		}
	}
}
// TestPlaneLocalIntersect verifies plane/ray intersection t values: no hits
// for parallel or coplanar rays, and a single hit from above or below.
func TestPlaneLocalIntersect(t *testing.T) {
	var tests = []struct {
		plane Shape
		ray ray.Ray
		expected []float64
	}{
		{
			// Intersect with a ray parallel to the plane.
			plane: NewPlane(),
			ray: ray.New(vector.NewPoint(0, 10, 0), vector.NewVector(0, 0, 1)),
			expected: []float64{},
		},
		{
			// Intersect with a coplanar ray.
			plane: NewPlane(),
			ray: ray.New(vector.NewPoint(0, 0, 0), vector.NewVector(0, 0, 1)),
			expected: []float64{},
		},
		{
			// A ray intersecting a plane from above.
			plane: NewPlane(),
			ray: ray.New(vector.NewPoint(0, 1, 0), vector.NewVector(0, -1, 0)),
			expected: []float64{1},
		},
		{
			// A ray intersecting a plane from below.
			plane: NewPlane(),
			ray: ray.New(vector.NewPoint(0, -1, 0), vector.NewVector(0, 1, 0)),
			expected: []float64{1},
		},
	}
	for _, test := range tests {
		intersections := test.plane.LocalIntersect(test.ray)
		result := intersections.TSlice()
		if !comparison.EqualSlice(result, test.expected) {
			t.Errorf(
				"Intersection of plane and ray (%v) was %v, expected %v",
				test.ray, result, test.expected,
			)
		}
	}
}
|
package sha512
import (
"crypto/sha512"
"testing"
)
// TestSum512 tests if our custom implementation of sha512 is correct in reference
// to the standard library crypto/sha512
func TestSum512(t * testing.T) {
testInput := "Testing"
a := sha512.Sum512([]byte(testInput))
b := Sum512([]byte(testInput))
if a != b {
t.Errorf("got %x, want %x", b, a)
}
t.Logf("%x == %x", b, a)
} |
package epaxos
import (
"github.com/google/btree"
pb "github.com/nvanbenschoten/epaxos/epaxos/epaxospb"
)
// Storage allows for the persistence of EPaxos state to provide durability.
type Storage interface {
	// HardState returns the persisted hard state and whether one exists.
	HardState() (pb.HardState, bool)
	// PersistHardState durably records the given hard state.
	PersistHardState(hs pb.HardState)
	// Instances returns all persisted instance states.
	Instances() []*pb.InstanceState
	// PersistInstance durably records the given instance state.
	PersistInstance(is *pb.InstanceState)
}
// Compile-time assertion that MemoryStorage satisfies Storage.
var _ Storage = &MemoryStorage{}

// MemoryStorage implements the Storage interface backed by an in-memory
// data structure.
type MemoryStorage struct {
	// hardState holds the last persisted hard state; set distinguishes
	// "never persisted" from a zero-valued hard state.
	hardState struct {
		set bool
		hs pb.HardState
	}
	// instances maps each replica to a B-tree of its instance states.
	instances map[pb.ReplicaID]*btree.BTree // *pb.InstanceState Items
}
// NewMemoryStorage returns a new in-memory implementation of Storage using
// the provided Config, with one empty B-tree per configured replica.
func NewMemoryStorage(c *Config) Storage {
	instances := make(map[pb.ReplicaID]*btree.BTree, len(c.Nodes))
	for _, rep := range c.Nodes {
		instances[rep] = btree.New(32 /* degree */)
	}
	return &MemoryStorage{instances: instances}
}
// HardState implements the Storage interface. The boolean reports whether a
// hard state has ever been persisted.
func (ms *MemoryStorage) HardState() (pb.HardState, bool) {
	if !ms.hardState.set {
		return pb.HardState{}, false
	}
	return ms.hardState.hs, true
}
// PersistHardState implements the Storage interface.
// It overwrites any previously persisted hard state.
func (ms *MemoryStorage) PersistHardState(hs pb.HardState) {
	ms.hardState.hs = hs
	ms.hardState.set = true
}
// instanceStateKey builds a minimal InstanceState usable as a B-tree lookup
// key for the given instance number.
func instanceStateKey(i pb.InstanceNum) btree.Item {
	return &pb.InstanceState{InstanceID: pb.InstanceID{InstanceNum: i}}
}
// Instances implements the Storage interface.
// Within a single replica the instances are returned in ascending B-tree
// order, but replicas are visited in Go map order, so the ordering across
// replicas is nondeterministic.
func (ms *MemoryStorage) Instances() []*pb.InstanceState {
	var insts []*pb.InstanceState
	for _, replInsts := range ms.instances {
		replInsts.Ascend(func(i btree.Item) bool {
			insts = append(insts, i.(*pb.InstanceState))
			return true
		})
	}
	return insts
}
// PersistInstance implements the Storage interface.
// NOTE(review): this panics if is.ReplicaID was absent from the Config used
// to build the storage (the map lookup yields a nil *btree.BTree) — confirm
// callers only pass configured replicas.
func (ms *MemoryStorage) PersistInstance(is *pb.InstanceState) {
	ms.instances[is.ReplicaID].ReplaceOrInsert(is)
}
|
// Copyright 2016-2018 Granitic. All rights reserved.
// Use of this source code is governed by an Apache 2.0 license that can be found in the LICENSE file at the root of this project.
/*
Package ioc provides an Inversion of Control component container and lifecycle hooks.
This package provides the types that define and support Granitic component container, which allows your application's
objects and Granitic's framework facilities to follow the inversion of control (IoC) pattern by having their lifecycle
and dependencies managed for them. An in-depth discussion of Granitic IoC can be found at http://granitic.io/1.0/ref/ioc
but a description of the core concepts follows.
Components
A component is defined by Granitic as an instance of a Go struct with a name that is unique within an application.
Each component in your application requires an entry in the components section of your application's component definition file like:
{
"components": {
"componentName": {
"type": "package.structType"
}
}
}
e.g:
{
"components": {
"createRecordLogic": {
"type": "inventory.CreateRecordLogic"
}
}
}
For complete information on defining components, refer to http://granitic.io/1.0/ref/components
Granitic's documentation will use the term component and component instance interchangeably. For example, 'component field'
means 'a field on the instance of the Go struct associated with that component'.
The container
When Granitic starts, it will create an instance of ioc.ComponentContainer - a structure that holds references to
each of the components in your application. It is responsible for injecting dependencies and configuration into your
components (see below) and managing lifecycle events (also see below). The container is also referred to as the component
container or IoC container.
Framework components
For each Granitic facility that you enable in your application, one or more framework components will be created and added to the container.
A framework component is exactly the same as any other component - an instance of a struct with a name. Depending on the
complexity of the facility, multiple components may be created.
A very simple name-spacing is used to separate the names of framework components from application components -
framework components' names all start with the string grnc
You are strongly encouraged to make sure your application components' names do not start with this string.
Dependencies and configuration
As part of its definition, your components can request that other components are injected into its fields. Your definition
can also include configuration (actual values to be set when the component is instantiated) or configuration promises
(values to be injected once all sources of configuration have been merged together).
{
"components": {
"createRecordLogic": {
"type": "inventory.CreateRecordLogic",
"MaxTracks": 20,
"ArtistMustExist": "conf:record.disableAutoArtist",
"DAO": "ref:inventoryDAO"
}
}
}
In the above example, the field CreateRecordLogic.MaxTracks is set to 20 when the struct is instantiated, ArtistMustExist
is set to the config element 'record.disableAutoArtist' and DAO is set to a reference to another component's instance. Note that c: and r:
can be used as shorthand for config: and ref: See http://granitic.io/1.0/ref/components for more information
Any error such as type mismatches or missing configuration will cause an error that will halt application startup.
Component templates
A template mechanism exists to allow multiple components that share a type, dependencies or configuration items to
only have those elements defined once. This is especially useful for web service handlers. See
http://granitic.io/1.0/ref/components#templates for more details.
Binding
Unlike JVM/CLR languages, Go has no runtime 'instance-for-type-name' mechanism for creating instances of a struct. As
a result, unlike JVM/CLR IoC containers you may have used, the container does not instantiate the actual instances
of the Go structs behind application components. Instead a 'binding' process is used - refer to the package documentation
for the grnc-bind tool for more information.
Container lifecycle
The process of starting the container transitions through a number of distinct phases. It is possible for your code
to be explicitly invoked during some of these phases by implementing one or more lifecycle interfaces.
Populate Application and framework components are stored in the container.
Configure Configuration and dependencies are resolved and injected into components.
Decorate Components implementing the ioc.ComponentDecorator are given access to all other components
to potentially modify.
Start Components implementing the ioc.Startable interface are invoked.
Access Components implementing ioc.AccessibilityBlocker and ioc.Accessible are interacted with.
Ready Granitic application is running.
When the container is ready, a log message similar to
grncInit Ready (startup time 6.28ms)
will be logged.
There are several other possible lifecycle phases after the container is ready:
Suspend Components implementing ioc.Suspendable have their Suspend method invoked.
Resume Components implementing ioc.Suspendable have their Resume method invoked.
Stop Components implementing ioc.Stoppable are allowed to stop gracefully before the application exits.
Decorators
Decorators are special components implementing ioc.ComponentDecorator. Their main purpose is to inject dynamically
created objects into other components (such as Loggers). Decorators are destroyed after the Decorate phase of container
startup.
Stopping
Components that need to perform some shutdown process before an application exits should implement the Stoppable
interface. See the GoDoc for ioc.Stoppable below for more detail.
Container settings
The file $GRANITIC_HOME/resource/facility-config/system.json contains configuration, that can be overridden in your
application's configuration file, affecting startup, garbage collection and shutdown behaviour of the container.
More information can be found at http://granitic.io/1.0/ref/system-settings
Gaining access to the container
If your application component needs direct access to the container it should implement the ioc.ContainerAccessor. A
reference to the container will be injected into your component during the decorate phase.
External interaction with the container
If your application enables the RuntimeCtl facility, you can interact with the container and its components by using
the grnc-ctl command line utility. See the package documentation for grnc-ctl for more information.
*/
package ioc
// ComponentState describes what state (stopped, running) or transition between
// states (stopping, starting) a component is currently in.
type ComponentState int

const (
	// NOTE(review): these constants are untyped ints (iota without an explicit
	// ComponentState type); left as-is because retyping them could break
	// callers that assign them to plain int variables.
	StoppedState = iota
	StoppingState
	StartingState
	AwaitingAccessState
	RunningState
	SuspendingState
	SuspendedState
	ResumingState
)
// A wrapping structure for a list of ProtoComponents and FrameworkDependencies that is required when starting Granitic.
// A ProtoComponents structure is built by the grnc-bind tool.
type ProtoComponents struct {
	// ProtoComponents to be finalised and stored in the IoC container.
	Components []*ProtoComponent
	// FrameworkDependencies are instructions to inject components into built-in Granitic components to alter their behaviour.
	// The structure is map[frameworkComponentName]map[fieldName]componentName
	// (assumed from the types — the original comment was truncated; confirm).
	FrameworkDependencies map[string]map[string]string
	// FrameworkConfig is a Base64 encoded version of the JSON files found in resource/facility-config.
	FrameworkConfig *string
}
// Clear removes the reference to the ProtoComponent objects held in this object, encouraging garbage collection.
// FrameworkDependencies and FrameworkConfig are not cleared.
func (pc *ProtoComponents) Clear() {
	pc.Components = nil
}
// NewProtoComponents creates a wrapping structure around the supplied list of
// ProtoComponents, framework dependency instructions and serialised framework
// configuration.
func NewProtoComponents(pc []*ProtoComponent, fd map[string]map[string]string, ser *string) *ProtoComponents {
	return &ProtoComponents{
		Components:            pc,
		FrameworkDependencies: fd,
		FrameworkConfig:       ser,
	}
}
// CreateProtoComponent creates a new ProtoComponent wrapping the supplied
// instance under the supplied component name.
func CreateProtoComponent(componentInstance interface{}, componentName string) *ProtoComponent {
	return &ProtoComponent{
		Component: &Component{
			Name:     componentName,
			Instance: componentInstance,
		},
	}
}
// A ProtoComponent is a partially configured component that will be hosted in the Granitic IoC container once
// it is fully configured. Typically ProtoComponents are created using the grnc-bind tool.
type ProtoComponent struct {
	// The name of a component and the component instance (a pointer to an instantiated struct).
	Component *Component
	// A map of fields on the component instance and the names of other components that should be injected into those fields.
	Dependencies map[string]string
	// A map of fields on the component instance and the config-path that will contain the configuration that should be injected into the field.
	ConfigPromises map[string]string
}
// AddDependency requests that the container injects another component into the
// specified field during the configure phase of container startup.
func (pc *ProtoComponent) AddDependency(fieldName, componentName string) {
	// Lazily create the map so the zero value of ProtoComponent stays usable.
	if pc.Dependencies == nil {
		pc.Dependencies = map[string]string{}
	}
	pc.Dependencies[fieldName] = componentName
}
// AddConfigPromise requests that the container injects the config value at the specified path
// into the specified field during the configure phase of container startup.
// (The doc comment previously said "AddDependency" — a copy-paste error.)
func (pc *ProtoComponent) AddConfigPromise(fieldName, configPath string) {
	// Lazily create the map so the zero value of ProtoComponent stays usable.
	if pc.ConfigPromises == nil {
		pc.ConfigPromises = make(map[string]string)
	}
	pc.ConfigPromises[fieldName] = configPath
}
// A Component is an instance of a struct with a name that is unique within your application.
type Component struct {
	// Instance is a pointer to the struct hosted by the container.
	Instance interface{}

	// Name is a name for this component that is unique within your application.
	Name string
}

// Components is a slice of components, defined as a named type to allow sorting.
type Components []*Component

func (s Components) Len() int      { return len(s) }
func (s Components) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ByName allows a Components slice to be ordered by component name via the sort package.
type ByName struct{ Components }

func (s ByName) Less(i, j int) bool { return s.Components[i].Name < s.Components[j].Name }

// ComponentNamer is implemented by components whose instance needs to be aware
// of its own component name.
type ComponentNamer interface {
	// ComponentName returns the name of the component.
	ComponentName() string
	// SetComponentName injects the component's name.
	SetComponentName(name string)
}

// NewComponent creates a new Component with the supplied name and instance.
func NewComponent(name string, instance interface{}) *Component {
	return &Component{Name: name, Instance: instance}
}
|
package main
import (
"bufio"
"fmt"
"github.com/miekg/dns"
"net"
"os"
)
// main smoke-tests RR parsing, then resolves NS records for each TLD listed
// (one per line) in tld_clean.lst, printing results to stdout.
func main() {
	fmt.Println("jooo")

	// Exercise the dns library by parsing a literal MX record.
	mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	fmt.Println(mx, err)

	fileIn, err := os.Open("tld_clean.lst")
	if err != nil {
		// Without the input file there is nothing to scan. Previously execution
		// continued with a nil *os.File, so every subsequent read failed and the
		// loop silently did nothing; exit instead.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer fileIn.Close()

	scanner := bufio.NewScanner(fileIn)
	for scanner.Scan() {
		line := scanner.Text()
		fmt.Println("testing:", line)

		nameserver, err := net.LookupNS(line)
		if err != nil {
			// A lookup failure for one TLD should not stop the remaining lookups.
			fmt.Println(err)
			continue
		}
		fmt.Println(nameserver)
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
|
// Package eskip provides a simple templating solution reusable in filters.
//
// (Note that the current template syntax is EXPERIMENTAL, and may change in
// the near future.)
package eskip
import (
"regexp"
"strings"
"github.com/zalando/skipper/filters"
)
// placeholderRegexp matches placeholders of the form ${name}; its single capture
// group holds the placeholder name.
var placeholderRegexp = regexp.MustCompile(`\$\{([^{}]+)\}`)

// TemplateGetter functions return the value for a template parameter name.
type TemplateGetter func(string) string

// Template represents a string template with named placeholders.
type Template struct {
	template     string
	placeholders []string
}

// NewTemplate parses a template string and returns a reusable *Template object.
// The template string can contain named placeholders of the format:
//
//	Hello, ${who}!
func NewTemplate(template string) *Template {
	t := &Template{template: template}
	for _, match := range placeholderRegexp.FindAllStringSubmatch(template, -1) {
		t.placeholders = append(t.placeholders, match[1])
	}
	return t
}
// Apply evaluates the template using a TemplateGetter function to resolve the
// placeholders. A nil getter returns the template text unmodified.
func (t *Template) Apply(get TemplateGetter) string {
	if get == nil {
		return t.template
	}
	s, _ := t.apply(get)
	return s
}
// ApplyRequestContext evaluates the template using a filter context and request attributes to resolve the
// placeholders. Returns true if all placeholders resolved to non-empty values.
func (t *Template) ApplyRequestContext(ctx filters.FilterContext) (string, bool) {
	// response=false: response.header.* placeholders are not resolved here.
	return t.apply(contextGetter(ctx, false))
}

// ApplyResponseContext evaluates the template using a filter context, request and response attributes to resolve the
// placeholders. Returns true if all placeholders resolved to non-empty values.
func (t *Template) ApplyResponseContext(ctx filters.FilterContext) (string, bool) {
	// response=true additionally enables response.header.* placeholders.
	return t.apply(contextGetter(ctx, true))
}
// contextGetter builds a getter that resolves placeholder names from the request
// (headers, query parameters, cookies, path), from response headers when response
// is true, and falls back to path parameters for any other name.
func contextGetter(ctx filters.FilterContext, response bool) func(key string) string {
	const (
		reqHeaderPrefix  = "request.header."
		reqQueryPrefix   = "request.query."
		reqCookiePrefix  = "request.cookie."
		respHeaderPrefix = "response.header."
	)
	return func(key string) string {
		switch {
		case strings.HasPrefix(key, reqHeaderPrefix):
			return ctx.Request().Header.Get(key[len(reqHeaderPrefix):])
		case strings.HasPrefix(key, reqQueryPrefix):
			return ctx.Request().URL.Query().Get(key[len(reqQueryPrefix):])
		case strings.HasPrefix(key, reqCookiePrefix):
			cookie, err := ctx.Request().Cookie(key[len(reqCookiePrefix):])
			if err != nil {
				return ""
			}
			return cookie.Value
		case key == "request.path":
			return ctx.Request().URL.Path
		case response && strings.HasPrefix(key, respHeaderPrefix):
			return ctx.Response().Header.Get(key[len(respHeaderPrefix):])
		}
		return ctx.PathParam(key)
	}
}
// apply evaluates the template using a TemplateGetter function to resolve the
// placeholders. The boolean result is true only when every placeholder resolved
// to a non-empty value.
func (t *Template) apply(get TemplateGetter) (string, bool) {
	out := t.template
	complete := true
	for _, name := range t.placeholders {
		v := get(name)
		if v == "" {
			complete = false
		}
		out = strings.Replace(out, "${"+name+"}", v, -1)
	}
	return out, complete
}
|
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"errors"
"fmt"
"github.com/containerd/containerd/cmd/ctr/commands"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/platforms"
"github.com/containerd/stargz-snapshotter/ipfs"
estargzconvert "github.com/containerd/stargz-snapshotter/nativeconverter/estargz"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// IPFSPushCommand pushes an image to IPFS, optionally converting its layers to
// eStargz first, and prints the resulting CID.
var IPFSPushCommand = cli.Command{
	Name:      "ipfs-push",
	Usage:     "push an image to IPFS (experimental)",
	ArgsUsage: "[flags] <image_ref>",
	Flags: []cli.Flag{
		// platform flags
		cli.StringSliceFlag{
			Name:  "platform",
			Usage: "Add content for a specific platform",
			Value: &cli.StringSlice{},
		},
		cli.BoolFlag{
			Name:  "all-platforms",
			Usage: "Add content for all platforms",
		},
		// BoolTFlag defaults to true: eStargz conversion is on unless disabled.
		cli.BoolTFlag{
			Name:  "estargz",
			Usage: "Convert the image into eStargz",
		},
	},
	Action: func(context *cli.Context) error {
		// The single positional argument is the image reference to push.
		srcRef := context.Args().Get(0)
		if srcRef == "" {
			return errors.New("image need to be specified")
		}

		// Resolve which platforms to include: all, an explicit list, or the default.
		var platformMC platforms.MatchComparer
		if context.Bool("all-platforms") {
			platformMC = platforms.All
		} else {
			if pss := context.StringSlice("platform"); len(pss) > 0 {
				var all []ocispec.Platform
				for _, ps := range pss {
					p, err := platforms.Parse(ps)
					if err != nil {
						return fmt.Errorf("invalid platform %q: %w", ps, err)
					}
					all = append(all, p)
				}
				platformMC = platforms.Ordered(all...)
			} else {
				platformMC = platforms.DefaultStrict()
			}
		}

		client, ctx, cancel, err := commands.NewClient(context)
		if err != nil {
			return err
		}
		defer cancel()

		// Optionally convert each layer to eStargz while pushing.
		var layerConvert converter.ConvertFunc
		if context.Bool("estargz") {
			layerConvert = estargzconvert.LayerConvertFunc()
		}

		// Push returns the CID of the uploaded image.
		p, err := ipfs.Push(ctx, client, srcRef, layerConvert, platformMC)
		if err != nil {
			return err
		}
		logrus.WithField("CID", p).Infof("Pushed")
		fmt.Println(p)
		return nil
	},
}
|
package main
import (
"bytes"
b64 "encoding/base64"
"fmt"
"io/ioutil"
"os"
"os/exec"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
type MyEvent struct {
ScreenName string `json:"screen_name"`
PNGBase64 string `json:"pngbase64"`
}
type MyResponse struct {
URI string `json:"uri:"`
OK bool `json:"ok"`
}
// handler decodes a base64 PNG from the event, runs the /primitive binary over
// it, and uploads the result to S3 under /<screen_name>.png.
//
// NOTE(review): the response URI is always "" even on success — TODO return the
// S3 object URL. session.New() is deprecated in aws-sdk-go; consider
// session.Must(session.NewSession(...)).
func handler(request MyEvent) (MyResponse, error) {
	var BUCKET = os.Getenv("BUCKET")
	// Key has a leading slash, so the object lands under an empty-named prefix.
	var KEY = fmt.Sprintf("/%v.png", request.ScreenName)
	fmt.Printf("loaded envvar\n")
	// extract image file from event
	decoded, err := b64.StdEncoding.DecodeString(request.PNGBase64)
	if err != nil {
		fmt.Printf("error occurred: %v\n", err)
		return MyResponse{URI: "", OK: false}, err
	}
	fmt.Printf("decoded\n")
	// save image into temporary file
	tmpFile, err := ioutil.TempFile("", "received*.png")
	if err != nil {
		fmt.Printf("error occurred: %v\n", err)
		return MyResponse{URI: "", OK: false}, err
	}
	defer os.Remove(tmpFile.Name())
	_, err = tmpFile.Write(decoded)
	if err != nil {
		fmt.Printf("error occurred: %v\n", err)
		return MyResponse{URI: "", OK: false}, err
	}
	// Flush and close before handing the file to the external process.
	tmpFile.Sync()
	tmpFile.Close()
	fmt.Printf("wrote\n")
	// call primitive
	primitive := exec.Command("/primitive", "-n", "10", "-m", "1", "-i", tmpFile.Name(), "-o", "/tmp/result.png")
	err = primitive.Run()
	if err != nil {
		fmt.Printf("error occurred: %v\n", err)
		return MyResponse{URI: "", OK: false}, err
	}
	fmt.Printf("ran primitive\n")
	// load result image
	resultFile, err := ioutil.ReadFile("/tmp/result.png")
	if err != nil {
		fmt.Printf("error occurred: %v\n", err)
		return MyResponse{URI: "", OK: false}, err
	}
	fmt.Printf("loaded image\n")
	// upload file into S3
	svc := s3.New(session.New(), &aws.Config{
		Region: aws.String(endpoints.ApNortheast1RegionID),
	})
	_, errpo := svc.PutObject(&s3.PutObjectInput{
		Body:        bytes.NewReader(resultFile),
		Bucket:      aws.String(BUCKET),
		Key:         aws.String(KEY),
		ACL:         aws.String("public-read"),
		ContentType: aws.String("image/png"),
	})
	if errpo != nil {
		fmt.Printf("error occurred: %v\n", errpo)
		return MyResponse{URI: "", OK: false}, errpo
	}
	fmt.Printf("uploaded\n")
	// return image URI
	return MyResponse{URI: "", OK: true}, nil
}
// main wires the handler into the AWS Lambda runtime.
func main() {
	lambda.Start(handler)
}
|
package main
import (
"fmt"
"os"
"bufio"
"strings"
)
// main reads one line of text from stdin and prints the number of occurrences
// of each word, where words are produced by splitting on single spaces.
func main() {
	fmt.Print("Enter a string:")

	scanner := bufio.NewScanner(os.Stdin)
	scanner.Scan()
	text := scanner.Text()
	fmt.Printf("Given text : <%s>..\n", text)

	// Count occurrences. A missing key reads as zero, so the original
	// "check-then-insert" branch was redundant: m[w]++ covers both cases.
	counts := make(map[string]int)
	for _, word := range strings.Split(text, " ") {
		counts[word]++
	}

	// Map iteration order is random, so the report order varies between runs.
	for word, n := range counts {
		fmt.Printf("%s -> %d\n", word, n)
	}
}
|
// Copyright (c) 2020 Cisco and/or its affiliates.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tgo provides a mechanism for building an in-directory go cache (source and binaries)
// transparently, in a manner that eases use of go with docker.
package tgo
import (
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/edwarnicke/exechelper"
"github.com/joho/godotenv"
"github.com/pkg/errors"
)
// Names of the environment variables the cache records in .tgo/env.
const (
	pwdEnv     = "PWD"
	goPathEnv  = "GOPATH"
	goCacheEnv = "GOCACHE"
)
// Tgo provides a mechanism for building an in-directory go cache (source and binaries)
// transparently in a manner that eases use of go with docker.
type Tgo struct {
	tGoParent string            // directory passed to New; the cache lives under it
	tGoDir    string            // <tGoParent>/.tgo
	tGoRoot   string            // <tGoParent>/.tgo/root — mirror of the host filesystem layout
	config    map[string]string // persisted env (PWD, GOPATH, GOCACHE) loaded from .tgo/env
	goEnv     map[string]string // parsed output of `go env`
	once      sync.Once         // guards one-time lazy setup in init()
	err       error             // sticky error recorded by init()
}
// New - a new Tgo cache rooted at pwd/.tgo. Initialization is deferred until
// the cache is first used.
func New(pwd string) *Tgo {
	dir := filepath.Join(pwd, ".tgo")
	return &Tgo{
		tGoParent: pwd,
		tGoDir:    dir,
		tGoRoot:   filepath.Join(dir, "root"),
		config:    map[string]string{},
	}
}
// init lazily prepares the cache exactly once: it loads any persisted config
// from .tgo/env, captures the local `go env`, creates the .tgo directory tree
// on first use, and copies the dependency source in when running on the machine
// that created the cache. Any failure is recorded in t.err and returned on
// every subsequent call.
func (t *Tgo) init() error {
	t.once.Do(func() {
		// Read the config persisted by a previous run (if any).
		config, err := godotenv.Read(filepath.Join(t.tGoDir, "env"))
		if err == nil {
			t.config = config
		}
		// A missing env file just means the cache is new; other errors are fatal.
		if err != nil && !os.IsNotExist(err) {
			t.err = err
			return
		}
		// Grab the go envs from go
		output, err := exechelper.Output("go env", exechelper.WithEnvirons(os.Environ()...))
		if err != nil {
			t.err = err
			return
		}
		env, err := godotenv.Unmarshal(string(output))
		if err != nil {
			t.err = err
			return
		}
		t.goEnv = env
		// Initialize .tgo if it didn't exist before: record PWD/GOPATH/GOCACHE,
		// create the mirrored directory tree, symlink the parent in, and persist
		// the config for later runs.
		if _, ok := t.config[pwdEnv]; !ok {
			t.config[pwdEnv] = t.tGoParent
			t.config[goPathEnv] = t.goEnv[goPathEnv]
			t.config[goCacheEnv] = t.goEnv[goCacheEnv]
			if err := t.mkdirs(); err != nil {
				t.err = err
				return
			}
			if err := t.mksymlink(); err != nil {
				t.err = err
				return
			}
			if err := godotenv.Write(t.config, filepath.Join(t.tGoDir, "env")); err != nil {
				t.err = err
				return
			}
		}
		// Load source, but only on the machine that originally built the cache
		// (recorded PWD matches our parent directory).
		if t.config[pwdEnv] == t.tGoParent {
			if err := t.copysource(); err != nil {
				t.err = err
				return
			}
		}
	})
	return t.err
}
// Run - run a command in the TGo cache in exechelper style: the command inherits
// the current environment plus stdio, with GOPATH and PWD redirected into the
// cache's mirrored root. Caller-supplied options are applied last so they can
// override the defaults.
func (t *Tgo) Run(cmdString string, options ...*exechelper.Option) error {
	if err := t.init(); err != nil {
		return err
	}
	stdenv := []*exechelper.Option{
		exechelper.WithEnvirons(os.Environ()...),
		exechelper.WithStdout(os.Stdout),
		exechelper.WithStderr(os.Stderr),
		exechelper.WithStdin(os.Stdin),
		exechelper.WithEnvKV(goPathEnv, t.tGoPath(t.config[goPathEnv])),
		exechelper.WithEnvKV(pwdEnv, t.tGoPath(t.config[pwdEnv])),
	}
	// If we are not where the source cache was built, cache the binary objects too for later recovery
	if t.config[pwdEnv] != t.tGoParent {
		stdenv = append(stdenv, exechelper.WithEnvKV(goCacheEnv, t.tGoPath(t.config[goCacheEnv])))
	}
	options = append(stdenv, options...)
	if err := exechelper.Run(cmdString, options...); err != nil {
		return errors.Wrapf(err, "Error running %s", cmdString)
	}
	return nil
}
// Clean - removes the entire .tgo directory.
func (t *Tgo) Clean() error {
	return t.clean(t.tGoDir)
}
// clean removes dir, which must be located under the .tgo directory, first
// restoring the user-write bit on any read-only entries so os.RemoveAll can
// delete them.
func (t *Tgo) clean(dir string) error {
	// Refuse to delete anything outside the cache. Fix: the message previously
	// reported t.tGoRoot while the guard checks t.tGoDir; report the directory
	// actually checked.
	if !strings.HasPrefix(dir, t.tGoDir) {
		return errors.Errorf("Cannot clean %q as it is not under %q", dir, t.tGoDir)
	}
	// Restore write permission on read-only entries so RemoveAll succeeds.
	if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Mode()&0200 == 0 {
			if err := os.Chmod(path, info.Mode()|0200); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		return err
	}
	return os.RemoveAll(dir)
}
// tGoPath maps an absolute host path to its mirrored location under the .tgo root.
func (t *Tgo) tGoPath(path string) string {
	return filepath.Join(t.tGoRoot, path)
}
// mkdirs creates the mirrored directories for the recorded PWD parent, GOPATH
// and GOCACHE under the .tgo root, copying each source directory's permission
// bits onto its mirror.
func (t *Tgo) mkdirs() error {
	for _, dir := range []string{filepath.Dir(t.config[pwdEnv]), t.config[goPathEnv], t.config[goCacheEnv]} {
		if err := os.MkdirAll(t.tGoPath(dir), 0750); err != nil {
			return err
		}
		// Mirror the original directory's mode onto the cached copy.
		info, err := os.Stat(dir)
		if err != nil {
			return err
		}
		if err := os.Chmod(t.tGoPath(dir), info.Mode()); err != nil {
			return err
		}
	}
	return nil
}
// mksymlink links the mirrored location of the parent directory back to the real
// parent, using a relative link so the cache remains relocatable.
func (t *Tgo) mksymlink() error {
	// Symlink to the tgoParent
	relPwd, err := filepath.Rel(t.tGoPath(filepath.Dir(t.tGoParent)), t.tGoParent)
	if err != nil {
		return err
	}
	if err := os.Symlink(relPwd, t.tGoPath(t.tGoParent)); err != nil {
		return err
	}
	return nil
}
// copysource copies the source of every package the parent module depends on
// into the mirrored root, skipping anything under GOROOT, GOPATH or the parent
// itself (those are reconstructable from within the cache).
func (t *Tgo) copysource() error {
	// Cache the source and binaries
	options := []*exechelper.Option{
		exechelper.WithEnvirons(os.Environ()...),
		exechelper.WithStderr(os.Stderr),
	}
	// Get all the packages depended on in tgoParent
	output, err := exechelper.Output(`go list -f '{{if .Module}}{{printf "%s\n%s" .Dir .Module.GoMod }}{{else}}{{.Dir}}{{end}}' all ./...`, options...)
	if err != nil {
		return err
	}
	// Extract the directories from output
	dirs := strings.Split(strings.TrimSpace(string(output)), "\n")
	// Sort the dirs, because it allows us to skip recopying subdirs of dirs we already copied
	sort.Strings(dirs)
	var dirPrefix string
	for _, dir := range dirs {
		// Leave GOROOT and GOPATH out of this... GOPATH can be reconstructed from within the Tgo directory
		if strings.HasPrefix(dir, t.goEnv["GOROOT"]) || strings.HasPrefix(dir, t.goEnv[goPathEnv]) || strings.HasPrefix(dir, t.tGoParent) {
			continue
		}
		// Copy all other source code in, skipping subdirectories of the
		// previously copied directory (dirPrefix).
		if dirPrefix == "" || !strings.HasPrefix(dir, dirPrefix) {
			if err := t.clean(t.tGoPath(dir)); err != nil && !os.IsNotExist(err) {
				return err
			}
			if err := filepath.Walk(dir, t.sourceCopyWalkFunc); err != nil {
				return err
			}
			dirPrefix = dir
		}
	}
	return nil
}
// sourceCopyWalkFunc is a filepath.WalkFunc that mirrors a source tree into the
// .tgo cache: directories are recreated, regular files are copied, and symlinks
// are reproduced. Entries under .tgo or .git are skipped, and every copy gains
// the user-write bit (|0200) so it can be cleaned later.
func (t *Tgo) sourceCopyWalkFunc(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	// Never copy the cache into itself, and leave VCS metadata behind.
	if strings.HasSuffix(path, ".tgo") || strings.HasSuffix(path, ".git") ||
		strings.Contains(path, ".tgo"+string(os.PathSeparator)) || strings.Contains(path, ".git"+string(os.PathSeparator)) {
		return nil
	}
	if err := os.MkdirAll(filepath.Dir(t.tGoPath(path)), 0700); err != nil {
		return err
	}
	if info.IsDir() {
		if err := os.MkdirAll(t.tGoPath(path), info.Mode()|0200); err != nil {
			return err
		}
	}
	if info.Mode().IsRegular() {
		src, fileErr := os.Open(path) // #nosec
		if fileErr != nil {
			// Fix: the original returned the (nil) outer err here, silently
			// swallowing open failures; return the actual error instead.
			return fileErr
		}
		defer func() { _ = src.Close() }()
		dst, err := os.OpenFile(t.tGoPath(path), os.O_RDWR|os.O_CREATE, info.Mode()|0200)
		if err != nil {
			return err
		}
		defer func() { _ = dst.Close() }()
		if _, err := io.Copy(dst, src); err != nil {
			return err
		}
	}
	if info.Mode()&os.ModeSymlink != 0 {
		link, err := os.Readlink(path)
		if err != nil {
			return err
		}
		if err := os.Symlink(link, t.tGoPath(path)); err != nil {
			return err
		}
		if err := os.Chmod(t.tGoPath(path), info.Mode()|0200); err != nil {
			return err
		}
	}
	return nil
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crostini
import (
"context"
"net/http"
"net/http/httptest"
"path/filepath"
"strings"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/colorcmp"
"chromiumos/tast/local/crostini"
"chromiumos/tast/testing"
)
// copyPasteAction distinguishes whether the app under test attempts to copy to
// or paste from the clipboard.
type copyPasteAction int

const (
	copying copyPasteAction = iota
	pasting

	// blockerTitle is the data file served as a page that blocks background
	// apps from the clipboard while it is focused.
	blockerTitle string = "secure_blocker.html"
)

// secureCopyPasteConfig parameterizes a test variant: the GDK backend, the
// python app to run, and whether it copies or pastes.
type secureCopyPasteConfig struct {
	backend string
	app     string
	action  copyPasteAction
}
// init registers the SecureCopyPaste test with one generated variant per
// (action, backend, Debian release, stability) combination.
func init() {
	testing.AddTest(&testing.Test{
		Func:         SecureCopyPaste,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies that background crostini apps can not access the clipboard",
		Contacts:     []string{"clumptini+oncall@google.com"},
		Attr:         []string{"group:mainline"},
		Data:         []string{blockerTitle},
		SoftwareDeps: []string{"chrome", "vm_host"},
		Params: []testing.Param{
			// Parameters generated by secure_copy_paste_test.go. DO NOT EDIT.
			{
				Name:              "copy_wayland_buster_stable",
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_wayland_buster_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_wayland_bullseye_stable",
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_wayland_bullseye_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_x11_buster_stable",
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_x11_buster_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_x11_bullseye_stable",
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "copy_x11_bullseye_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_copy.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_copy.py",
					action:  copying,
				},
			}, {
				Name:              "paste_wayland_buster_stable",
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_wayland_buster_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_wayland_bullseye_stable",
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_wayland_bullseye_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "wayland",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_x11_buster_stable",
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_x11_buster_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBuster",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_x11_bullseye_stable",
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniStable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_paste.py",
					action:  pasting,
				},
			}, {
				Name:              "paste_x11_bullseye_unstable",
				ExtraAttr:         []string{"informational"},
				ExtraData:         []string{"secure_paste.py"},
				ExtraSoftwareDeps: []string{"dlc"},
				ExtraHardwareDeps: crostini.CrostiniUnstable,
				Fixture:           "crostiniBullseye",
				Timeout:           7 * time.Minute,
				Val: secureCopyPasteConfig{
					backend: "x11",
					app:     "secure_paste.py",
					action:  pasting,
				},
			},
		},
	})
}
// forceClipboard is a stronger version of the setClipboardTextData api, which
// repeatedly sets/checks the clipboard data to ensure that the requested value
// is on there. We need this because the applications under test are fighting
// for clipboard control.
func forceClipboard(ctx context.Context, tconn *chrome.TestConn, data string) error {
	return testing.Poll(ctx, func(ctx context.Context) error {
		if err := tconn.Call(ctx, nil, `tast.promisify(chrome.autotestPrivate.setClipboardTextData)`, data); err != nil {
			return err
		}
		// Read the clipboard back; another app may have overwritten it between
		// the set and this check, in which case Poll retries.
		var clipData string
		if err := tconn.Eval(ctx, `tast.promisify(chrome.autotestPrivate.getClipboardTextData)()`, &clipData); err != nil {
			return err
		}
		if clipData != data {
			// Fix: corrected "missmatch" -> "mismatch" in the error message.
			return errors.Errorf("clipboard data mismatch: got %q, want %q", clipData, data)
		}
		return nil
	}, &testing.PollOptions{Timeout: 20 * time.Second})
}
// SecureCopyPaste verifies that a backgrounded crostini app cannot read or
// write the clipboard while another window blocks it, and that it can once the
// blocker is removed.
func SecureCopyPaste(ctx context.Context, s *testing.State) {
	conf := s.Param().(secureCopyPasteConfig)
	pre := s.FixtValue().(crostini.FixtureData)
	cr := pre.Chrome
	tconn := pre.Tconn
	cont := pre.Cont

	// Initialize the clipboard data before the test.
	if err := forceClipboard(ctx, tconn, ""); err != nil {
		s.Fatal("Failed to set clipboard data: ", err)
	}

	// Launch the app.
	if err := cont.PushFile(ctx, s.DataPath(conf.app), conf.app); err != nil {
		s.Fatalf("Failed to push %v to container: %v", conf.app, err)
	}
	appID, exitCallback, err := crostini.LaunchGUIApp(ctx, tconn, cont.Command(ctx, "env", "GDK_BACKEND="+conf.backend, "python3", conf.app))
	if err != nil {
		s.Fatal("Failed to launch crostini app: ", err)
	}
	defer exitCallback()
	s.Log("Launched crostini app with ID: ", appID)

	// Bring up a webpage to block the crostini app. We use the MatchScreenshotDominantColor
	// trick because we need the page to complete fading in animations before it can
	// block other applications' access to the clipboard.
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	conn, err := cr.NewConn(ctx, server.URL+"/"+blockerTitle)
	if err != nil {
		s.Fatal("Failed to open a blocker: ", err)
	}
	defer conn.Close()
	ws, err := ash.GetAllWindows(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to retrieve currently opened windows: ", err)
	}
	// Maximize the blocker to ensure our screenshot dominant colour condition succeeds.
	// GetAllWindows returns windows by their stacking order, so ws[0] is the foregrounded window.
	maximized := false
	for _, w := range ws {
		if strings.Contains(w.Title, blockerTitle) {
			// NOTE(review): the error from SetWindowState is ignored here — TODO confirm
			// whether a failed maximize should be fatal.
			ash.SetWindowState(ctx, tconn, w.ID, ash.WMEventMaximize, true /* waitForStateChange */)
			maximized = true
			break
		}
	}
	if !maximized {
		s.Fatal("Failed to find the secure_blocker window to maximize")
	}
	// Wait until the (black) blocker page dominates the screen.
	if err := crostini.MatchScreenshotDominantColor(ctx, cr, colorcmp.RGB(0, 0, 0), filepath.Join(s.OutDir(), "screenshot.png")); err != nil {
		s.Fatal("Failed during screenshot check: ", err)
	}

	// Set the clipboard data now that the blocker is up.
	if err := forceClipboard(ctx, tconn, "secret"); err != nil {
		s.Fatal("Failed to set clipboard data: ", err)
	}

	// Poll the clipboard to make sure it does NOT change.
	var clipboardCheck func(ctx context.Context) error
	if conf.action == copying {
		// For copying, we are checking to see that the app didn't replace the clipboard
		// contents (currently: "secret").
		clipboardCheck = func(ctx context.Context) error {
			var clipData string
			if err := tconn.Eval(ctx, `tast.promisify(chrome.autotestPrivate.getClipboardTextData)()`, &clipData); err != nil {
				return err
			} else if clipData == "secret" {
				return errors.New("clipboard data has not been changed")
			}
			return nil
		}
	} else {
		// For pasting, the app will exit itself if it reads "secret" from the clipboard,
		// so we just check to see if it is still running.
		clipboardCheck = func(ctx context.Context) error {
			if visible, err := ash.AppShown(ctx, tconn, appID); err != nil {
				return err
			} else if visible {
				return errors.New("application is still visible")
			}
			return nil
		}
	}

	// First, check that while the blocker is up, the app can not interact with the clipboard.
	if err := testing.Poll(ctx, clipboardCheck, &testing.PollOptions{Timeout: 30 * time.Second}); err == nil {
		s.Fatal("Failed to block clipboard contents while inactive")
	}
	// Remove the blocker.
	if err := conn.CloseTarget(ctx); err != nil {
		s.Fatal("Failed to close the blocker: ", err)
	}
	// Now the blocker is removed, re-run the above check to ensure that the (now active) app can interact.
	if err := testing.Poll(ctx, clipboardCheck, &testing.PollOptions{Timeout: 30 * time.Second}); err != nil {
		s.Fatal("Failed to access the clipboard while active: ", err)
	}
}
|
package messaging
import (
"time"
)
// Acknowledgement type identifiers carried in AcknowledgeMessage.TypeAck.
// NOTE(review): ALL_CAPS names are unidiomatic Go, but renaming would break callers.
const (
	ADD_ACK      string = "ADD_ACK"
	DELETE_ACK   string = "DEL_ACK"
	MODIFIED_ACK string = "MOD_ACK"
)
// AcknowledgeMessage is the JSON acknowledgement exchanged between nodes,
// stamped with the Unix time of creation.
type AcknowledgeMessage struct {
	Sender    string       `json:"sender"`
	BaseNode  string       `json:"base_node"`
	TypeAck   string       `json:"type_ack"`
	Component ComponentAck `json:"component"`
	Timestamp int64        `json:"timestamp"`
}

// ComponentAck identifies the component being acknowledged and its function.
type ComponentAck struct {
	Name     string      `json:"name"`
	AppName  string      `json:"app_name"`
	Function FunctionAck `json:"function"`
}

// FunctionAck describes a function image and its resource requirements.
type FunctionAck struct {
	Image     string       `json:"image"`
	Resources ResourcesAck `json:"resources"`
}

// ResourcesAck carries the memory and CPU requirements of a function.
type ResourcesAck struct {
	Memory float64 `json:"memory"`
	Cpu    float64 `json:"cpu"`
}

// NewAcknowledgeMessage builds an AcknowledgeMessage for the given sender, base
// node, ack type and component, stamped with the current Unix time.
func NewAcknowledgeMessage(sender string, baseNode string, typeAck string, component ComponentAck) *AcknowledgeMessage {
	return &AcknowledgeMessage{
		Sender:    sender,
		BaseNode:  baseNode,
		TypeAck:   typeAck,
		Component: component,
		Timestamp: time.Now().Unix(),
	}
}

// NewComponentAck assembles a ComponentAck describing a component's function
// image and resource requirements.
func NewComponentAck(name string, appName string, image string, memory float64, cpu float64) *ComponentAck {
	return &ComponentAck{
		Name:    name,
		AppName: appName,
		Function: FunctionAck{
			Image:     image,
			Resources: ResourcesAck{Memory: memory, Cpu: cpu},
		},
	}
}
|
package quantity
// ConversionMap maps a source unit to the conversions available from it, keyed
// by destination unit. (The Unit type is declared elsewhere in this package.)
type ConversionMap map[Unit]map[Unit]ConversionRatio

// ConversionRatio describes a single unit conversion: one From equals Ratio To.
type ConversionRatio struct {
	From  Unit
	To    Unit
	Ratio int
}

// The units currently known to the conversion table.
const (
	feet   Unit = "feet"
	inches Unit = "inches"
)
// feetToInches is the built-in conversion: 1 foot = 12 inches.
var feetToInches = ConversionRatio{From: feet, To: inches, Ratio: 12}

// NewConversionMap builds the table of known unit conversions, keyed by source
// unit then destination unit.
func NewConversionMap() ConversionMap {
	return ConversionMap{
		feet: {inches: feetToInches},
	}
}
// convertTo converts q into the given unit using the conversion table.
//
// NOTE(review): the conversion map is rebuilt on every call, and an unknown
// unit pair yields the zero ConversionRatio, silently producing an Amount of
// 0 — consider hoisting the map and returning an error for unsupported
// conversions.
func (q Quantity) convertTo(unit Unit) Quantity {
	ratio := NewConversionMap()[q.Unit][unit]
	return Quantity{q.Amount * ratio.Ratio, unit}
}
|
package oidcserver
import (
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
oidc "github.com/coreos/go-oidc"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/gorilla/mux"
"github.com/pardot/deci/oidcserver/internal"
storagepb "github.com/pardot/deci/proto/deci/storage/v1beta1"
"github.com/pardot/deci/storage"
"github.com/sirupsen/logrus"
jose "gopkg.in/square/go-jose.v2"
)
// handlePublicKeys serves the signer's current public key set as an indented
// JSON document with a short client-side cache lifetime.
func (s *Server) handlePublicKeys(w http.ResponseWriter, r *http.Request) {
	ks, err := s.signer.PublicKeys(r.Context())
	if err != nil {
		s.logger.WithError(err).Error("failed to fetch public keys")
		s.renderError(w, http.StatusInternalServerError, "Internal server error.")
		return
	}

	data, err := json.MarshalIndent(ks, "", " ")
	if err != nil {
		s.logger.Errorf("failed to marshal discovery data: %v", err)
		s.renderError(w, http.StatusInternalServerError, "Internal server error.")
		return
	}

	// TODO(lstoll): is it worth setting a better time for this, and caching here?
	maxAge := 1 * time.Minute
	w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d, must-revalidate", int(maxAge.Seconds())))
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", strconv.Itoa(len(data)))
	if _, err := w.Write(data); err != nil {
		// Fix: the status line has already been sent by Write, so the previous
		// WriteHeader(500) was a no-op that only triggered a "superfluous
		// WriteHeader" warning; log the failure instead.
		s.logger.WithError(err).Error("failed to write public keys response")
	}
}
// discovery is the JSON shape of the OIDC discovery document served by
// discoveryHandler.
type discovery struct {
	Issuer string `json:"issuer"`
	Auth string `json:"authorization_endpoint"`
	Token string `json:"token_endpoint"`
	Keys string `json:"jwks_uri"`
	UserInfo string `json:"userinfo_endpoint"`
	ResponseTypes []string `json:"response_types_supported"`
	Subjects []string `json:"subject_types_supported"`
	IDTokenAlgs []string `json:"id_token_signing_alg_values_supported"`
	Scopes []string `json:"scopes_supported"`
	AuthMethods []string `json:"token_endpoint_auth_methods_supported"`
	Claims []string `json:"claims_supported"`
}
// discoveryHandler renders the OIDC discovery document once, up front, and
// returns a handler that serves the pre-marshaled JSON on every request.
func (s *Server) discoveryHandler() (http.HandlerFunc, error) {
	doc := discovery{
		Issuer:      s.issuerURL.String(),
		Auth:        s.absURL("/auth"),
		Token:       s.absURL("/token"),
		Keys:        s.absURL("/keys"),
		UserInfo:    s.absURL("/userinfo"),
		Subjects:    []string{"public"},
		IDTokenAlgs: []string{string(jose.RS256)},
		Scopes:      []string{"openid", "email", "groups", "profile", "offline_access"},
		AuthMethods: []string{"client_secret_basic"},
		Claims: []string{
			"aud", "email", "email_verified", "exp",
			"iat", "iss", "locale", "name", "sub",
		},
	}
	// Response types come from a map, so sort for a stable document.
	for rt := range s.supportedResponseTypes {
		doc.ResponseTypes = append(doc.ResponseTypes, rt)
	}
	sort.Strings(doc.ResponseTypes)

	rendered, err := json.MarshalIndent(doc, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal discovery data: %v", err)
	}

	serve := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Content-Length", strconv.Itoa(len(rendered)))
		if _, err := w.Write(rendered); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
		}
	}
	return http.HandlerFunc(serve), nil
}
// handleAuthorization handles the OAuth2 auth endpoint. It parses and stores
// the authorization request, then either redirects straight to the single
// configured connector or renders a connector-selection page.
func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) {
	authReq, err := s.parseAuthorizationRequest(r)
	if err != nil {
		s.logger.Errorf("Failed to parse authorization request: %v", err)
		if handler, ok := err.Handle(); ok {
			// client_id and redirect_uri checked out and we can redirect back to
			// the client with the error.
			handler.ServeHTTP(w, r)
			return
		}
		// Otherwise render the error to the user.
		//
		// TODO(ericchiang): Should we just always render the error?
		s.renderError(w, err.Status(), err.Error())
		return
	}
	// TODO(ericchiang): Create this authorization request later in the login flow
	// so users don't hit "not found" database errors if they wait at the login
	// screen too long.
	//
	// See: https://github.com/dexidp/dex/issues/646
	var perr error
	authExp := s.now().Add(s.authRequestsValidFor)
	authReq.Expiry, perr = ptypes.TimestampProto(authExp)
	if perr != nil {
		// Log the actual conversion error. This previously logged the stale
		// `err` from the parse step, which is nil on this path.
		s.logger.Errorf("Failed to convert timestamp: %v", perr)
		s.renderError(w, http.StatusInternalServerError, "Internal Error.")
		return
	}
	if _, err := s.storage.PutWithExpiry(r.Context(), authReqKeyspace, authReq.Id, 0, authReq, authExp); err != nil {
		s.logger.Errorf("Failed to create authorization request: %v", err)
		s.renderError(w, http.StatusInternalServerError, "Failed to connect to the database.")
		return
	}
	// With exactly one connector there is nothing to choose; jump straight to it.
	if len(s.connectors) == 1 {
		for k := range s.connectors {
			// TODO(ericchiang): Make this pass on r.URL.RawQuery and let something latter
			// on create the auth request.
			http.Redirect(w, r, s.absPath("/auth", k)+"?req="+authReq.Id, http.StatusFound)
			return
		}
	}
	// Otherwise render a selection page listing every configured connector.
	connectorInfos := make([]connectorInfo, len(s.connectors))
	i := 0
	for k := range s.connectors {
		connectorInfos[i] = connectorInfo{
			ID:   k,
			Name: k,
			// TODO(ericchiang): Make this pass on r.URL.RawQuery and let something latter
			// on create the auth request.
			URL: s.absPath("/auth", k) + "?req=" + authReq.Id,
		}
		i++
	}
	if err := s.templates.login(w, connectorInfos); err != nil {
		s.logger.Errorf("Server template error: %v", err)
	}
}
// handleConnectorLogin hands the user off to the connector named in the URL
// so it can render its login page. The auth request referenced by the "req"
// form value is updated to record which connector is in use.
func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) {
	connID := mux.Vars(r)["connector"]
	conn, ok := s.connectors[connID]
	if !ok {
		s.logger.Error("Failed to create authorization request: connector does not exist")
		s.renderError(w, http.StatusNotFound, "Requested resource does not exist")
		return
	}
	authReqID := r.FormValue("req")
	authReq := &storagepb.AuthRequest{}
	authReqVers, err := s.storage.Get(r.Context(), authReqKeyspace, authReqID, authReq)
	if err != nil {
		s.logger.Errorf("Failed to get auth request: %v", err)
		// Not-found most likely means the stored request expired (they are
		// written with an expiry in handleAuthorization).
		if storage.IsNotFoundErr(err) {
			s.renderError(w, http.StatusBadRequest, "Login session expired.")
		} else {
			s.renderError(w, http.StatusInternalServerError, "Database error.")
		}
		return
	}
	// Set the connector being used for the login. The Put carries the version
	// read above — presumably for optimistic concurrency; confirm against the
	// storage package's Put semantics.
	if authReq.ConnectorId != connID {
		authReq.ConnectorId = connID
		_, err = s.storage.Put(r.Context(), authReqKeyspace, authReqID, authReqVers, authReq)
		if err != nil {
			s.logger.Errorf("Failed to set connector ID on auth request: %v", err)
			s.renderError(w, http.StatusInternalServerError, "Database error.")
			return
		}
	}
	conn.LoginPage(w, r, asLoginRequest(authReq))
}
// handleApproval renders the consent screen (GET) and processes its result
// (POST) for an auth request whose login step has already completed.
func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) {
	authReq := &storagepb.AuthRequest{}
	_, err := s.storage.Get(r.Context(), authReqKeyspace, r.FormValue("req"), authReq)
	if err != nil {
		s.logger.Errorf("Failed to get auth request: %v", err)
		s.renderError(w, http.StatusInternalServerError, "Database error.")
		return
	}
	// Approval only makes sense once an identity is attached to the request;
	// LoggedIn is presumably set by the connector callback — confirm upstream.
	if !authReq.LoggedIn {
		s.logger.Errorf("Auth request does not have an identity for approval")
		s.renderError(w, http.StatusInternalServerError, "Login process not yet finalized.")
		return
	}
	switch r.Method {
	case http.MethodGet:
		// Servers configured to skip the consent screen issue the code
		// response immediately.
		if s.skipApproval {
			s.sendCodeResponse(w, r, authReq)
			return
		}
		client, err := s.clients.GetClient(authReq.ClientId)
		if err != nil {
			s.logger.Errorf("Failed to get client %q: %v", authReq.ClientId, err)
			s.renderError(w, http.StatusInternalServerError, "Failed to retrieve client.")
			return
		}
		if err := s.templates.approval(w, authReq.Id, authReq.Claims.Username, client.Name, authReq.Scopes); err != nil {
			s.logger.Errorf("Server template error: %v", err)
		}
	case http.MethodPost:
		// NOTE(review): a user-rejected approval is reported as a 500; a 4xx
		// would arguably be more accurate — left as-is to preserve behavior.
		if r.FormValue("approval") != "approve" {
			s.renderError(w, http.StatusInternalServerError, "Approval rejected.")
			return
		}
		s.sendCodeResponse(w, r, authReq)
	}
}
// sendCodeResponse finishes a successful authorization: it deletes the
// (single-use) auth request, then redirects back to the client with an
// authorization code (code flow) and/or tokens in the URL fragment
// (implicit/hybrid flows), according to the request's response types.
func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authReq *storagepb.AuthRequest) {
	arexp, err := ptypes.Timestamp(authReq.Expiry)
	if err != nil {
		s.renderError(w, http.StatusInternalServerError, "Internal server error.")
		return
	}
	if s.now().After(arexp) {
		s.renderError(w, http.StatusBadRequest, "User session has expired.")
		return
	}
	// The auth request is consumed here; a not-found error presumably means it
	// was already redeemed once.
	if err := s.storage.Delete(r.Context(), authReqKeyspace, authReq.Id); err != nil {
		if !storage.IsNotFoundErr(err) {
			s.logger.Errorf("Failed to delete authorization request: %v", err)
			s.renderError(w, http.StatusInternalServerError, "Internal server error.")
		} else {
			s.renderError(w, http.StatusBadRequest, "User session error.")
		}
		return
	}
	u, err := url.Parse(authReq.RedirectUri)
	if err != nil {
		s.renderError(w, http.StatusInternalServerError, "Invalid redirect URI.")
		return
	}
	var (
		// Was the initial request using the implicit or hybrid flow instead of
		// the "normal" code flow?
		implicitOrHybrid = false
		// Only present in hybrid or code flow. code.ID == "" if this is not set.
		code *storagepb.AuthCode = &storagepb.AuthCode{}
		// ID token returned immediately if the response_type includes "id_token".
		// Only valid for implicit and hybrid flows.
		idToken string
		idTokenExpiry time.Time
		// Access token
		accessToken string
	)
	for _, responseType := range authReq.ResponseTypes {
		switch responseType {
		case responseTypeCode:
			// Mint a short-lived, single-use authorization code carrying the
			// request's claims and scopes.
			exp := s.now().Add(time.Minute * 30)
			expTS, err := ptypes.TimestampProto(exp)
			if err != nil {
				s.renderError(w, http.StatusInternalServerError, "Internal Error.")
				return
			}
			code = &storagepb.AuthCode{
				Id: storage.NewID(),
				ClientId: authReq.ClientId,
				ConnectorId: authReq.ConnectorId,
				Nonce: authReq.Nonce,
				Scopes: authReq.Scopes,
				Claims: authReq.Claims,
				Expiry: expTS,
				RedirectUri: authReq.RedirectUri,
				ConnectorData: authReq.ConnectorData,
			}
			if _, err := s.storage.PutWithExpiry(r.Context(), authCodeKeyspace, code.Id, 0, code, exp); err != nil {
				s.logger.Errorf("Failed to create auth code: %v", err)
				s.renderError(w, http.StatusInternalServerError, "Internal server error.")
				return
			}
			// Implicit and hybrid flows that try to use the OOB redirect URI are
			// rejected earlier. If we got here we're using the code flow.
			if authReq.RedirectUri == redirectURIOOB {
				if err := s.templates.oob(w, code.Id); err != nil {
					s.logger.Errorf("Server template error: %v", err)
				}
				return
			}
		case responseTypeToken:
			implicitOrHybrid = true
		case responseTypeIDToken:
			implicitOrHybrid = true
			var err error
			accessToken, err = s.newAccessToken(authReq.ClientId, authReq.Claims, authReq.Scopes, authReq.Nonce, authReq.ConnectorId)
			if err != nil {
				s.logger.Errorf("failed to create new access token: %v", err)
				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
				return
			}
			idToken, idTokenExpiry, err = s.newIDToken(authReq.ClientId, authReq.Claims, authReq.Scopes, authReq.Nonce, accessToken, authReq.ConnectorId)
			if err != nil {
				s.logger.Errorf("failed to create ID token: %v", err)
				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
				return
			}
		}
	}
	if implicitOrHybrid {
		v := url.Values{}
		v.Set("access_token", accessToken)
		v.Set("token_type", "bearer")
		v.Set("state", authReq.State)
		if idToken != "" {
			v.Set("id_token", idToken)
			// The hybrid flow with only "code token" or "code id_token" doesn't return an
			// "expires_in" value. If "code" wasn't provided, indicating the implicit flow,
			// don't add it.
			//
			// https://openid.net/specs/openid-connect-core-1_0.html#HybridAuthResponse
			if code.Id == "" {
				v.Set("expires_in", strconv.Itoa(int(idTokenExpiry.Sub(s.now()).Seconds())))
			}
		}
		if code.Id != "" {
			v.Set("code", code.Id)
		}
		// Implicit and hybrid flows return their values as part of the fragment.
		//
		//   HTTP/1.1 303 See Other
		//   Location: https://client.example.org/cb#
		//     access_token=SlAV32hkKG
		//     &token_type=bearer
		//     &id_token=eyJ0 ... NiJ9.eyJ1c ... I6IjIifX0.DeWt4Qu ... ZXso
		//     &expires_in=3600
		//     &state=af0ifjsldkj
		//
		u.Fragment = v.Encode()
	} else {
		// The code flow add values to the URL query.
		//
		//   HTTP/1.1 303 See Other
		//   Location: https://client.example.org/cb?
		//     code=SplxlOBeZQQYbYS6WxSbIA
		//     &state=af0ifjsldkj
		//
		q := u.Query()
		q.Set("code", code.Id)
		q.Set("state", authReq.State)
		u.RawQuery = q.Encode()
	}
	http.Redirect(w, r, u.String(), http.StatusSeeOther)
}
// handleToken serves the OAuth2 token endpoint. It authenticates the client
// (HTTP Basic auth or POST form values) and dispatches on grant_type.
func (s *Server) handleToken(w http.ResponseWriter, r *http.Request) {
	clientID, clientSecret, usedBasicAuth := r.BasicAuth()
	if !usedBasicAuth {
		// No Basic auth header — fall back to form-encoded credentials.
		clientID = r.PostFormValue("client_id")
		clientSecret = r.PostFormValue("client_secret")
	} else {
		// Basic auth credentials are URL-encoded per the OAuth2 spec.
		var decodeErr error
		clientID, decodeErr = url.QueryUnescape(clientID)
		if decodeErr != nil {
			s.tokenErrHelper(w, errInvalidRequest, "client_id improperly encoded", http.StatusBadRequest)
			return
		}
		clientSecret, decodeErr = url.QueryUnescape(clientSecret)
		if decodeErr != nil {
			s.tokenErrHelper(w, errInvalidRequest, "client_secret improperly encoded", http.StatusBadRequest)
			return
		}
	}

	client, err := s.clients.GetClient(clientID)
	if err != nil {
		if isNoSuchClientErr(err) {
			s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized)
		} else {
			s.logger.Errorf("failed to get client: %v", err)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		}
		return
	}
	if client.Secret != clientSecret {
		s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized)
		return
	}

	switch r.PostFormValue("grant_type") {
	case grantTypeAuthorizationCode:
		s.handleAuthCode(w, r, client)
	case grantTypeRefreshToken:
		s.handleRefreshToken(w, r, client)
	default:
		s.tokenErrHelper(w, errInvalidGrant, "", http.StatusBadRequest)
	}
}
// handleAuthCode handles an access token request for the authorization_code
// grant, https://tools.ietf.org/html/rfc6749#section-4.1.3
//
// On success it writes an access token, ID token and (when the connector
// supports it and offline_access was requested) a refresh token.
func (s *Server) handleAuthCode(w http.ResponseWriter, r *http.Request, client *Client) {
	code := r.PostFormValue("code")
	redirectURI := r.PostFormValue("redirect_uri")
	authCode := &storagepb.AuthCode{}
	_, err := s.storage.Get(r.Context(), authCodeKeyspace, code, authCode)
	if err != nil {
		if !storage.IsNotFoundErr(err) {
			s.logger.Errorf("failed to get auth code: %v", err)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		} else {
			s.tokenErrHelper(w, errInvalidRequest, "Invalid or expired code parameter.", http.StatusBadRequest)
		}
		return
	}
	// The code must be unexpired and bound to the authenticated client.
	authCodeExp, err := ptypes.Timestamp(authCode.Expiry)
	if err != nil || s.now().After(authCodeExp) || authCode.ClientId != client.ID {
		s.tokenErrHelper(w, errInvalidRequest, "Invalid or expired code parameter.", http.StatusBadRequest)
		return
	}
	if authCode.RedirectUri != redirectURI {
		s.tokenErrHelper(w, errInvalidRequest, "redirect_uri did not match URI from initial request.", http.StatusBadRequest)
		return
	}
	accessToken, err := s.newAccessToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, authCode.ConnectorId)
	if err != nil {
		s.logger.Errorf("failed to create new access token: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	idToken, expiry, err := s.newIDToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, accessToken, authCode.ConnectorId)
	if err != nil {
		s.logger.Errorf("failed to create ID token: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	// Codes are single-use: delete before handing out the tokens.
	if err := s.storage.Delete(r.Context(), authCodeKeyspace, code); err != nil {
		s.logger.Errorf("failed to delete auth code: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	reqRefresh := func() bool {
		// Ensure the connector supports refresh tokens.
		//
		// Connectors like `saml` do not implement RefreshConnector.
		conn, ok := s.connectors[authCode.ConnectorId]
		if !ok {
			// Previously logged the stale `err` value (nil at this point);
			// the missing connector ID is the relevant information.
			s.logger.Errorf("connector with ID %q not found", authCode.ConnectorId)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
			return false
		}
		_, ok = conn.(RefreshConnector)
		if !ok {
			return false
		}
		// Only mint a refresh token when the client asked for offline access.
		for _, scope := range authCode.Scopes {
			if scope == scopeOfflineAccess {
				return true
			}
		}
		return false
	}()
	var refreshToken string
	if reqRefresh {
		nowts, err := ptypes.TimestampProto(s.now())
		if err != nil {
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
			return
		}
		refresh := &storagepb.RefreshToken{
			Id:            storage.NewID(),
			Token:         storage.NewID(),
			ClientId:      authCode.ClientId,
			ConnectorId:   authCode.ConnectorId,
			Scopes:        authCode.Scopes,
			Claims:        authCode.Claims,
			Nonce:         authCode.Nonce,
			ConnectorData: authCode.ConnectorData,
			CreatedAt:     nowts,
			LastUsed:      nowts,
		}
		token := &internal.RefreshToken{
			RefreshId: refresh.Id,
			Token:     refresh.Token,
		}
		if refreshToken, err = internal.Marshal(token); err != nil {
			s.logger.Errorf("failed to marshal refresh token: %v", err)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
			return
		}
		if _, err := s.storage.Put(r.Context(), refreshTokenKeyspace, refresh.Id, 0, refresh); err != nil {
			s.logger.Errorf("failed to create refresh token: %v", err)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
			return
		}
		// deleteToken determines if we need to delete the newly created refresh token
		// due to a failure in updating/creating the OfflineSession object for the
		// corresponding user.
		var deleteToken bool
		defer func() {
			if deleteToken {
				// Delete newly created refresh token from storage.
				if err := s.storage.Delete(r.Context(), refreshTokenKeyspace, refresh.Id); err != nil {
					s.logger.Errorf("failed to delete refresh token: %v", err)
					s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
					return
				}
			}
		}()
		tokenRef := &storagepb.RefreshTokenRef{
			Id:        refresh.Id,
			ClientId:  refresh.ClientId,
			CreatedAt: refresh.CreatedAt,
			LastUsed:  refresh.LastUsed,
		}
		// Try to retrieve an existing OfflineSession object for the corresponding user.
		session := &storagepb.OfflineSessions{}
		if sessVer, err := s.storage.Get(r.Context(), offlineSessionsKeyspace, offlineSessionID(refresh.Claims.UserId, refresh.ConnectorId), session); err != nil {
			if !storage.IsNotFoundErr(err) {
				s.logger.Errorf("failed to get offline session: %v", err)
				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
				deleteToken = true
				return
			}
			offlineSessions := &storagepb.OfflineSessions{
				UserId:  refresh.Claims.UserId,
				ConnId:  refresh.ConnectorId,
				Refresh: make(map[string]*storagepb.RefreshTokenRef),
			}
			offlineSessions.Refresh[tokenRef.ClientId] = tokenRef
			// Create a new OfflineSession object for the user and add a reference object for
			// the newly received refreshtoken.
			if _, err := s.storage.Put(r.Context(), offlineSessionsKeyspace, offlineSessionID(refresh.Claims.UserId, refresh.ConnectorId), 0, offlineSessions); err != nil {
				s.logger.Errorf("failed to create offline session: %v", err)
				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
				deleteToken = true
				return
			}
		} else {
			if oldTokenRef, ok := session.Refresh[tokenRef.ClientId]; ok {
				// Delete old refresh token from storage.
				if err := s.storage.Delete(r.Context(), refreshTokenKeyspace, oldTokenRef.Id); err != nil {
					s.logger.Errorf("failed to delete refresh token: %v", err)
					s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
					deleteToken = true
					return
				}
			}
			// Update existing OfflineSession obj with new RefreshTokenRef.
			session.Refresh[tokenRef.ClientId] = tokenRef
			if _, err := s.storage.Put(r.Context(), offlineSessionsKeyspace, offlineSessionID(refresh.Claims.UserId, refresh.ConnectorId), sessVer, session); err != nil {
				s.logger.Errorf("failed to update offline session: %v", err)
				s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
				deleteToken = true
				return
			}
		}
	}
	s.writeAccessToken(w, idToken, accessToken, refreshToken, expiry)
}
// handleRefreshToken handles a refresh token grant,
// https://tools.ietf.org/html/rfc6749#section-6
//
// The stored refresh token is rotated on every use: a new opaque Token value
// replaces the old one under the same RefreshId, so a token value can never
// be redeemed twice.
func (s *Server) handleRefreshToken(w http.ResponseWriter, r *http.Request, client *Client) {
	code := r.PostFormValue("refresh_token")
	scope := r.PostFormValue("scope")
	if code == "" {
		s.tokenErrHelper(w, errInvalidRequest, "No refresh token in request.", http.StatusBadRequest)
		return
	}
	token := new(internal.RefreshToken)
	if err := internal.Unmarshal(code, token); err != nil {
		// For backward compatibility, assume the refresh_token is a raw refresh token ID
		// if it fails to decode.
		//
		// Because refresh_token values that aren't unmarshable were generated by servers
		// that don't have a Token value, we'll still reject any attempts to claim a
		// refresh_token twice.
		token = &internal.RefreshToken{RefreshId: code, Token: ""}
	}
	refresh := &storagepb.RefreshToken{}
	refreshVers, err := s.storage.Get(r.Context(), refreshTokenKeyspace, token.RefreshId, refresh)
	if err != nil {
		s.logger.Errorf("failed to get refresh token: %v", err)
		if storage.IsNotFoundErr(err) {
			s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest)
		} else {
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		}
		return
	}
	if refresh.ClientId != client.ID {
		s.logger.Errorf("client %s trying to claim token for client %s", client.ID, refresh.ClientId)
		s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest)
		return
	}
	if refresh.Token != token.Token {
		s.logger.Errorf("refresh token with id %s claimed twice", refresh.Id)
		s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest)
		return
	}
	// Per the OAuth2 spec, if the client has omitted the scopes, default to the original
	// authorized scopes.
	//
	// https://tools.ietf.org/html/rfc6749#section-6
	scopes := refresh.Scopes
	if scope != "" {
		requestedScopes := strings.Fields(scope)
		var unauthorizedScopes []string
		// Renamed the loop variable from `s`, which shadowed the receiver.
		for _, requested := range requestedScopes {
			contains := func() bool {
				for _, authorized := range refresh.Scopes {
					if requested == authorized {
						return true
					}
				}
				return false
			}()
			if !contains {
				unauthorizedScopes = append(unauthorizedScopes, requested)
			}
		}
		if len(unauthorizedScopes) > 0 {
			msg := fmt.Sprintf("Requested scopes contain unauthorized scope(s): %q.", unauthorizedScopes)
			s.tokenErrHelper(w, errInvalidRequest, msg, http.StatusBadRequest)
			return
		}
		scopes = requestedScopes
	}
	conn, ok := s.connectors[refresh.ConnectorId]
	if !ok {
		s.logger.Errorf("connector with ID %q not found", refresh.ConnectorId)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	ident := Identity{
		UserID:        refresh.Claims.UserId,
		Username:      refresh.Claims.Username,
		Email:         refresh.Claims.Email,
		EmailVerified: refresh.Claims.EmailVerified,
		Groups:        refresh.Claims.Groups,
		AMR:           refresh.Claims.Amr,
		ConnectorData: refresh.ConnectorData,
	}
	if refresh.Claims.Acr != nil {
		acr := refresh.Claims.Acr.Value
		ident.ACR = &acr
	}
	// Can the connector refresh the identity? If so, attempt to refresh the data
	// in the connector.
	//
	// TODO(ericchiang): We may want a strict mode where connectors that don't implement
	// this interface can't perform refreshing.
	if refreshConn, ok := conn.(RefreshConnector); ok {
		newIdent, err := refreshConn.Refresh(r.Context(), parseScopes(scopes), ident)
		if isRetryableErr(err) {
			s.logger.Errorf("failed to refresh identity: %v", err)
			s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
			return
		} else if err != nil {
			s.logger.Errorf("failed to refresh identity: %v", err)
			s.tokenErrHelper(w, errInvalidGrant, "", http.StatusBadRequest)
			return
		}
		ident = newIdent
	}
	claims := &storagepb.Claims{
		UserId:        ident.UserID,
		Username:      ident.Username,
		Email:         ident.Email,
		EmailVerified: ident.EmailVerified,
		Groups:        ident.Groups,
		Amr:           ident.AMR,
	}
	if ident.ACR != nil {
		claims.Acr = &wrappers.StringValue{Value: *ident.ACR}
	}
	accessToken, err := s.newAccessToken(client.ID, claims, scopes, refresh.Nonce, refresh.ConnectorId)
	if err != nil {
		s.logger.Errorf("failed to create new access token: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	idToken, expiry, err := s.newIDToken(client.ID, claims, scopes, refresh.Nonce, accessToken, refresh.ConnectorId)
	if err != nil {
		s.logger.Errorf("failed to create ID token: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	// Rotate: issue a fresh opaque Token value under the same RefreshId.
	newToken := &internal.RefreshToken{
		RefreshId: refresh.Id,
		Token:     storage.NewID(),
	}
	rawNewToken, err := internal.Marshal(newToken)
	if err != nil {
		s.logger.Errorf("failed to marshal refresh token: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	lastUsed, err := ptypes.TimestampProto(s.now())
	if err != nil {
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	// Update LastUsed time stamp in refresh token reference object
	// in offline session for the user.
	offlineSession := &storagepb.OfflineSessions{}
	offlineSessionVers, err := s.storage.Get(r.Context(), offlineSessionsKeyspace, offlineSessionID(refresh.Claims.UserId, refresh.ConnectorId), offlineSession)
	if err != nil {
		s.logger.Errorf("failed to fetch offline session for update: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	// Guard against a missing map entry before dereferencing: previously a
	// session with no entry for this client caused a nil pointer panic.
	sessionTokenRef, ok := offlineSession.Refresh[refresh.ClientId]
	if !ok || sessionTokenRef.Id != refresh.Id {
		s.tokenErrHelper(w, errServerError, "Offline Session Invalid", http.StatusInternalServerError)
		return
	}
	// sessionTokenRef points into the session's Refresh map, so this mutation
	// is persisted by the Put below.
	sessionTokenRef.LastUsed = lastUsed
	if _, err := s.storage.Put(r.Context(), offlineSessionsKeyspace, offlineSessionID(refresh.Claims.UserId, refresh.ConnectorId), offlineSessionVers, offlineSession); err != nil {
		s.logger.Errorf("failed to update offline session: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	refresh.Token = newToken.Token
	// Update the claims of the refresh token.
	//
	// UserID intentionally ignored for now.
	refresh.Claims.Username = ident.Username
	refresh.Claims.Email = ident.Email
	refresh.Claims.EmailVerified = ident.EmailVerified
	refresh.Claims.Groups = ident.Groups
	refresh.ConnectorData = ident.ConnectorData
	refresh.LastUsed = lastUsed
	// Update refresh token in the storage. A version conflict means another
	// request claimed this token concurrently.
	if _, err := s.storage.Put(r.Context(), refreshTokenKeyspace, refresh.Id, refreshVers, refresh); err != nil {
		if storage.IsConflictErr(err) {
			s.tokenErrHelper(w, errServerError, "refresh token claimed twice", http.StatusInternalServerError)
			return
		}
		s.logger.Errorf("failed to update refresh token: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	s.logger.WithFields(logrus.Fields{
		"connector": refresh.ConnectorId,
		"userid":    refresh.Claims.UserId,
		"username":  refresh.Claims.Username,
		"email":     refresh.Claims.Email,
		"groups":    refresh.Claims.Groups,
		"acr":       refresh.Claims.Acr,
		"amr":       refresh.Claims.Amr,
		"at":        "refresh-successful",
	}).Info()
	s.writeAccessToken(w, idToken, accessToken, rawNewToken, expiry)
}
// handleUserInfo serves the OIDC userinfo endpoint: it verifies the bearer
// token presented in the Authorization header and returns the token's claims
// as JSON.
func (s *Server) handleUserInfo(w http.ResponseWriter, r *http.Request) {
	const prefix = "Bearer "
	authHeader := r.Header.Get("authorization")
	hasBearer := len(authHeader) >= len(prefix) && strings.EqualFold(prefix, authHeader[:len(prefix)])
	if !hasBearer {
		w.Header().Set("WWW-Authenticate", "Bearer")
		s.tokenErrHelper(w, errAccessDenied, "Invalid bearer token.", http.StatusUnauthorized)
		return
	}
	rawToken := authHeader[len(prefix):]

	// SkipClientIDCheck: the token's audience is not validated here.
	verifier := oidc.NewVerifier(s.issuerURL.String(), s.signer, &oidc.Config{SkipClientIDCheck: true})
	token, err := verifier.Verify(r.Context(), rawToken)
	if err != nil {
		s.tokenErrHelper(w, errAccessDenied, err.Error(), http.StatusForbidden)
		return
	}

	var claims json.RawMessage
	if err := token.Claims(&claims); err != nil {
		s.tokenErrHelper(w, errServerError, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if _, err := w.Write(claims); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// writeAccessToken renders the standard OAuth2 token endpoint JSON response.
// expires_in is computed relative to the server's clock at write time.
func (s *Server) writeAccessToken(w http.ResponseWriter, idToken, accessToken, refreshToken string, expiry time.Time) {
	type tokenResponse struct {
		AccessToken  string `json:"access_token"`
		TokenType    string `json:"token_type"`
		ExpiresIn    int    `json:"expires_in"`
		RefreshToken string `json:"refresh_token,omitempty"`
		IDToken      string `json:"id_token"`
	}
	payload := tokenResponse{
		AccessToken:  accessToken,
		TokenType:    "bearer",
		ExpiresIn:    int(expiry.Sub(s.now()).Seconds()),
		RefreshToken: refreshToken,
		IDToken:      idToken,
	}
	data, err := json.Marshal(payload)
	if err != nil {
		s.logger.Errorf("failed to marshal access token response: %v", err)
		s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", strconv.Itoa(len(data)))
	if _, err := w.Write(data); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// renderError writes an error page with the given HTTP status and
// human-readable description. Template failures are logged, not surfaced.
func (s *Server) renderError(w http.ResponseWriter, status int, description string) {
	if err := s.templates.err(w, status, description); err != nil {
		s.logger.Errorf("Server template error: %v", err)
	}
}
// tokenErrHelper writes an OAuth2-style JSON error response for the token
// endpoint, logging any failure to render it.
func (s *Server) tokenErrHelper(w http.ResponseWriter, typ string, description string, statusCode int) {
	if err := tokenErr(w, typ, description, statusCode); err != nil {
		s.logger.Errorf("token error response: %v", err)
	}
}
// offlineSessionID derives the storage key for a user's offline session from
// the user ID and connector ID. Both parts are base64-encoded, so arbitrary
// ID contents cannot collide across the "-" separator.
func offlineSessionID(userID, connID string) string {
	enc := base64.StdEncoding.EncodeToString
	return fmt.Sprintf("%s-%s", enc([]byte(userID)), enc([]byte(connID)))
}
// asLoginRequest converts a stored auth request into the LoginRequest shape
// handed to connectors' login pages.
func asLoginRequest(authReq *storagepb.AuthRequest) LoginRequest {
	return LoginRequest{
		AuthID: authReq.Id,
		Scopes: parseScopes(authReq.Scopes),
		ACRValues: authReq.AcrValues,
	}
}
|
package service
import (
"bytes"
"encoding/json"
"fmt"
permissions "github.com/carprks/permissions/service"
"io/ioutil"
"net/http"
"os"
"time"
)
// AllowedHandler decodes a permissions payload from the request body, asks
// the permissions service what is allowed, and returns the JSON-encoded
// decision.
func AllowedHandler(body string) (string, error) {
	r := permissions.Permissions{}
	err := json.Unmarshal([]byte(body), &r)
	if err != nil {
		fmt.Printf("can't unmarshall input: %v, %v\n", err, body)
		return "", fmt.Errorf("can't unmarshall input: %w", err)
	}
	rf, err := Allowed(r)
	if err != nil {
		fmt.Printf("can't get allowed: %v, %v\n", err, r)
		return "", fmt.Errorf("can't get allowed: %w", err)
	}
	rfb, err := json.Marshal(rf)
	if err != nil {
		fmt.Printf("can't marshal allowed: %v, %v\n", err, rf)
		// The wrapped error previously said "unmarshal" even though this is
		// the marshal step.
		return "", fmt.Errorf("can't marshal allowed: %w", err)
	}
	return string(rfb), nil
}
// Allowed forwards the permissions object to the permissions service's
// /allowed endpoint and returns the service's decision.
//
// The service base URL and auth token come from the SERVICE_PERMISSIONS and
// AUTH_PERMISSIONS environment variables.
func Allowed(p permissions.Permissions) (permissions.Permissions, error) {
	pr := permissions.Permissions{}
	j, err := json.Marshal(&p)
	if err != nil {
		// This is the marshal step; the messages previously said "unmarshal".
		fmt.Printf("can't marshal permissions: %v, %v\n", err, p)
		return pr, fmt.Errorf("can't marshal permissions: %w", err)
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/allowed", os.Getenv("SERVICE_PERMISSIONS")), bytes.NewBuffer(j))
	if err != nil {
		fmt.Printf("req err: %v\n", err)
		return pr, fmt.Errorf("req allowed err: %w", err)
	}
	req.Header.Set("X-Authorization", os.Getenv("AUTH_PERMISSIONS"))
	req.Header.Set("Content-Type", "application/json")
	// NOTE(review): a fresh client (and transport) per call defeats connection
	// reuse across calls; consider a package-level client if this path is hot.
	client := &http.Client{
		Timeout: time.Second * 10,
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
			IdleConnTimeout:     2 * time.Minute,
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Printf("verify client err: %v\n", err)
		return pr, fmt.Errorf("verify client err: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return pr, fmt.Errorf("allowed came back with a different statuscode: %v", resp.StatusCode)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Printf("verify resp err: %v\n", err)
		return pr, fmt.Errorf("verify resp err: %w", err)
	}
	if err := json.Unmarshal(body, &pr); err != nil {
		fmt.Printf("can't unmarshall permissions: %v, %v\n", err, string(body))
		return pr, fmt.Errorf("can't unmarshall permissions: %w", err)
	}
	return pr, nil
}
|
package main
import "fmt"
// main demonstrates a checked type assertion on an empty interface.
func main() {
	var i interface{}
	i = 10
	// value, ok := iface.(T) — ok reports whether the assertion succeeded.
	value, ok := i.(int)
	if !ok {
		fmt.Println("错误")
		return
	}
	fmt.Println("整型数据:", value)
}
package services
import (
"log"
"os"
"time"
)
// LoggingService writes context-tagged log lines, but only when the
// SERVER_DEBUG environment variable is set to "true".
type LoggingService struct {
	Context string
}

// NewLoggingService returns a LoggingService tagged with the given context.
func NewLoggingService(context string) *LoggingService {
	return &LoggingService{Context: context}
}

// Log prints the message with a UTC timestamp and the service's context tag.
// It is a no-op unless SERVER_DEBUG=true.
func (l *LoggingService) Log(message string) {
	if os.Getenv("SERVER_DEBUG") != "true" {
		return
	}
	log.Println(time.Now().UTC(), " ", "[", l.Context, "]", " ", message)
}
package main
import (
"net/http"
"math/rand"
"time"
"net/url"
"strings"
)
// Spider performs a single HTTP request described by its fields and stores
// the result in Response.
type Spider struct {
	UserAgent   string
	Method      string
	URL         string
	ContentType string
	Referer     string
	Data        url.Values
	Response    *http.Response
}

// do sends the request described by the Spider's fields, form-encoding Data
// as the body. The response is stored on spider.Response; the caller is
// responsible for closing its Body.
func (spider *Spider) do() error {
	client := &http.Client{}
	req, err := http.NewRequest(spider.Method, spider.URL, strings.NewReader(spider.Data.Encode()))
	if err != nil {
		return err
	}
	// The previous `if len(field) == 0 { field = "" }` branches for
	// ContentType and Referer were no-ops and have been removed; empty
	// values are simply set as empty headers, as before.
	req.Header.Set("Content-Type", spider.ContentType)
	// Pick a random user agent when the caller didn't supply one.
	if len(spider.UserAgent) == 0 {
		spider.UserAgent = spider.getUA()
	}
	req.Header.Set("User-Agent", spider.UserAgent)
	req.Header.Set("Referer", spider.Referer)
	spider.Response, err = client.Do(req)
	if err != nil {
		return err
	}
	return nil
}

// getResponse returns the response stored by the last successful do call.
func (spider *Spider) getResponse() *http.Response {
	return spider.Response
}

// getUA returns a random desktop-browser user-agent string.
func (spider *Spider) getUA() string {
	// Seed with nanoseconds: the previous second-resolution seed made every
	// call within the same second return the same UA.
	rand.Seed(time.Now().UnixNano())
	UAs := []string{
		"Mozilla/5.0 (X11; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0",
		"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0",
		"Mozilla/5.0 (X11; Linux i586; rv:63.0) Gecko/20100101 Firefox/63.0",
		"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:63.0) Gecko/20100101 Firefox/63.0",
		"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:10.0) Gecko/20100101 Firefox/62.0",
		"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.13; ko; rv:1.9.1b2) Gecko/20081201 Firefox/60.0",
		"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/58.0",
		"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
		"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14931",
		"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36",
		"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
	}
	return UAs[rand.Intn(len(UAs))]
}
|
package state
import "github.com/guregu/null"
// PostgresRelationStats holds cumulative counters, size and age
// information collected for one table (pg_stat/pg_statio style data plus
// pg_class fields). Counter fields grow monotonically and are diffed in
// DiffSince; the remaining fields are point-in-time gauges.
type PostgresRelationStats struct {
	SizeBytes int64 // On-disk size including FSM and VM, plus TOAST table if any, excluding indices
	ToastSizeBytes int64 // TOAST table and TOAST index size (included in SizeBytes as well)
	SeqScan int64 // Number of sequential scans initiated on this table
	SeqTupRead int64 // Number of live rows fetched by sequential scans
	IdxScan int64 // Number of index scans initiated on this table
	IdxTupFetch int64 // Number of live rows fetched by index scans
	NTupIns int64 // Number of rows inserted
	NTupUpd int64 // Number of rows updated
	NTupDel int64 // Number of rows deleted
	NTupHotUpd int64 // Number of rows HOT updated (i.e., with no separate index update required)
	NLiveTup int64 // Estimated number of live rows
	NDeadTup int64 // Estimated number of dead rows
	NModSinceAnalyze int64 // Estimated number of rows modified since this table was last analyzed
	NInsSinceVacuum int64 // Estimated number of rows inserted since this table was last vacuumed
	LastVacuum null.Time // Last time at which this table was manually vacuumed (not counting VACUUM FULL)
	LastAutovacuum null.Time // Last time at which this table was vacuumed by the autovacuum daemon
	LastAnalyze null.Time // Last time at which this table was manually analyzed
	LastAutoanalyze null.Time // Last time at which this table was analyzed by the autovacuum daemon
	VacuumCount int64 // Number of times this table has been manually vacuumed (not counting VACUUM FULL)
	AutovacuumCount int64 // Number of times this table has been vacuumed by the autovacuum daemon
	AnalyzeCount int64 // Number of times this table has been manually analyzed
	AutoanalyzeCount int64 // Number of times this table has been analyzed by the autovacuum daemon
	HeapBlksRead int64 // Number of disk blocks read from this table
	HeapBlksHit int64 // Number of buffer hits in this table
	IdxBlksRead int64 // Number of disk blocks read from all indexes on this table
	IdxBlksHit int64 // Number of buffer hits in all indexes on this table
	ToastBlksRead int64 // Number of disk blocks read from this table's TOAST table (if any)
	ToastBlksHit int64 // Number of buffer hits in this table's TOAST table (if any)
	TidxBlksRead int64 // Number of disk blocks read from this table's TOAST table indexes (if any)
	TidxBlksHit int64 // Number of buffer hits in this table's TOAST table indexes (if any)
	FrozenXIDAge int32 // Age of frozen XID for this table
	MinMXIDAge int32 // Age of minimum multixact ID for this table
	Relpages int32 // Size of the on-disk representation of this table in pages (of size BLCKSZ)
	Reltuples float32 // Number of live rows in the table. -1 indicating that the row count is unknown
	Relallvisible int32 // Number of pages that are marked all-visible in the table's visibility map
	ExclusivelyLocked bool // Whether these statistics are zeroed out because the table was locked at collection time
	ToastReltuples float32 // Number of live rows in the TOAST table. -1 indicating that the row count is unknown
	ToastRelpages int32 // Size of the on-disk representation of the TOAST table in pages (of size BLCKSZ)
}
// PostgresIndexStats holds cumulative usage and I/O counters for one
// index, plus its on-disk size.
type PostgresIndexStats struct {
	SizeBytes int64 // On-disk size of the index
	IdxScan int64 // Number of index scans initiated on this index
	IdxTupRead int64 // Number of index entries returned by scans on this index
	IdxTupFetch int64 // Number of live table rows fetched by simple index scans using this index
	IdxBlksRead int64 // Number of disk blocks read from this index
	IdxBlksHit int64 // Number of buffer hits in this index
	ExclusivelyLocked bool // Whether these statistics are zeroed out because the index was locked at collection time
}
// PostgresColumnStats mirrors one row of the pg_stats view: planner
// statistics for a single column (possibly one entry per inheritance
// state).
type PostgresColumnStats struct {
	SchemaName  string
	TableName   string
	ColumnName  string
	Inherited   bool       // whether the stats include inheritance children
	NullFrac    float64    // fraction of entries that are NULL
	AvgWidth    int32      // average width of the column's entries in bytes
	NDistinct   float64    // distinct-value estimate (negative = fraction of rows)
	Correlation null.Float // physical/logical ordering correlation; may be NULL
}
// PostgresColumnStatsKey - Information that uniquely identifies column stats
// (schema + table + column) within one database.
type PostgresColumnStatsKey struct {
	SchemaName string
	TableName  string
	ColumnName string
}
// PostgresRelationStatsMap indexes relation stats by table OID.
type PostgresRelationStatsMap map[Oid]PostgresRelationStats

// PostgresIndexStatsMap indexes index stats by index OID.
type PostgresIndexStatsMap map[Oid]PostgresIndexStats

// PostgresColumnStatsMap groups column stats by their identifying key
// (multiple entries per key, e.g. inherited and non-inherited).
type PostgresColumnStatsMap map[PostgresColumnStatsKey][]PostgresColumnStats

// Diffed variants carry deltas between two snapshots (see DiffSince);
// the field layout is identical to the base types.
type DiffedPostgresRelationStats PostgresRelationStats
type DiffedPostgresIndexStats PostgresIndexStats
type DiffedPostgresRelationStatsMap map[Oid]DiffedPostgresRelationStats
type DiffedPostgresIndexStatsMap map[Oid]DiffedPostgresIndexStats
// DiffSince returns the change between two snapshots of the same table:
// monotonically increasing counters become deltas (curr - prev), while
// gauge-like fields (sizes, row estimates, timestamps, ages, page counts)
// are passed through from curr unchanged.
//
// NOTE(review): ExclusivelyLocked is not copied into the result, so it is
// always false in the diff — confirm this is intentional.
func (curr PostgresRelationStats) DiffSince(prev PostgresRelationStats) DiffedPostgresRelationStats {
	return DiffedPostgresRelationStats{
		SizeBytes:        curr.SizeBytes,
		ToastSizeBytes:   curr.ToastSizeBytes,
		SeqScan:          curr.SeqScan - prev.SeqScan,
		SeqTupRead:       curr.SeqTupRead - prev.SeqTupRead,
		IdxScan:          curr.IdxScan - prev.IdxScan,
		IdxTupFetch:      curr.IdxTupFetch - prev.IdxTupFetch,
		NTupIns:          curr.NTupIns - prev.NTupIns,
		NTupUpd:          curr.NTupUpd - prev.NTupUpd,
		NTupDel:          curr.NTupDel - prev.NTupDel,
		NTupHotUpd:       curr.NTupHotUpd - prev.NTupHotUpd,
		NLiveTup:         curr.NLiveTup,
		NDeadTup:         curr.NDeadTup,
		NModSinceAnalyze: curr.NModSinceAnalyze,
		NInsSinceVacuum:  curr.NInsSinceVacuum,
		LastVacuum:       curr.LastVacuum,
		LastAutovacuum:   curr.LastAutovacuum,
		LastAnalyze:      curr.LastAnalyze,
		LastAutoanalyze:  curr.LastAutoanalyze,
		VacuumCount:      curr.VacuumCount - prev.VacuumCount,
		AutovacuumCount:  curr.AutovacuumCount - prev.AutovacuumCount,
		AnalyzeCount:     curr.AnalyzeCount - prev.AnalyzeCount,
		AutoanalyzeCount: curr.AutoanalyzeCount - prev.AutoanalyzeCount,
		HeapBlksRead:     curr.HeapBlksRead - prev.HeapBlksRead,
		HeapBlksHit:      curr.HeapBlksHit - prev.HeapBlksHit,
		IdxBlksRead:      curr.IdxBlksRead - prev.IdxBlksRead,
		IdxBlksHit:       curr.IdxBlksHit - prev.IdxBlksHit,
		ToastBlksRead:    curr.ToastBlksRead - prev.ToastBlksRead,
		ToastBlksHit:     curr.ToastBlksHit - prev.ToastBlksHit,
		TidxBlksRead:     curr.TidxBlksRead - prev.TidxBlksRead,
		TidxBlksHit:      curr.TidxBlksHit - prev.TidxBlksHit,
		FrozenXIDAge:     curr.FrozenXIDAge,
		MinMXIDAge:       curr.MinMXIDAge,
		Relpages:         curr.Relpages,
		Reltuples:        curr.Reltuples,
		Relallvisible:    curr.Relallvisible,
		ToastReltuples:   curr.ToastReltuples,
		ToastRelpages:    curr.ToastRelpages,
	}
}
// DiffSince returns counter deltas (curr - prev) for the cumulative index
// statistics; SizeBytes is a gauge and is passed through from curr.
//
// NOTE(review): ExclusivelyLocked is not copied into the result (always
// false in the diff) — confirm this matches the relation-stats handling.
func (curr PostgresIndexStats) DiffSince(prev PostgresIndexStats) DiffedPostgresIndexStats {
	return DiffedPostgresIndexStats{
		SizeBytes:   curr.SizeBytes,
		IdxScan:     curr.IdxScan - prev.IdxScan,
		IdxTupRead:  curr.IdxTupRead - prev.IdxTupRead,
		IdxTupFetch: curr.IdxTupFetch - prev.IdxTupFetch,
		IdxBlksRead: curr.IdxBlksRead - prev.IdxBlksRead,
		IdxBlksHit:  curr.IdxBlksHit - prev.IdxBlksHit,
	}
}
|
package main
import (
"flag"
"fmt"
"os"
"github.com/lestrrat-go/file-rotatelogs"
"github.com/sirupsen/logrus"
. "IRIS_WEB/config"
"IRIS_WEB/utility/db"
"IRIS_WEB/web"
)
// main wires up the service: configuration, rotating file logging, MySQL
// and Redis connections, then blocks serving HTTP via the iris framework.
// Any startup failure is reported by checkErr, which exits the process.
func main() {
	// Initialize the configuration file.
	flag.Parse()
	fmt.Print("InitConfig...\r")
	checkErr("InitConfig", InitConfig())
	fmt.Print("InitConfig Success!!!\n")
	// Create the file logger. NOTE(review): the original comment claims
	// daily rotation with one-week retention, but no rotation options are
	// passed here — presumably Conf.LogPath encodes the pattern; verify.
	w, err := rotatelogs.New(Conf.LogPath)
	checkErr("CreateRotateLog", err)
	// Configure structured JSON logging with caller information.
	logrus.SetOutput(w)
	logrus.SetFormatter(&logrus.JSONFormatter{})
	logrus.SetReportCaller(true)
	// Start MySQL. Note: checkErr exits via os.Exit on failure, which
	// skips these defers — they only run on a normal return.
	defer db.CloseMysql()
	fmt.Print("StartMysql...\r")
	checkErr("StartMysql", db.StartMysql(Conf.MysqlDsn, Conf.MysqlMaxIdle, Conf.MysqlMaxOpen))
	fmt.Print("StartMysql Success!!!\n")
	// Start Redis.
	defer db.CloseRedis()
	fmt.Print("StartRedis...\r")
	checkErr("StartRedis", db.StartRedis(Conf.RedisAddr, Conf.RedisDB, Conf.RedisMaxIdle, Conf.RedisMaxOpen))
	fmt.Print("StartRedis Success!!!\n")
	// Run the iris framework (blocks until the server stops).
	fmt.Print("RunIris...\r")
	web.RunIris(Conf.ServerPort)
}
// checkErr prints "<errMsg> Error: <err>" and terminates the process with
// exit status 1 when err is non-nil; a nil error is a no-op.
// Note: os.Exit skips any deferred functions registered in main.
func checkErr(errMsg string, err error) {
	if err == nil {
		return
	}
	fmt.Printf("%s Error: %v\n", errMsg, err)
	os.Exit(1)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package hwsec
import (
"context"
hwsecremote "chromiumos/tast/remote/hwsec"
"chromiumos/tast/testing"
)
// init registers the ClearOwnership test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:     ClearOwnership,
		Desc:     "Verifies that the TPM ownership can be cleared",
		Contacts: []string{"cylai@chromium.org", "cros-hwsec@google.com"},
		// Clearing ownership requires a reboot and a real TPM.
		SoftwareDeps: []string{"reboot", "tpm"},
		// Destructive: wipes TPM state, so it runs in a dedicated group.
		Attr:        []string{"group:hwsec_destructive_func"},
		ServiceDeps: []string{"tast.cros.hwsec.AttestationDBusService"},
	})
}
// ClearOwnership resets the TPM on the DUT (rebooting if necessary) and
// fails the test when the reset cannot be confirmed.
func ClearOwnership(ctx context.Context, s *testing.State) {
	// Remote command runner and helper operate on the device under test.
	r := hwsecremote.NewCmdRunner(s.DUT())
	helper, err := hwsecremote.NewFullHelper(r, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Helper creation error: ", err)
	}
	s.Log("Start resetting TPM if needed")
	if err := helper.EnsureTPMIsReset(ctx); err != nil {
		s.Fatal("Failed to ensure resetting TPM: ", err)
	}
	s.Log("TPM is confirmed to be reset")
}
|
package gevent
import (
"sync"
)
/* ================================================================================
* gevent
* qq group: 582452342
* email : 2091938785@qq.com
* author : 美丽的地球啊 - mliu
* ================================================================================ */
type (
	// ISubscriberHandler is implemented by subscribers that know which
	// channel/event they listen to and how to handle a delivered Event.
	ISubscriberHandler interface {
		GetChannelName() string
		GetEventName() string
		Handler(*Event)
	}

	// SubscriberList is a priority-sortable collection of subscribers;
	// Len/Less/Swap below implement sort.Interface.
	SubscriberList []*Subscriber

	// Subscriber couples an event handler with delivery bookkeeping.
	Subscriber struct {
		Priority     int          //subscription priority
		Repeat       int          //Number of triggers(0:Infinite | 1:once | ...)
		Counts       sync.Map     //event counter
		handler      EventHandler //event processor
		creationDate int64        //subscription time (nanoseconds)
	}
)
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* sort Interface - collection length
* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
// Len returns the number of subscribers (sort.Interface).
func (list SubscriberList) Len() int {
	return len(list)
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* sort interface - collection item comparison
* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
// Less reports whether subscriber i must sort before subscriber j
// (sort.Interface); ordering is by ascending Priority, so lower values
// come first.
//
// Idiom fix: the `if cond { return true }; return false` form is
// collapsed into returning the comparison directly (staticcheck S1008).
func (list SubscriberList) Less(i, j int) bool {
	return list[i].Priority < list[j].Priority
}
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* sort interface - swap collection items
* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
// Swap exchanges the subscribers at positions i and j (sort.Interface).
func (list SubscriberList) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
|
package config
import (
"log"
"os"
"github.com/joho/godotenv"
)
//AppName : application name (env APP_NAME; defaults to "My App")
var AppName string

//HTTPPort : rest api port (env APP_PORT; defaults to "8080")
var HTTPPort string

//GinMode : gin specific mode, "release" or "debug" (env GIN_MODE)
var GinMode string

//DbName : database name (env DB_NAME; defaults to "mydata")
var DbName string

//DbDebug : if set to true will print out the query string (env DB_DEBUG)
var DbDebug bool
//Load : reads a .env file (when present) into the environment and fills
//the package-level configuration variables, falling back to defaults for
//anything unset.
func Load() {
	if err := godotenv.Load(); err != nil {
		log.Println("Error loading .env file ...")
		log.Println("Using default configuration")
	}
	AppName = os.Getenv("APP_NAME")
	if AppName == "" {
		AppName = "My App"
	}
	// Anything other than the literal "release" runs gin in debug mode.
	GinMode = os.Getenv("GIN_MODE")
	if GinMode != "release" {
		GinMode = "debug"
	}
	HTTPPort = os.Getenv("APP_PORT")
	if HTTPPort == "" {
		HTTPPort = "8080"
	}
	DbName = os.Getenv("DB_NAME")
	if DbName == "" {
		DbName = "mydata"
	}
	// Debug output stays off unless explicitly enabled.
	if os.Getenv("DB_DEBUG") == "true" {
		DbDebug = true
	}
}
|
package easyquery
import (
"easyquery/tools/constant"
"easyquery/tools/reflection"
"easyquery/tools/stringutil"
"fmt"
"strings"
"github.com/iancoleman/strcase"
"github.com/gin-gonic/gin"
)
// QueryField describes one filter extracted from the request query string:
// the column it targets, the comparison operation, the value, and whether
// it refers to a joined table.
type QueryField struct {
	Name      string         // column name; rewritten to `"Table"."column"` by JoinExtractor
	Type      QueryFieldType // classification driving query generation
	Operation string         // operator key from the query map, e.g. "in", "or_in_eq"
	Value     interface{}    // raw string, or []string after ParseArrayParams
	Join      bool           // true when the field belongs to a joined table
	JoinTable string         // CamelCase join-table name (set together with Join)
}
// QueryFieldType classifies how a query field is turned into a query
// clause; the concrete values are defined centrally in tools/constant.
type QueryFieldType string

const (
	String      QueryFieldType = constant.String
	Array       QueryFieldType = constant.Array
	NullOrExist QueryFieldType = constant.NullOrExist
	OrInEq      QueryFieldType = constant.OrInEq
	OrInLike    QueryFieldType = constant.OrInLike
	OrOutEq     QueryFieldType = constant.OrOutEq
	OrOutLike   QueryFieldType = constant.OrOutLike
	Order       QueryFieldType = constant.Order
	Association QueryFieldType = constant.Association
	Group       QueryFieldType = constant.Group
)
// ArrayValues maps array-style operator keys to the QueryFieldType that
// handles them; membership also signals that the raw multi-value query
// parameters must be re-read via ParseArrayParams.
var ArrayValues = map[string]QueryFieldType{
	"in":          Array,
	"not_in":      Array,
	"or_in_eq":    OrInEq,
	"or_in_like":  OrInLike,
	"or_out_eq":   OrOutEq,
	"or_out_like": OrOutLike,
}
// FieldExtractor collects every recognised filter from the request query
// string into baseHandler.Fields and records on baseHandler.Join whether
// any of them requires a table join.
func (baseHandler *BaseHandler) FieldExtractor(c *gin.Context, model interface{}) {
	var (
		queries []*QueryField
		join    bool
	)
	// Queryable column names from the model itself, plus join-table
	// columns when the query string references any (see AppendJoinFields).
	fields := reflection.TransferFields(model, false)
	fields = AppendJoinFields(c, model, fields)
	for _, field := range fields {
		// Only parameters of the map form field[op]=value are considered.
		value, ok := c.GetQueryMap(field)
		if !ok {
			continue
		}
		query := &QueryField{Name: field, Value: value}
		queries = Transform(c, query, queries)
		// NOTE(review): this re-scans ALL accumulated queries on every
		// outer iteration, so JoinExtractor may run more than once on the
		// same entry; correctness relies on the rewritten name no longer
		// containing constant.Delimiter — verify.
		for _, queryField := range queries {
			if strings.Contains(queryField.Name, constant.Delimiter) {
				JoinExtractor(queryField)
				join = true
			}
		}
	}
	baseHandler.Fields = queries
	baseHandler.Join = join
}
// Transform expands one raw query field (whose Value is the operator→value
// map produced by GetQueryMap) into typed QueryField entries, appending
// them to queries. Non-map values are ignored and queries is returned
// unchanged.
func Transform(c *gin.Context, queryField *QueryField, queries []*QueryField) []*QueryField {
	switch queryField.Value.(type) {
	case map[string]string:
		items, _ := queryField.Value.(map[string]string)
		for k, v := range items {
			// One QueryField per operator; default classification is a
			// plain string comparison.
			field := &QueryField{}
			field.Name = queryField.Name
			field.Operation = k
			field.Type = String
			field.Value = v
			if stringutil.StrInsenContains(k, constant.NotNull, constant.SnotNull, constant.IsNull, constant.IsEmpty) {
				field.Type = NullOrExist
			} else if stringutil.StrInsensitive(constant.Order, k) {
				field.Type = Order
			} else if stringutil.StrInsensitive(constant.Group, k) {
				field.Type = Group
			} else if typ, ok := ArrayValues[k]; ok {
				// Array operators need the full multi-value list, which
				// GetQueryMap collapsed to a single string.
				field.Type = typ
				ParseArrayParams(c, field)
			}
			queries = append(queries, field)
		}
	}
	return queries
}
// ParseArrayParams replaces the field's value with the full slice of raw
// query values submitted under "name[operation]" (for example
// "age[in]=1&age[in]=2" yields []string{"1", "2"}).
func ParseArrayParams(c *gin.Context, queryField *QueryField) {
	params := c.Request.URL.Query()
	key := queryField.Name + "[" + queryField.Operation + "]"
	queryField.Value = params[key]
}
// AppendJoinFields extends fields with the queryable columns of the
// model's join targets, but only when at least one query-string key
// contains the join delimiter (i.e. the request actually references a
// joined column).
//
// Idiom fix: `for key, _ := range` is reduced to `for key := range`
// (the value was unused; gofmt/vet style).
func AppendJoinFields(c *gin.Context, model interface{}, fields []string) []string {
	flag := false
	query := c.Request.URL.Query()
	for key := range query {
		if strings.Contains(key, constant.Delimiter) {
			flag = true
			break
		}
	}
	if flag {
		// Only models that declare their join targets contribute extra
		// fields.
		if method, ok := model.(Joinser); ok {
			for _, join := range method.Joins() {
				fields = append(fields, reflection.TransferFields(join, true)...)
			}
		}
	}
	return fields
}
// JoinExtractor rewrites a delimited field name ("table<delim>column")
// into a quoted, join-qualified reference, marking the field as joined.
// It panics with constant.QueryParamError when the name does not split
// into exactly two parts.
func JoinExtractor(queryField *QueryField) {
	parts := strings.Split(queryField.Name, constant.Delimiter)
	if len(parts) != 2 {
		panic(constant.QueryParamError)
	}
	table := strcase.ToCamel(parts[0])
	queryField.Join = true
	queryField.JoinTable = table
	queryField.Name = fmt.Sprintf(`"%s"."%s"`, table, parts[1])
}
|
package grpcserver
import (
"context"
"net"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/NataliaZabelina/monitoring/api"
monitoring "github.com/NataliaZabelina/monitoring/internal/app"
"github.com/NataliaZabelina/monitoring/internal/config"
"github.com/NataliaZabelina/monitoring/internal/logger"
"github.com/NataliaZabelina/monitoring/internal/storage"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/timestamppb"
)
// grpcServer holds the running server so the shutdown path in Start can
// stop it gracefully; it is written by the serving goroutine.
var grpcServer *grpc.Server

// GrpcServer implements the Monitoring gRPC service on top of the
// in-memory storage tables filled by the monitoring collector.
type GrpcServer struct {
	api.UnimplementedMonitoringServer
	logger     *zap.SugaredLogger
	monitoring *monitoring.Monitoring
	db         *storage.DB
	cfg        *config.Config
}
// Start runs the monitoring collector and the gRPC server until an OS
// interrupt/SIGTERM arrives or the context is cancelled, then gracefully
// stops the server and waits for both goroutines to finish.
//
// NOTE(review): log.Fatal inside the goroutines terminates the whole
// process immediately, so the WaitGroup bookkeeping only matters on the
// non-fatal paths.
func Start(db *storage.DB, monitoring *monitoring.Monitoring, log *zap.SugaredLogger, cfg *config.Config) error {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Translate SIGINT/SIGTERM into a channel receive for the select below.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		err := monitoring.Run(ctx)
		if err != nil {
			log.Fatal("Can't start monitoring")
			return
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		srv := &GrpcServer{
			monitoring: monitoring,
			logger:     log,
			db:         db,
			cfg:        cfg,
		}
		address := cfg.Host + ":" + cfg.Port
		listener, err := net.Listen("tcp", address)
		if err != nil {
			log.Fatal("Can not start listening")
			return
		}
		grpcServer = grpc.NewServer()
		api.RegisterMonitoringServer(grpcServer, srv)
		if err := grpcServer.Serve(listener); err != nil {
			// Bug fix: %w is only meaningful with fmt.Errorf; SugaredLogger's
			// Fatalf needs %v to render the error instead of printing the
			// literal verb.
			log.Fatalf("Can not accept incoming connection: %v", err)
			return
		}
	}()
	// Block until either an OS signal or context cancellation.
	select {
	case <-interrupt:
	case <-ctx.Done():
	}
	log.Info("Shutdown signal accepted")
	cancel()
	if grpcServer != nil {
		grpcServer.GracefulStop()
	}
	wg.Wait()
	return nil
}
// GetInfo streams averaged monitoring snapshots to the client: every
// req.Time seconds it sends system load, CPU and disk-IO values averaged
// over the last req.Period seconds, until the stream's context is done or
// an interrupt is received.
//
// NOTE(review): registering a process-wide signal handler inside a
// per-stream RPC is unusual — every concurrent stream adds one; verify
// this is intended.
func (s *GrpcServer) GetInfo(req *api.Request, stream api.Monitoring_GetInfoServer) error {
	stopChan := make(chan os.Signal, 1)
	signal.Notify(stopChan, os.Interrupt)
	ctx := stream.Context()
	logger.Logger.Info("Client connected with params: ", zap.Int32("timeout", req.Time), zap.Int32("period", req.Period))
	for {
		d := time.Duration(int64(time.Second) * int64(req.Time))
		select {
		case <-time.After(d):
			data := api.Result{}
			timestamp := timestamppb.Now()
			data.SystemValue = &api.SystemResponse{
				ResponseTime:     timestamp,
				LoadAverageValue: float64((s.db.SystemTable.GetAverage(req.Period)).LoadAvg),
			}
			cpu := s.db.CPUTable.GetAverage(req.Period)
			data.CpuValue = &api.CPUResponse{
				ResponseTime: timestamp,
				UserMode:     float64(cpu.UserMode),
				SystemMode:   float64(cpu.SystemMode),
				Idle:         float64(cpu.Idle),
			}
			disks := s.db.DiskTable.GetAverage(req.Period)
			diskIO := []*api.DiskIO{}
			for k, v := range disks.Disk {
				diskIO = append(diskIO, &api.DiskIO{
					Device: k,
					Tps:    float64(v.Param1),
					KbPerS: float64(v.Param2),
				})
			}
			data.DiskValue = &api.DiskResponse{
				ResponseTime: timestamp,
				Io:           diskIO,
				Fs:           []*api.DiskFS{},
			}
			err := stream.Send(&data)
			if err != nil {
				return err
			}
		case <-ctx.Done():
			// Bug fix: the format verb was the typo "$v", which was printed
			// literally instead of rendering the context error.
			logger.Logger.Errorf("Stop streaming to client: %v", ctx.Err())
			return ctx.Err()
		case <-stopChan:
			logger.Logger.Info("Close streaming")
			return nil
		}
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/checked"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/input"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the PrintPdfAsImageDefault policy test for both Ash and
// Lacros browser variants.
func init() {
	testing.AddTest(&testing.Test{
		Func:         PrintPdfAsImageDefault,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checking if the 'Print as image' option is set by default depending on the value of this policy",
		Contacts: []string{
			"cmfcmf@google.com", // Test author
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline", "informational"},
		// The browser type is carried in Val and read via s.Param().
		Params: []testing.Param{{
			Fixture: fixture.ChromePolicyLoggedIn,
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Fixture:           fixture.LacrosPolicyLoggedIn,
			Val:               browser.TypeLacros,
		}},
		// PDF fixture served to the browser by the test's local HTTP server.
		Data: []string{"print_pdf_as_image_default.pdf"},
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.PrintPdfAsImageDefault{}, pci.VerifiedFunctionalityUI),
		},
	})
}
// PrintPdfAsImageDefault tests the PrintPdfAsImageDefault policy.
// For each policy value (enabled/disabled/unset) it serves a PDF over a
// local HTTP server, opens it in the browser, invokes print preview and
// verifies the checked state of the "Print as image" checkbox.
func PrintPdfAsImageDefault(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Reserve 10 seconds for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	// Serve the PDF fixture from the test's data directory.
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	url := fmt.Sprintf("%s/print_pdf_as_image_default.pdf", server.URL)
	for _, param := range []struct {
		name                    string
		wantPrintAsImageChecked checked.Checked
		policy                  *policy.PrintPdfAsImageDefault
	}{
		{
			name:                    "enabled",
			wantPrintAsImageChecked: checked.True,
			policy:                  &policy.PrintPdfAsImageDefault{Val: true},
		},
		{
			name:                    "disabled",
			wantPrintAsImageChecked: checked.False,
			policy:                  &policy.PrintPdfAsImageDefault{Val: false},
		},
		{
			// Unset behaves like disabled: the checkbox defaults to off.
			name:                    "unset",
			wantPrintAsImageChecked: checked.False,
			policy:                  &policy.PrintPdfAsImageDefault{Stat: policy.StatusUnset},
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Perform cleanup.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Fatal("Failed to clean up: ", err)
			}
			// Update policies.
			if err := policyutil.ServeAndVerify(ctx, fdms, cr, []policy.Policy{param.policy}); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}
			// Setup browser based on the chrome type.
			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Fatal("Failed to open the browser: ", err)
			}
			defer closeBrowser(cleanupCtx)
			conn, err := br.NewConn(ctx, url)
			if err != nil {
				s.Fatal("Failed to open url: ", err)
			}
			defer conn.Close()
			defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			kb, err := input.Keyboard(ctx)
			if err != nil {
				s.Fatal("Failed to get the keyboard: ", err)
			}
			defer kb.Close()
			// Connect to Test API to use it with the UI library.
			tconn, err := cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Failed to create Test API connection: ", err)
			}
			printAsImageCheckbox := nodewith.Role(role.CheckBox).Name("Print as image")
			ui := uiauto.New(tconn)
			if err := uiauto.Combine("open print preview",
				// Wait until the PDF viewer is ready and displays the content of the PDF.
				ui.WaitUntilExists(nodewith.Role(role.StaticText).Name("Hello World")),
				kb.AccelAction("Ctrl+P"),
				ui.WaitUntilExists(printAsImageCheckbox),
			)(ctx); err != nil {
				s.Fatal("Failed to open print preview: ", err)
			}
			nodeInfo, err := ui.Info(ctx, printAsImageCheckbox)
			if err != nil {
				s.Fatal("Failed to check state of 'Print as image' checkbox: ", err)
			}
			if nodeInfo.Checked != param.wantPrintAsImageChecked {
				s.Errorf("Unexpected state of the 'Print as image' checkbox: got %s; want %s", nodeInfo.Checked, param.wantPrintAsImageChecked)
			}
		})
	}
}
|
package socks5
import (
"io"
"fmt"
"net"
"strings"
"strconv"
)
// SOCKS5 command codes (CMD field of a client request, RFC 1928) and the
// address-type (ATYP) codes used in requests and replies.
const (
	ConnectCommand   = uint8(1)
	BindCommand      = uint8(2)
	AssociateCommand = uint8(3)
	ipv4Address      = uint8(1)
	fqdnAddress      = uint8(3)
	ipv6Address      = uint8(4)
)

// SOCKS5 reply codes (REP field), in protocol order starting at 0.
const (
	successReply uint8 = iota
	serverFailure
	ruleFailure
	networkUnreachable
	hostUnreachable
	connectionRefused
	ttlExpired
	commandNotSupported
	addrTypeNotSupported
)
/**
SOCKS请求如下表所示:
+----+-----+-------+------+----------+----------+
|VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT |
+----+-----+-------+------+----------+----------+
| 1 | 1 | X'00' | 1 | Variable | 2 |
+----+-----+-------+------+----------+----------+
其中:
1. VER protocol version:X'05'
2. CMD
2.1 CONNECT X'01'
2.2 BIND X'02'
2.3 UDP ASSOCIATE X'03'
3 RSV RESERVED 保留字段
4 ATYP address type of following address
4.1 IP V4 address: X'01'
4.2 DOMAINNAME: X'03'
4.3 IP V6 address: X'04'
5 DST.ADDR desired destination address
6 DST.PORT desired destination port in network octet order
地址
在地址域(DST.ADDR,BND.ADDR)中,ATYP域详细说明了包含在该域内部的地址类型:
X'01'
该地址是IPv4地址,长4个八位组。
X'03'
该地址包含一个完全的域名。第一个八位组包含了后面名称的八位组的数目,没有中止的空八位组。
X'04'
该地址是IPv6地址,长16个八位组。
*/
// Request captures a parsed SOCKS5 client request (version and command
// plus the destination address) together with the connection's reader and
// writer, which are used for proxying and for sending replies.
type Request struct {
	Version  uint8     // protocol version, always sock5 (0x05)
	Command  uint8     // ConnectCommand, BindCommand or AssociateCommand
	DestAddr *NetAddr  // requested destination address
	reader   io.Reader // client-to-server byte stream
	writer   io.Writer // server-to-client byte stream
}
// NewRequest parses a SOCKS5 request (VER, CMD, RSV, then the destination
// address) from reader and returns it bound to the given reader/writer.
// It fails when the version byte is not SOCKS5.
func NewRequest(reader io.Reader, writer io.Writer) (*Request, error) {
	// VER, CMD and RSV arrive as three consecutive bytes.
	header := make([]byte, 3)
	if _, err := io.ReadFull(reader, header); err != nil {
		return nil, fmt.Errorf("Failed to get command version: %v", err)
	}
	if header[0] != sock5 {
		return nil, fmt.Errorf("Unsupported command version: %v", header[0])
	}
	dest, err := readDestAddr(reader)
	if err != nil {
		return nil, err
	}
	return &Request{
		Version:  sock5,
		Command:  header[1],
		DestAddr: dest,
		reader:   reader,
		writer:   writer,
	}, nil
}
type NetAddr struct {
FQDN string
IP net.IP
Port int
}
func (a *NetAddr) String() string {
if a.FQDN != "" {
return fmt.Sprintf("%s (%s):%d", a.FQDN, a.IP, a.Port)
}
return fmt.Sprintf("%s:%d", a.IP, a.Port)
}
func (a NetAddr) Address() string {
if 0 != len(a.IP) {
return net.JoinHostPort(a.IP.String(), strconv.Itoa(a.Port))
}
return net.JoinHostPort(a.FQDN, strconv.Itoa(a.Port))
}
// readDestAddr parses the DST.ADDR/DST.PORT portion of a SOCKS5 request:
// one ATYP byte, then an IPv4/IPv6 address or a length-prefixed domain
// name, then a big-endian 16-bit port.
//
// Fix: the single-byte reads used bare r.Read, which may legally return
// 0 bytes with a nil error on some readers; io.ReadFull guarantees the
// byte is actually consumed (io.ReadAtLeast(r, b, len(b)) calls are
// normalized to the equivalent io.ReadFull for the same reason).
func readDestAddr(r io.Reader) (*NetAddr, error) {
	netAddr := &NetAddr{}
	// Read the address-type (ATYP) octet.
	addrType := []byte{0}
	if _, err := io.ReadFull(r, addrType); err != nil {
		return nil, err
	}
	// Handle on a per type basis
	switch addrType[0] {
	case ipv4Address:
		addr := make([]byte, 4)
		if _, err := io.ReadFull(r, addr); err != nil {
			return nil, err
		}
		netAddr.IP = net.IP(addr)
	case ipv6Address:
		addr := make([]byte, 16)
		if _, err := io.ReadFull(r, addr); err != nil {
			return nil, err
		}
		netAddr.IP = net.IP(addr)
	case fqdnAddress:
		// The first octet is the length of the domain name that follows.
		if _, err := io.ReadFull(r, addrType); err != nil {
			return nil, err
		}
		addrLen := int(addrType[0])
		fqdn := make([]byte, addrLen)
		if _, err := io.ReadFull(r, fqdn); err != nil {
			return nil, err
		}
		netAddr.FQDN = string(fqdn)
	default:
		return nil, fmt.Errorf("Unrecognized address type")
	}
	// Read the port (network byte order, i.e. big-endian).
	port := []byte{0, 0}
	if _, err := io.ReadFull(r, port); err != nil {
		return nil, err
	}
	netAddr.Port = (int(port[0]) << 8) | int(port[1])
	return netAddr, nil
}
// handleRequest is used for request processing after authentication.
// It resolves an FQDN destination to an IP and dispatches the command;
// currently only CONNECT is implemented.
func (s *Server) handleRequest(req *Request) error {
	// Resolve an FQDN destination before dialing.
	dest := req.DestAddr
	if dest.FQDN != "" {
		addr, err := net.ResolveIPAddr("ip", dest.FQDN)
		if err != nil {
			// NOTE(review): no failure reply is sent to the client on
			// resolution errors (hostUnreachable would fit); the stub
			// comment below suggests this was planned.
			//send reply
			return err
		}
		dest.IP = addr.IP
	}
	// Switch on the command
	/*
		switch req.Command {
		case ConnectCommand:
			return s.handleConnect(req)
		case BindCommand:
			return s.handleBind(ctx, conn, req)
		case AssociateCommand:
			return s.handleAssociate(ctx, conn, req)
		default:
			if err := sendReply(conn, commandNotSupported, nil); err != nil {
				return fmt.Errorf("Failed to send reply: %v", err)
			}
			return fmt.Errorf("Unsupported command: %v", req.Command)
		}
	*/
	// BIND/ASSOCIATE handling above is commented out; everything is
	// treated as CONNECT.
	return s.handleConnect(req)
}
// handleConnect serves a CONNECT command: it dials the destination over
// TCP, reports success or a mapped failure code to the client, then
// relays bytes in both directions until one side finishes.
func (s *Server) handleConnect(req *Request) error {
	target, err := net.Dial("tcp", req.DestAddr.Address())
	if err != nil {
		// Map common dial failures onto the closest SOCKS5 reply code;
		// everything else is reported as host unreachable.
		msg := err.Error()
		resp := hostUnreachable
		if strings.Contains(msg, "refused") {
			resp = connectionRefused
		} else if strings.Contains(msg, "network is unreachable") {
			resp = networkUnreachable
		}
		if err := sendReply(req.writer, resp, nil); err != nil {
			return fmt.Errorf("Failed to send reply: %v", err)
		}
		return fmt.Errorf("Connect to %v failed: %v", req.DestAddr, err)
	}
	defer target.Close()
	// Send success, announcing the local bind address of the outbound
	// connection as BND.ADDR/BND.PORT.
	local := target.LocalAddr().(*net.TCPAddr)
	bind := NetAddr{IP: local.IP, Port: local.Port}
	if err := sendReply(req.writer, successReply, &bind); err != nil {
		return fmt.Errorf("Failed to send reply: %v", err)
	}
	// Start proxying in both directions.
	errCh := make(chan error, 2)
	go proxy(target, req.reader, errCh)
	go proxy(req.writer, target, errCh)
	// Wait for both directions; the first error wins.
	for i := 0; i < 2; i++ {
		e := <-errCh
		if e != nil {
			// return from this function closes target (and conn).
			return e
		}
	}
	return nil
}
type closeWriter interface {
CloseWrite() error
}
func proxy(dst io.Writer, src io.Reader, errCh chan error) {
_, err := io.Copy(dst, src)
if tcpConn, ok := dst.(closeWriter); ok {
tcpConn.CloseWrite()
}
errCh <- err
}
/**
到SOCKS服务器的连接一经建立,客户机即发送SOCKS请求信息,并且完成认证商
议。服务器评估请求,返回一个回应如下表所示:
+----+-----+-------+------+----------+----------+
|VER | REP | RSV | ATYP | BND.ADDR | BND.PORT |
+----+-----+-------+------+----------+----------+
| 1 | 1 | X'00' | 1 | Variable | 2 |
+----+-----+-------+------+----------+----------+
其中:
o VER protocol version: X'05'
o REP Reply field:
o X'00' succeeded
o X'01' general SOCKS server failure
o X'02' connection not allowed by ruleset
o X'03' Network unreachable
o X'04' Host unreachable
o X'05' Connection refused
o X'06' TTL expired
o X'07' Command not supported
o X'08' Address type not supported
o X'09' to X'FF' unassigned
o RSV RESERVED
o ATYP address type of following address
o IP V4 address: X'01'
o DOMAINNAME: X'03'
o IP V6 address: X'04'
o BND.ADDR server bound address
o BND.PORT server bound port in network octet order
标志RESERVED(RSV)的地方必须设置为X'00'。
*/
// sendReply writes a SOCKS5 reply to w: VER, the given REP code, RSV,
// then BND.ADDR/BND.PORT encoded from addr. A nil addr is sent as the
// IPv4 zero address with port 0.
func sendReply(w io.Writer, resp uint8, addr *NetAddr) error {
	// Format the address
	var addrType uint8
	var addrBody []byte
	var addrPort uint16
	switch {
	case addr == nil:
		addrType = ipv4Address
		addrBody = []byte{0, 0, 0, 0}
		addrPort = 0
	case addr.FQDN != "":
		// Domain names are length-prefixed with a single octet.
		addrType = fqdnAddress
		addrBody = append([]byte{byte(len(addr.FQDN))}, addr.FQDN...)
		addrPort = uint16(addr.Port)
	case addr.IP.To4() != nil:
		addrType = ipv4Address
		addrBody = []byte(addr.IP.To4())
		addrPort = uint16(addr.Port)
	case addr.IP.To16() != nil:
		addrType = ipv6Address
		addrBody = []byte(addr.IP.To16())
		addrPort = uint16(addr.Port)
	default:
		return fmt.Errorf("Failed to format address: %v", addr)
	}
	// Format the message: 4 header bytes + address + 2 port bytes.
	msg := make([]byte, 6+len(addrBody))
	msg[0] = sock5
	msg[1] = resp
	msg[2] = 0 // Reserved
	msg[3] = addrType
	copy(msg[4:], addrBody)
	// Port is appended in network (big-endian) byte order.
	msg[4+len(addrBody)] = byte(addrPort >> 8)
	msg[4+len(addrBody)+1] = byte(addrPort & 0xff)
	// Send the message
	_, err := w.Write(msg)
	return err
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package serial
import (
"context"
)
// Port represents a serial port and its basic operations.
type Port interface {
	// Read bytes into buffer and return number of bytes read.
	// Bytes already written to the port shall be moved into buf, up to its size.
	Read(ctx context.Context, buf []byte) (n int, err error)

	// Write bytes from buffer and return number of bytes written.
	// It returns a non-nil error when n != len(b), nil otherwise.
	Write(ctx context.Context, buf []byte) (n int, err error)

	// Flush un-read/written bytes.
	Flush(ctx context.Context) error

	// Close closes the port.
	Close(ctx context.Context) error
}
|
package base
// BytecodeReader is a cursor over a bytecode array: the ReadXxx methods
// consume big-endian values and advance the program counter.
type BytecodeReader struct {
	code []byte // instruction stream
	pc   int    // index of the next byte to consume
}

// Reset points the reader at code with the program counter set to pc.
func (r *BytecodeReader) Reset(code []byte, pc int) {
	r.code = code
	r.pc = pc
}

// ReadUint8 consumes one byte.
func (r *BytecodeReader) ReadUint8() uint8 {
	b := r.code[r.pc]
	r.pc++
	return b
}

// ReadInt8 consumes one byte as a signed value.
func (r *BytecodeReader) ReadInt8() int8 {
	return int8(r.ReadUint8())
}

// ReadUint16 consumes two bytes, big-endian.
func (r *BytecodeReader) ReadUint16() uint16 {
	hi := uint16(r.ReadUint8())
	lo := uint16(r.ReadUint8())
	return hi<<8 | lo
}

// ReadInt16 consumes two bytes, big-endian, as a signed value.
func (r *BytecodeReader) ReadInt16() int16 {
	return int16(r.ReadUint16())
}

// ReadInt32 consumes four bytes, big-endian, as a signed value.
func (r *BytecodeReader) ReadInt32() int32 {
	var v int32
	for i := 0; i < 4; i++ {
		v = v<<8 | int32(r.ReadUint8())
	}
	return v
}

// SkipPadding advances the program counter to the next 4-byte boundary
// (used by tableswitch/lookupswitch-style instructions).
func (r *BytecodeReader) SkipPadding() {
	for r.pc%4 != 0 {
		r.ReadUint8()
	}
}

// ReadInt32s consumes n consecutive big-endian int32 values.
func (r *BytecodeReader) ReadInt32s(n int32) []int32 {
	ints := make([]int32, n)
	for i := range ints {
		ints[i] = r.ReadInt32()
	}
	return ints
}

// PC returns the current program counter.
func (r *BytecodeReader) PC() int {
	return r.pc
}
|
package main
import (
"fmt"
"log"
"github.com/xuperchain/xupercore/example/xchain/cmd/client/cmd"
"github.com/xuperchain/xupercore/example/xchain/cmd/client/common/global"
xdef "github.com/xuperchain/xupercore/example/xchain/common/def"
"github.com/spf13/cobra"
)
// Build metadata printed by the version command. NOTE(review): the empty
// defaults suggest these are injected at build time via
// -ldflags "-X main.Version=..." — confirm against the build scripts.
var (
	Version   = ""
	BuildTime = ""
	CommitID  = ""
)
// main builds the root CLI command and executes it, logging (rather than
// panicking on) construction or execution failures.
func main() {
	rootCmd, err := NewClientCommand()
	if err != nil {
		log.Printf("new client command failed.err:%v", err)
		return
	}
	if err := rootCmd.Execute(); err != nil {
		log.Printf("command exec failed.err:%v", err)
	}
}
// NewClientCommand assembles the root cobra command: the version command,
// every sub-client (contract/tx/block/chain) and the global persistent
// flags. The error return is always nil today but kept for API stability.
func NewClientCommand() (*cobra.Command, error) {
	rootCmd := &cobra.Command{
		Use:           xdef.CmdLineName + " <command> [arguments]",
		Short:         xdef.CmdLineName + " is a blockchain terminal client.",
		Long:          xdef.CmdLineName + " is a blockchain terminal client.",
		SilenceUsage:  true,
		SilenceErrors: true,
		Example:       xdef.CmdLineName + " tx query [txid]",
	}
	// cmd version
	rootCmd.AddCommand(GetVersionCmd().GetCmd())
	// contract client
	rootCmd.AddCommand(cmd.GetContractCmd().GetCmd())
	// tx client
	rootCmd.AddCommand(cmd.GetTxCmd().GetCmd())
	// block client
	rootCmd.AddCommand(cmd.GetBlockCmd().GetCmd())
	// blockchain client
	rootCmd.AddCommand(cmd.GetChainCmd().GetCmd())
	// Register the global flags shared by every sub-command.
	rootFlag := rootCmd.PersistentFlags()
	rootFlag.StringVarP(&global.GFlagConf, "conf", "c", "./conf/client.yaml", "client config")
	rootFlag.StringVarP(&global.GFlagCrypto, "crypto", "", "default", "crypto type")
	rootFlag.StringVarP(&global.GFlagHost, "host", "H", "127.0.0.1:36101", "node host")
	rootFlag.StringVarP(&global.GFlagKeys, "keys", "", "./data/keys", "account address")
	rootFlag.StringVarP(&global.GFlagBCName, "name", "", xdef.DefChainName, "chain name")
	return rootCmd, nil
}
// versionCmd implements the `version` subcommand.
type versionCmd struct {
	global.BaseCmd
}
// GetVersionCmd builds the `version` subcommand, which prints the binary's
// version, commit id and build time.
func GetVersionCmd() *versionCmd {
	versionCmdIns := new(versionCmd)
	subCmd := &cobra.Command{
		Use: "version",
		Short: "view process version information.",
		Example: xdef.CmdLineName + " version",
		Run: func(cmd *cobra.Command, args []string) {
			versionCmdIns.PrintVersion()
		},
	}
	versionCmdIns.SetCmd(subCmd)
	return versionCmdIns
}
// PrintVersion writes "<version>-<commit> <build time>" to stdout.
func (t *versionCmd) PrintVersion() {
	fmt.Printf("%s-%s %s\n", Version, CommitID, BuildTime)
}
|
package _5_Visitor_Pattern
import "testing"
// Step 5 of the visitor-pattern example:
// use ComputerPartDisplayVisitor to display the parts of a Computer.
func TestVisitorPattern(t *testing.T) {
	computer := newComputer()
	wantRet := "Displaying Mouse.Displaying Monitor.Displaying Keyboard.Displaying Computer."
	if gotRet := computer.accept(&ComputerPartDisplayVisitor{}); gotRet != wantRet {
		t.Errorf("visit() = %v, want %v", gotRet, wantRet)
	}
}
|
package filter
import (
"net/http"
"testing"
"time"
"github.com/caddyserver/caddy"
)
// TestSetup verifies that plugin setup accepts a bare `filter` directive
// and rejects one that carries unexpected arguments.
func TestSetup(t *testing.T) {
	c := caddy.NewTestController("dns", `filter`)
	if err := setup(c); err != nil {
		t.Fatalf("Expected no errors, but got: %v", err)
	}
	c = caddy.NewTestController("dns", `filter hello`)
	if err := setup(c); err == nil {
		// err is necessarily nil in this branch; the old message formatted
		// it as if an error value were available.
		t.Fatal("Expected an error for unexpected arguments, but got nil")
	}
}
// TestIp2Interval logs the interval computed for a sample ip/mask pair.
// It has no assertions and serves only as a smoke test.
func TestIp2Interval(t *testing.T) {
	ip, mask := "1.0.2.0", "23"
	ipInfo := IP2IPInterval(ip, mask)
	t.Log(ipInfo)
}
// TestUpdateLocalIP starts the background local-IP updater and logs the
// value before and after two refresh periods. No assertions: it only
// checks that the updater runs without crashing.
func TestUpdateLocalIP(t *testing.T) {
	// negligible data race: the updater goroutine writes f.localIP while
	// this test reads it; tolerated for a smoke test.
	duration := 2 * time.Second
	f := Filter{}
	t.Log(f.localIP)
	go f.localIPUpdator(duration)
	time.Sleep(duration * 2)
	t.Log(f.localIP)
}
// TestExtractIP parses an IP-table file and logs the extracted entries.
// NOTE(review): the absolute, user-specific path only exists on the
// original author's machine — consider a path relative to the package.
func TestExtractIP(t *testing.T) {
	filename := "/home/lob/go/src/github.com/lobshunter86/filter/iptable/china_telecom.txt"
	ipinfos, err := extractIPs(filename)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(ipinfos)
}
// TestDebug performs a live lookup of the machine's public IP address and
// logs the raw response. Requires network access.
func TestDebug(t *testing.T) {
	resp, err := http.Get("https://api.ipify.org?format=json")
	if err != nil {
		t.Fatal(err)
	}
	// Close the body so the underlying connection is not leaked
	// (previously it was never closed).
	defer resp.Body.Close()
	t.Log(resp)
}
|
// Copyright 2013 Rodrigo Moraes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package htmlfilter
import (
"bytes"
"exp/html"
"testing"
)
// TestNextTextFilter feeds a nested HTML document through NextTextFilter
// and verifies that only the requested tags ("p", "a", "sup") survive
// around the text content — one filtered node per top-level <p>.
func TestNextTextFilter(t *testing.T) {
	// The raw literal intentionally keeps deep nesting so that filtered-out
	// wrappers (<small>, <font>, <u>, <b>, <i>, <span>) are exercised.
	src := `<html>
<p>
<a name="foo"/>
<small>
<font face="Arial">
Foo
<sup>
<u>
<b>
Bar
</b>
</u>
</sup>
</font>
</small>
<a href="/path/to/somewhere">
<i>
Baz
</i>
</a>
</p>
<p>
<span>
Ding
</span>
</p>
</html>`
	expected := []string{
		"<p><a/>Foo<sup>Bar</sup><a>Baz</a></p>",
		"<p>Ding</p>",
	}
	r := bytes.NewBufferString(src)
	d := html.NewTokenizer(r)
	for _, v := range expected {
		node, err := NextTextFilter(d, "p", "a", "sup")
		if err != nil {
			t.Fatal(err)
		}
		if node.String() != v {
			t.Errorf("expected %q, got %q", v, node.String())
		}
	}
}
// TestMalformed checks that parsing repairs mis-nested HTML: the stray
// </i> is dropped and the open <span> is closed where required.
func TestMalformed(t *testing.T) {
	cases := []struct {
		src string
		exp string
	}{
		{`<p><span>Foo</i></p>`, `<html><head></head><body><p><span>Foo</span></p></body></html>`},
	}
	for _, tc := range cases {
		nodes, err := html.ParseFragment(bytes.NewBufferString(tc.src), nil)
		if err != nil {
			t.Fatal(err)
		}
		var b bytes.Buffer
		for _, n := range nodes {
			if err := html.Render(&b, n); err != nil {
				t.Error(err)
			}
		}
		if got := b.String(); got != tc.exp {
			t.Errorf("expected %q, got %q", tc.exp, got)
		}
	}
}
|
package main
import (
"fmt"
"io"
"net/http"
)
// main fetches http://google.com and streams the response body into an
// in-memory buffer, printing the buffer's address first.
func main() {
	resp, err := http.Get("http://google.com")
	if err != nil {
		// Previously the error was discarded, so a failed request caused a
		// nil-pointer dereference on resp.Body below.
		fmt.Println("request failed:", err)
		return
	}
	// Close the body so the underlying connection can be reused/released.
	defer resp.Body.Close()
	m := memoryBuffer{}
	fmt.Println("Address of m", &m)
	if _, err := io.Copy(&m, resp.Body); err != nil {
		fmt.Println("copy failed:", err)
	}
	//fmt.Println(m.message)
}
// memoryBuffer is an io.Writer that accumulates everything written to it
// into an in-memory string.
type memoryBuffer struct {
	message string // concatenation of all writes so far
}

// Write appends p to the buffer; it always reports full success.
func (b *memoryBuffer) Write(p []byte) (int, error) {
	b.message += string(p)
	return len(p), nil
}
|
package irc_color
// Colour is an mIRC colour code: the numeric value that follows the colour
// control character in a message.
type Colour int

// Colours, in mIRC numeric order (0-15), followed by common aliases.
const (
	White Colour = iota
	Black
	Blue
	Green
	Red
	Brown
	Purple
	Orange //, Olive
	Yellow //
	LightGreen //, Lime
	Teal //, LightCyan
	Cyan //, Aqua
	LightBlue //
	Pink //, Fuchsia
	Grey //, Gray
	LightGrey //, LightGray, Silver
	Violet = Purple
	Olive = Orange
	Lime = LightGreen
	LightCyan = Teal
	Aqua = Cyan
	Fuchsia = Pink
	Gray = Grey
	LightGray = LightGrey
	Silver = LightGrey
)

// colourChar is the mIRC colour control character (0x03).
const colourChar = "\x03"
|
package crudcontracts
import (
"context"
"github.com/adamluzsi/frameless/internal/suites"
"github.com/adamluzsi/frameless/ports/comproto"
"github.com/adamluzsi/frameless/ports/crud"
"testing"
)
// SuiteFor builds the complete CRUD contract suite for a resource type.
// The Creator, ByIDFinder, ByIDDeleter and OnePhaseCommitProtocol contracts
// are always included; Purger, Updater, AllFinder and AllDeleter contracts
// are added only when the Resource type implements the corresponding crud
// interface (detected via a type assertion on the boxed zero value).
func SuiteFor[
	Entity, ID any,
	Resource suiteSubjectResource[Entity, ID],
](makeSubject func(testing.TB) SuiteSubject[Entity, ID, Resource]) Suite {
	// Box the zero Resource so optional capabilities can be type-asserted.
	T := any(*new(Resource))
	var contracts suites.Suites
	if _, ok := T.(crud.Purger); ok {
		contracts = append(contracts, Purger[Entity, ID](func(tb testing.TB) PurgerSubject[Entity, ID] {
			sub := makeSubject(tb)
			return PurgerSubject[Entity, ID]{
				Resource: any(sub.Resource).(purgerSubjectResource[Entity, ID]),
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}))
	}
	// Mandatory contracts, guaranteed by the suiteSubjectResource constraint.
	contracts = append(contracts,
		Creator[Entity, ID](func(tb testing.TB) CreatorSubject[Entity, ID] {
			sub := makeSubject(tb)
			return CreatorSubject[Entity, ID]{
				Resource: sub.Resource,
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
				SupportIDReuse: sub.CreateSupportIDReuse,
				SupportRecreate: sub.CreateSupportRecreate,
			}
		}),
		ByIDFinder[Entity, ID](func(tb testing.TB) ByIDFinderSubject[Entity, ID] {
			sub := makeSubject(tb)
			return ByIDFinderSubject[Entity, ID]{
				Resource: sub.Resource,
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}),
		ByIDDeleter[Entity, ID](func(tb testing.TB) ByIDDeleterSubject[Entity, ID] {
			sub := makeSubject(tb)
			return ByIDDeleterSubject[Entity, ID]{
				Resource: sub.Resource,
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}),
		OnePhaseCommitProtocol[Entity, ID](func(tb testing.TB) OnePhaseCommitProtocolSubject[Entity, ID] {
			sub := makeSubject(tb)
			// The commit manager is optional at the subject level; skip the
			// contract at runtime when it is not supplied.
			if sub.CommitManager == nil {
				tb.Skip("SuiteSubject.CommitManager is not supplied")
			}
			return OnePhaseCommitProtocolSubject[Entity, ID]{
				Resource: sub.Resource,
				CommitManager: sub.CommitManager,
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}),
	)
	if _, ok := T.(crud.Updater[Entity]); ok {
		contracts = append(contracts, Updater[Entity, ID](func(tb testing.TB) UpdaterSubject[Entity, ID] {
			sub := makeSubject(tb)
			return UpdaterSubject[Entity, ID]{
				Resource: any(sub.Resource).(updaterSubjectResource[Entity, ID]),
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}))
	}
	if _, ok := T.(crud.AllFinder[Entity]); ok {
		contracts = append(contracts, AllFinder[Entity, ID](func(tb testing.TB) AllFinderSubject[Entity, ID] {
			sub := makeSubject(tb)
			return AllFinderSubject[Entity, ID]{
				Resource: any(sub.Resource).(allFinderSubjectResource[Entity, ID]),
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}))
	}
	if _, ok := T.(crud.AllDeleter); ok {
		contracts = append(contracts, AllDeleter[Entity, ID](func(tb testing.TB) AllDeleterSubject[Entity, ID] {
			sub := makeSubject(tb)
			return AllDeleterSubject[Entity, ID]{
				Resource: any(sub.Resource).(allDeleterSubjectResource[Entity, ID]),
				MakeContext: sub.MakeContext,
				MakeEntity: sub.MakeEntity,
			}
		}))
	}
	return contracts
}
// SuiteSubject bundles everything SuiteFor needs to exercise a resource:
// the resource itself, an optional commit manager, and factory functions
// for contexts and entities.
type SuiteSubject[
	Entity, ID any,
	Resource suiteSubjectResource[Entity, ID],
] struct {
	Resource Resource
	// CommitManager is optional; when nil the commit-protocol contract is skipped.
	CommitManager comproto.OnePhaseCommitProtocol
	MakeContext func() context.Context
	MakeEntity func() Entity
	// CreateSupportIDReuse / CreateSupportRecreate toggle optional Creator behaviors.
	CreateSupportIDReuse bool
	CreateSupportRecreate bool
}

// suiteSubjectResource is the minimum capability set every suite resource
// must provide: create, find-by-id and delete-by-id.
type suiteSubjectResource[Entity, ID any] interface {
	crud.Creator[Entity]
	crud.ByIDFinder[Entity, ID]
	crud.ByIDDeleter[ID]
}

// Suite is the assembled set of contracts returned by SuiteFor.
type Suite suites.Suite
|
package prettyfyne
import (
"gopkg.in/yaml.v2"
"image/color"
)
// PrettyThemeConfig is used for serialization and loading from yaml.
// Every field is optional: a nil colour or zero size means "keep the
// default theme value" (see UnmarshalYaml).
type PrettyThemeConfig struct {
	BackgroundColor *color.RGBA `yaml:"background_color,omitempty"`
	ButtonColor *color.RGBA `yaml:"button_color,omitempty"`
	DisabledButtonColor *color.RGBA `yaml:"disabled_button_color,omitempty"`
	HyperlinkColor *color.RGBA `yaml:"hyperlink_color,omitempty"`
	TextColor *color.RGBA `yaml:"text_color,omitempty"`
	DisabledTextColor *color.RGBA `yaml:"disabled_text_color,omitempty"`
	IconColor *color.RGBA `yaml:"icon_color,omitempty"`
	DisabledIconColor *color.RGBA `yaml:"disabled_icon_color,omitempty"`
	PlaceHolderColor *color.RGBA `yaml:"place_holder_color,omitempty"`
	PrimaryColor *color.RGBA `yaml:"primary_color,omitempty"`
	HoverColor *color.RGBA `yaml:"hover_color,omitempty"`
	FocusColor *color.RGBA `yaml:"focus_color,omitempty"`
	ScrollBarColor *color.RGBA `yaml:"scroll_bar_color,omitempty"`
	ShadowColor *color.RGBA `yaml:"shadow_color,omitempty"`
	TextSize int `yaml:"text_size,omitempty"`
	// Each font can be given as a filesystem path and/or a name; both are
	// passed to LoadFont.
	TextFontPath string `yaml:"text_font_path,omitempty"`
	TextFont string `yaml:"text_font,omitempty"`
	TextBoldFontPath string `yaml:"text_bold_font_path,omitempty"`
	TextBoldFont string `yaml:"text_bold_font,omitempty"`
	TextItalicFontPath string `yaml:"text_italic_font_path,omitempty"`
	TextItalicFont string `yaml:"text_italic_font,omitempty"`
	TextBoldItalicFontPath string `yaml:"text_bold_italic_font_path,omitempty"`
	TextBoldItalicFont string `yaml:"text_bold_italic_font,omitempty"`
	TextMonospaceFontPath string `yaml:"text_monospace_font_path,omitempty"`
	TextMonospaceFont string `yaml:"text_monospace_font,omitempty"`
	Padding int `yaml:"padding,omitempty"`
	IconInlineSize int `yaml:"icon_inline_size,omitempty"`
	ScrollBarSize int `yaml:"scroll_bar_size,omitempty"`
	ScrollBarSmallSize int `yaml:"scroll_bar_small_size,omitempty"`
}
// UnmarshalYaml will override the default theme settings with what is stored in a yaml file. All fields are optional
// and will fall back to the default if not set. It will always return a populated theme, even if it cannot load fonts.
// Font-loading failures are reported through fontsLoaded (false) rather
// than through err; the returned error is non-nil only for invalid yaml.
func UnmarshalYaml(y []byte) (pt *PrettyTheme, fontsLoaded bool, err error) {
	pt = DefaultTheme()
	c := PrettyThemeConfig{}
	err = yaml.Unmarshal(y, &c)
	if err != nil {
		return
	}
	fontsLoaded = true
	// Colour overrides: nil means "keep the default".
	if c.BackgroundColor != nil {
		pt.BackgroundColor = c.BackgroundColor
	}
	if c.ButtonColor != nil {
		pt.ButtonColor = c.ButtonColor
	}
	if c.DisabledButtonColor != nil {
		pt.DisabledButtonColor = c.DisabledButtonColor
	}
	if c.HyperlinkColor != nil {
		pt.HyperlinkColor = c.HyperlinkColor
	}
	if c.TextColor != nil {
		pt.TextColor = c.TextColor
	}
	if c.DisabledTextColor != nil {
		pt.DisabledTextColor = c.DisabledTextColor
	}
	if c.IconColor != nil {
		pt.IconColor = c.IconColor
	}
	if c.DisabledIconColor != nil {
		pt.DisabledIconColor = c.DisabledIconColor
	}
	if c.PlaceHolderColor != nil {
		pt.PlaceHolderColor = c.PlaceHolderColor
	}
	if c.PrimaryColor != nil {
		pt.PrimaryColor = c.PrimaryColor
	}
	if c.HoverColor != nil {
		pt.HoverColor = c.HoverColor
	}
	if c.FocusColor != nil {
		pt.FocusColor = c.FocusColor
	}
	if c.ScrollBarColor != nil {
		pt.ScrollBarColor = c.ScrollBarColor
	}
	if c.ShadowColor != nil {
		pt.ShadowColor = c.ShadowColor
	}
	// Size overrides: zero means "keep the default".
	if c.TextSize != 0 {
		pt.TextSize = c.TextSize
	}
	if c.Padding != 0 {
		pt.Padding = c.Padding
	}
	if c.IconInlineSize != 0 {
		pt.IconInlineSize = c.IconInlineSize
	}
	if c.ScrollBarSize != 0 {
		pt.ScrollBarSize = c.ScrollBarSize
	}
	if c.ScrollBarSmallSize != 0 {
		pt.ScrollBarSmallSize = c.ScrollBarSmallSize
	}
	// handle loading of fonts; a failed load keeps the default font and
	// flips fontsLoaded instead of returning the error
	if c.TextFontPath != "" || c.TextFont != "" {
		pt.TextFont, err = LoadFont(c.TextFontPath, c.TextFont, "NotoSans-Regular.ttf")
		if err != nil {
			fontsLoaded = false
		}
	}
	if c.TextBoldFontPath != "" || c.TextBoldFont != "" {
		pt.TextBoldFont, err = LoadFont(c.TextBoldFontPath, c.TextBoldFont, "NotoSans-Bold.ttf")
		if err != nil {
			fontsLoaded = false
		}
	}
	if c.TextBoldItalicFontPath != "" || c.TextBoldItalicFont != "" {
		pt.TextBoldItalicFont, err = LoadFont(c.TextBoldItalicFontPath, c.TextBoldItalicFont, "NotoSans-BoldItalic.ttf")
		if err != nil {
			fontsLoaded = false
		}
	}
	if c.TextItalicFontPath != "" || c.TextItalicFont != "" {
		pt.TextItalicFont, err = LoadFont(c.TextItalicFontPath, c.TextItalicFont, "NotoSans-Italic.ttf")
		if err != nil {
			fontsLoaded = false
		}
	}
	if c.TextMonospaceFontPath != "" || c.TextMonospaceFont != "" {
		pt.TextMonospaceFont, err = LoadFont(c.TextMonospaceFontPath, c.TextMonospaceFont, "NotoMono-Regular.ttf")
		if err != nil {
			fontsLoaded = false
		}
	}
	// deliberately return a nil error: font failures are non-fatal
	return pt, fontsLoaded, nil
}
|
package drivers
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"gorm.io/driver/mysql"
"gorm.io/driver/postgres"
"gorm.io/driver/sqlserver"
_ "gorm.io/driver/clickhouse" // for tests
)
// TestDialect exercises NewDialect across drivers, with the dialect taken
// either from a DSN string or from a driver config carried in the context.
// No live databases are required: only DSN parsing is validated.
func TestDialect(t *testing.T) {
	const (
		myDSN = "root@tcp(0.0.0.0:3306)/test?parseTime=true"
		pgDSN = "postgres://root:password@0.0.0.0:5432/test"
		// NOTE(review): this SQL Server DSN uses port 5432 (the postgres
		// port) — presumably irrelevant since nothing connects, but confirm.
		msDSN = "sqlserver://sa:password@0.0.0.0:5432?database=test"
		clickDSN = "tcp://localhost:9000?database=gorm&username=gorm&password=gorm&read_timeout=10&write_timeout=20"
	)
	myCtx := ConfigWithContext(context.Background(), mysql.Config{})
	pgCtx := ConfigWithContext(context.Background(), postgres.Config{})
	msCtx := ConfigWithContext(context.Background(), sqlserver.Config{})
	tests := []struct {
		drv Driver
		ctx context.Context
		dsn string
		name string
		Err assert.ErrorAssertionFunc
	}{
		{MySQL, nil, "", "", assert.Error},
		{MySQL, nil, "\n", "", assert.Error},
		{MySQL, nil, myDSN, "test", assert.NoError},
		{MySQL, myCtx, "", "", assert.NoError},
		{Postgres, nil, pgDSN, "test", assert.NoError},
		{Postgres, pgCtx, "", "", assert.NoError},
		{SQLServer, nil, msDSN, "test", assert.NoError},
		{SQLServer, msCtx, "", "", assert.NoError},
		{SQLite, nil, "test.db", "test", assert.NoError},
		{"clickhouse", nil, clickDSN, "", assert.NoError},
	}
	for _, test := range tests {
		d, err := NewDialect(test.ctx, test.drv, test.dsn)
		test.Err(t, err)
		if err == nil {
			assert.Equal(t, test.name, d.DBName())
		}
	}
}
|
package parser
import (
"fmt"
"strconv"
"strings"
"unicode"
)
// Member is a single term of a polynomial equation, e.g. "- 4 * X^2".
type Member struct {
	Name    string  // variable name (e.g. "X"); empty when there is no "^" part
	Coeff   float64 // unsigned-scan coefficient including its leading sign characters
	Exp     int     // exponent; 0 when no "^" part is present
	Operand string  // leading sign of the member, "+" or "-"
}

// Equation is a parsed equation: the left-hand members and one right-hand member.
type Equation struct {
	LMembers []Member
	RMember  Member
}

// parseMember parses one textual member (e.g. "+ 4 * X^1") into a Member.
// memberPos is the member's 1-based position, used only in error messages.
func parseMember(str string, memberPos int) (Member, error) {
	var member Member
	str = strings.Trim(str, " ")
	if str == "" {
		// Guard: the scans below index str[0].
		return member, fmt.Errorf("Error parsing member n°%d: empty member", memberPos)
	}
	fSplit := strings.Split(str, "^")
	if len(fSplit) == 2 {
		// The variable name is the character just before '^'; the exponent
		// is the single digit just after it.
		member.Name = string(fSplit[0][len(fSplit[0])-1])
		Exp, err := strconv.Atoi(string(fSplit[1][0]))
		if err != nil {
			return member, fmt.Errorf("Error parsing member n°%d: Exp is not a int", memberPos)
		}
		member.Exp = Exp
	}
	switch string(str[0]) {
	case "-":
		member.Operand = "-"
	default:
		member.Operand = "+"
	}
	// Scan the leading signed number. The `i < len(str)` bound fixes an
	// out-of-range panic when the whole member is numeric (e.g. "4").
	i := 0
	for i < len(str) && (unicode.IsDigit(rune(str[i])) || str[i] == '+' || str[i] == '-' || str[i] == ' ' || str[i] == '.') {
		i++
	}
	Coeff, err := strconv.ParseFloat(strings.Replace(str[0:i], " ", "", -1), 64)
	if err != nil {
		return member, fmt.Errorf("Error parsing member n°%d: Coeff is not a int", memberPos)
	}
	member.Coeff = Coeff
	return member, nil
}
// getOperandPos returns the candidate split positions for one side of an
// equation: index 0, the index of every '+'/'-' sign, and the index of the
// side's last character.
func getOperandPos(side string) []int {
	positions := []int{0}
	for i, c := range side {
		switch c {
		case '+', '-':
			positions = append(positions, i)
		}
	}
	return append(positions, len(side)-1)
}
// ParseEquation parses a textual equation "lhs = rhs" where the left side
// is a sum of members and the right side is a single member. It returns an
// error for a malformed equation or any unparsable member.
func ParseEquation(equation string) (Equation, error) {
	sides := strings.Split(equation, "=")
	// Exactly one '=' sign is required (Split never returns an empty slice).
	if len(sides) != 2 {
		return Equation{}, fmt.Errorf("Equation malformatted")
	}
	var members []Member
	positions := getOperandPos(sides[0])
	for i := 0; i < len(positions)-1; i++ {
		member, err := parseMember(sides[0][positions[i]:positions[i+1]], i+1)
		if err != nil {
			return Equation{}, err
		}
		members = append(members, member)
	}
	// Previously the right-hand member's parse error was silently discarded,
	// yielding a zero-valued RMember; now it is propagated.
	rightMember, err := parseMember(sides[1], -1)
	if err != nil {
		return Equation{}, err
	}
	return Equation{LMembers: members, RMember: rightMember}, nil
}
|
// Copyright 2020 SEQSENSE, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials"
ist "github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling"
ist_types "github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling/types"
"github.com/seqsense/aws-iot-device-sdk-go/v6/internal/ioterr"
)
// TestAPI starts an in-process mock of the IoT secure tunneling API on an
// ephemeral port, points an AWS SDK client at it via a custom endpoint
// resolver, and verifies that OpenTunnel round-trips successfully.
func TestAPI(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()
	tunnelHandler := NewTunnelHandler()
	apiHandler := NewAPIHandler(tunnelHandler, nil)
	mux := http.NewServeMux()
	mux.Handle("/", apiHandler)
	mux.Handle("/tunnel", tunnelHandler)
	s := &http.Server{
		Handler: mux,
		ReadTimeout: 10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	// Close the server before waiting on wg so Serve below can return.
	defer func() {
		if err := s.Close(); err != nil {
			t.Error(err)
		}
	}()
	ln, err := net.Listen("tcp", ":0")
	if err != nil {
		t.Fatal(err)
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		// ErrServerClosed is the normal shutdown path triggered by s.Close.
		switch err := s.Serve(ln); err {
		case http.ErrServerClosed, nil:
		default:
			t.Error(err)
		}
	}()
	// Dummy static credentials; the mock server does not validate signatures.
	cfg := aws.Config{
		Region: "nothing",
		EndpointResolver: newEndpointForFunc(ln.Addr().(*net.TCPAddr).Port),
		Credentials: credentials.NewStaticCredentialsProvider(
			"ASIAZZZZZZZZZZZZZZZZ",
			"0000000000000000000000000000000000000000",
			"",
		),
	}
	api := ist.NewFromConfig(cfg)
	out, err := api.OpenTunnel(context.TODO(), &ist.OpenTunnelInput{
		Description: aws.String("desc"),
		DestinationConfig: &ist_types.DestinationConfig{
			Services: []string{
				"ssh",
			},
			ThingName: aws.String("thing"),
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("%v", out)
}
// TestAPI_Validate checks that invalid OpenTunnel/CloseTunnel requests are
// rejected with an *ioterr.Error wrapping errInvalidRequest.
func TestAPI_Validate(t *testing.T) {
	h := apiHandler{}
	var err error
	var ie *ioterr.Error
	// OpenTunnel with empty tags is invalid.
	_, err = h.openTunnel(&ist.OpenTunnelInput{
		Tags: []ist_types.Tag{},
	})
	if !errors.As(err, &ie) {
		t.Errorf("Expected error type: %T, got: %T", ie, err)
	}
	if !errors.Is(err, errInvalidRequest) {
		t.Errorf("Expected error: '%v', got: '%v'", errInvalidRequest, err)
	}
	// CloseTunnel without a tunnel id is invalid.
	_, err = h.closeTunnel(&ist.CloseTunnelInput{})
	if !errors.As(err, &ie) {
		t.Errorf("Expected error type: %T, got: %T", ie, err)
	}
	if !errors.Is(err, errInvalidRequest) {
		t.Errorf("Expected error: '%v', got: '%v'", errInvalidRequest, err)
	}
}
// newEndpointForFunc returns an aws.EndpointResolver that routes every
// service/region to the local test server listening on the given port.
func newEndpointForFunc(port int) aws.EndpointResolver {
	endpointURL := fmt.Sprintf("http://localhost:%d", port)
	return aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
		ep := aws.Endpoint{
			URL:           endpointURL,
			PartitionID:   "clone",
			SigningRegion: region,
			SigningName:   service,
			SigningMethod: "v4",
		}
		return ep, nil
	})
}
|
package ridemerge
import "net/http"
// GetSession returns the current session if it exists, otherwise an empty
// session is returned. A session is considered logged in when the
// "session-id" cookie is present; its value is used as the e-mail address.
func GetSession(r *http.Request) Session {
	cUser, err := r.Cookie("session-id")
	var session Session
	if err == nil {
		session.Email = cUser.Value
		session.LoggedIn = true
	}
	return session
}
|
package main
import "fmt"
// vard returns the sum of its variadic int arguments.
func vard(b ...int) int {
	total := 0
	for i := 0; i < len(b); i++ {
		total += b[i]
	}
	return total
}
// checkIn reports whether n occurs in nums, printing every match position
// and a "Not found" notice when n is absent.
func checkIn(n int, nums ...int) bool {
	found := false
	fmt.Printf("type of nums is %T\n", nums)
	for ind := range nums {
		if nums[ind] == n {
			fmt.Println("Element", n, " was found on position", ind, "in", nums)
			found = true
		}
	}
	if !found {
		fmt.Println("Not found")
	}
	return found
}
// change demonstrates variadic slice aliasing: the write to s[0] is visible
// to a caller that passed a slice with `...`, while the append reallocates
// into a new backing array and is not.
func change(s ...string) {
	s[0] = "Go"
	s = append(s, "playground") // grows past cap -> new backing array inside change
	fmt.Println(s)
}
// main drives the variadic-function demos.
func main() {
	fmt.Println(vard(1, 2, 3, 4, 5, 6, 7))
	fmt.Println("go")
	many := []int{2, 3, 4, 5, 6, 2, 1, 4, 2, 2, 10}
	fmt.Println(checkIn(2, many...))
	fmt.Println(checkIn(2))
	sample := []int{1, 2, 3}
	fmt.Println(checkIn(1, sample...))
	welcome := []string{"hello", "world"}
	change(welcome...)
	fmt.Println(welcome)
}
|
package benchmarking
import (
"fmt"
"testing"
)
// TestGreet checks the exact greeting produced for a sample name.
func TestGreet(t *testing.T) {
	s := Greet("josh")
	if s != "welcome sir!, josh" {
		t.Errorf("expected: 'welcome sir!, josh' | got: %v", s)
	}
}
// ExampleGreet demonstrates Greet; `go test` verifies the Output comment.
func ExampleGreet() {
	fmt.Println(Greet("josh"))
	// Output:
	// welcome sir!, josh
}
// BenchmarkGreet measures the cost of a single Greet call.
func BenchmarkGreet(b *testing.B) {
	for i := 0; i < b.N; i++ {
		Greet("josh")
	}
}
// $ go test -cover :: analysis of how many statements are covered by the test suite
// $ go test -coverprofile=c.out :: identifies coverage holes
// $ go tool cover -html=c.out :: opens up a visual for full coverage analysis
|
package galaxy
// galacticWords maps the known galactic words to their roman numeral symbols.
var galacticWords = map[string]string{
	"glob": "I",
	"prok": "V",
	"pish": "X",
	"tegj": "L",
}

// ConvertWords translates a single galactic word to its roman numeral
// symbol, or returns "Word Not Found" for an unknown word.
func ConvertWords(word string) string {
	// The presence boolean is named ok (it was confusingly called `key`),
	// and the else-after-return has been flattened.
	if val, ok := galacticWords[word]; ok {
		return val
	}
	return "Word Not Found"
}
|
package main
// Tenant is a tenant record as exposed over the JSON API.
type Tenant struct {
	Id int `json:"id"`
	DatabaseId string `json:"databaseId"`
}

// TenantMember links a user to a tenant.
type TenantMember struct {
	TenantId int
	UserId string
}

// TenantStore abstracts tenant persistence.
type TenantStore interface {
	GetTenantsForUser(userId string) ([]Tenant, error)
	CreateTenant(tenantId, userId string) (*Tenant, error)
}
|
package main
import (
"context"
"flag"
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/lillilli/geth_contract/config"
"github.com/lillilli/geth_contract/eth"
"github.com/lillilli/geth_contract/http"
"github.com/lillilli/geth_contract/session"
"github.com/lillilli/logger"
"github.com/lillilli/vconf"
)
var (
	// configFile is the path to the service configuration file (-config flag).
	configFile = flag.String("config", "", "set service config file")
)

// updateTxsStateInterval is how often transaction states are refreshed.
const updateTxsStateInterval = 10 * time.Second
// main loads the service configuration, initializes logging and runs the
// service, exiting with status 1 on any failure.
func main() {
	flag.Parse()
	cfg := &config.Config{}
	if err := vconf.InitFromFile(*configFile, cfg); err != nil {
		fmt.Printf("unable to load config: %s\n", err)
		os.Exit(1)
	}
	logger.Init(cfg.Log)
	log := logger.NewLogger("api")
	if err := runService(cfg, log); err != nil {
		log.Errorf("Run service error: %v", err)
		os.Exit(1)
	}
}
// runService wires up the contract client, the background tx-state updater
// and the HTTP server, then blocks until SIGINT/SIGTERM before stopping the
// server.
func runService(cfg *config.Config, log logger.Logger) error {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	// cancel stops the startUpdateTxsState goroutine when this function returns
	defer cancel()
	contractClient, err := eth.NewContractClient(cfg.PrivateKey, cfg.EthNodeURL, cfg.ContractAddress)
	if err != nil {
		return err
	}
	go startUpdateTxsState(ctx, log, contractClient)
	userSessionsStore := session.NewUserSessionStore()
	httpServer := http.NewServer(cfg.HTTP, contractClient, userSessionsStore)
	// Start is presumably non-blocking — confirm; otherwise the signal wait
	// below would never be reached.
	if err := httpServer.Start(); err != nil {
		return err
	}
	<-signals
	close(signals)
	return httpServer.Stop()
}
// startUpdateTxsState periodically refreshes transaction states through the
// contract client until ctx is cancelled. One refresh runs immediately so
// callers do not wait a full interval for the first update.
func startUpdateTxsState(ctx context.Context, log logger.Logger, contractClient eth.ContractClient) {
	ticker := time.NewTicker(updateTxsStateInterval)
	// Stop the ticker on exit; previously it was never stopped and its
	// timer leaked after ctx cancellation.
	defer ticker.Stop()
	if err := contractClient.UpdateTxsStates(); err != nil {
		log.Errorf("Updating txs states failed: %v", err)
	}
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := contractClient.UpdateTxsStates(); err != nil {
				log.Errorf("Updating txs states failed: %v", err)
			}
		}
	}
}
|
package controllers
import (
ketov1alpha1 "github.com/ory/keto-maester/api/v1alpha1"
"github.com/ory/keto-maester/keto"
)
const (
	// FinalizerName is the finalizer the controllers attach to managed
	// resources so cleanup can run before deletion.
	FinalizerName = "finalizer.ory.keto.sh"
)

// KetoConfiger exposes the Keto connection configuration of a custom resource.
type KetoConfiger interface {
	GetKeto() ketov1alpha1.Keto
}

// KetoClientMakerFunc builds a keto client from a configuration source.
type KetoClientMakerFunc func(KetoConfiger) (KetoClientInterface, error)

// clientMapKey identifies a cached client by its connection coordinates.
type clientMapKey struct {
	url string
	port int
	endpoint string
}

// KetoClientInterface is the subset of the keto HTTP API used by the
// controllers: CRUD for access-control policies and for policy roles.
type KetoClientInterface interface {
	GetORYAccessControlPolicy(flavor, id string) (*keto.ORYAccessControlPolicyJSON, bool, error)
	ListORYAccessControlPolicy(flavor string) ([]*keto.ORYAccessControlPolicyJSON, error)
	PutORYAccessControlPolicy(flavor string, o *keto.ORYAccessControlPolicyJSON) (*keto.ORYAccessControlPolicyJSON, error)
	DeleteORYAccessControlPolicy(flavor, id string) error
	GetORYAccessControlPolicyRole(flavor, id string) (*keto.ORYAccessControlPolicyRoleJSON, bool, error)
	ListORYAccessControlPolicyRole(flavor string) ([]*keto.ORYAccessControlPolicyRoleJSON, error)
	PutORYAccessControlPolicyRole(flavor string, o *keto.ORYAccessControlPolicyRoleJSON) (*keto.ORYAccessControlPolicyRoleJSON, error)
	DeleteORYAccessControlPolicyRole(flavor, id string) error
}
// Helper functions to check and remove string from a slice of strings.

// containsString reports whether s is present in slice.
func containsString(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
// removeString returns a copy of slice with every occurrence of s removed.
// A nil or all-s input yields a nil result.
func removeString(slice []string, s string) (result []string) {
	for _, item := range slice {
		if item != s {
			result = append(result, item)
		}
	}
	return result
}
|
/*
The card module contains the card CRUD operations and the relationship CRUD.
model.go: definition of the ORM-based data model
routers.go: router binding and core logic
serializers.go: definition of the schema of the returned data
validators.go: definition of the validators for form data
package card
|
package main
import (
"fmt"
"math"
)
// drawingBook returns the minimum number of page turns needed to reach
// `page` in a book of `total` pages, turning from whichever end is closer.
func drawingBook(total, page int) int {
	// No turns needed for page 1 or for the last printed sheet.
	if page == 1 || (total%2 == 0 && page == total) || (total%2 != 0 && page >= total-1) {
		return 0
	}
	var turns float64
	switch {
	case page <= total/2:
		// Turn from the front.
		turns = math.Ceil(float64(page-1) / 2.0)
	case total%2 != 0:
		// Turn from the back of an odd-length book.
		turns = float64(total-page) / 2.0
	default:
		// Turn from the back of an even-length book.
		turns = math.Ceil(float64(total-page) / 2.0)
	}
	return int(turns)
}
// main prints drawingBook's answer for a few sample inputs.
func main() {
	cases := [][2]int{{6, 2}, {5, 4}, {70809, 46090}, {6, 5}}
	for _, c := range cases {
		fmt.Println(drawingBook(c[0], c[1]))
	}
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package initializer
import (
"context"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/initializer/build"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/initializer/deploy"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/initializer/render"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/warnings"
)
var (
	// getWd is an indirection over os.Getwd so tests can substitute it; for testing
	getWd = os.Getwd
)
// generateSkaffoldConfig assembles a new SkaffoldConfig from the build,
// render and deploy initializers, naming it after the working directory.
func generateSkaffoldConfig(b build.Initializer, r render.Initializer, d deploy.Initializer) *latest.SkaffoldConfig {
	// if we're here, the user has no skaffold yaml so we need to generate one
	// if the user doesn't have any k8s yamls, generate one for each dockerfile
	log.Entry(context.TODO()).Info("generating skaffold config")
	name, err := suggestConfigName()
	if err != nil {
		// Non-fatal: an empty name is acceptable, just warn.
		warnings.Printf("Couldn't generate default config name: %s", err.Error())
	}
	renderConfig, profiles := r.RenderConfig()
	deployConfig := d.DeployConfig()
	buildConfig, portForward := b.BuildConfig()
	return &latest.SkaffoldConfig{
		APIVersion: latest.Version,
		Kind: "Config",
		Metadata: latest.Metadata{
			Name: name,
		},
		Pipeline: latest.Pipeline{
			Build: buildConfig,
			Render: renderConfig,
			Deploy: deployConfig,
			PortForward: portForward,
		},
		Profiles: profiles,
	}
}
// suggestConfigName derives a default config name from the working
// directory's base name, canonicalized to a valid k8s name. It returns ""
// for root-like directories where no sensible name exists.
func suggestConfigName() (string, error) {
	cwd, err := getWd()
	if err != nil {
		return "", err
	}
	// give up for edge cases
	if base := filepath.Base(cwd); base != "." && base != string(filepath.Separator) {
		return canonicalizeName(base), nil
	}
	return "", nil
}
// canonicalizeName converts a given string to a valid k8s name string.
// See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names for details
func canonicalizeName(name string) string {
	// NOTE(review): the pattern also rewrites digits to '-', although k8s
	// names allow digits — confirm whether that is intentional.
	forbidden := regexp.MustCompile(`[^-.a-z]+`)
	result := forbidden.ReplaceAllString(strings.ToLower(name), "-")
	// k8s subdomain names are capped at 253 characters.
	if len(result) > 253 {
		result = result[:253]
	}
	return result
}
|
package test
import (
"context"
"sync"
"testing"
format "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-merkledag"
"github.com/ipfs/go-unixfs/io"
testu "github.com/ipfs/go-unixfs/test"
"github.com/stretchr/testify/require"
)
type Morpher func(format.Node) (format.Node, error)
// FSDagger is a test helper useful to build various UnixFS DAGs.
type FSDagger struct {
	Morpher Morpher // optional transform applied to every node in addNode

	t testing.TB
	ctx context.Context
	dag format.DAGService
	nodes map[string]*FSDaggerNode // registered nodes keyed by name; guarded by l
	l sync.Mutex
}
// NewFSDagger returns an empty FSDagger bound to the given test, context
// and DAG service.
func NewFSDagger(t testing.TB, ctx context.Context, dag format.DAGService) *FSDagger {
	return &FSDagger{t: t, ctx: ctx, dag: dag, nodes: make(map[string]*FSDaggerNode)}
}
// RandNode creates a 10000-byte random CIDv1 node, adds it to the DAG and
// registers it under name; the raw data is kept in the entry's Data field.
func (d *FSDagger) RandNode(name string) *FSDaggerNode {
	data, nd := testu.GetRandomNode(d.t, d.dag, 10000, testu.UseCidV1)
	return d.addNode(&FSDaggerNode{d: d, node: nd, name: name, Data: data})
}
// NewNode creates a CIDv1 node from the given data, adds it to the DAG and
// registers it under name.
func (d *FSDagger) NewNode(name string, data []byte) *FSDaggerNode {
	return d.addNode(&FSDaggerNode{
		d: d,
		node: testu.GetNode(d.t, d.dag, data, testu.NodeOpts{Prefix: merkledag.V1CidPrefix()}),
		name: name,
		Data: data,
	})
}
// NewDir creates a UnixFS directory containing the given entries, adds it
// to the DAG service and registers it under name.
func (d *FSDagger) NewDir(name string, es ...*FSDaggerNode) *FSDaggerNode {
	dir := io.NewDirectory(d.dag)
	dir.SetCidBuilder(merkledag.V1CidPrefix())
	for _, entry := range es {
		require.NoError(d.t, dir.AddChild(d.ctx, entry.name, entry.node))
	}
	nd, err := dir.GetNode()
	require.NoError(d.t, err)
	require.NoError(d.t, d.dag.Add(d.ctx, nd))
	return d.addNode(&FSDaggerNode{d: d, node: nd, name: name, isDir: true})
}
// Node returns the registered node with the given name, failing the test
// if no such entry exists.
func (d *FSDagger) Node(name string) *FSDaggerNode {
	d.l.Lock()
	defer d.l.Unlock()
	if e, ok := d.nodes[name]; ok {
		return e
	}
	d.t.Fatal("dagger: entry not found")
	return nil // unreachable: Fatal stops the test goroutine
}
// Remove deletes the named node's block from the DAG service. The entry
// itself stays registered in the name index.
func (d *FSDagger) Remove(name string) {
	e := d.Node(name)
	err := d.dag.Remove(d.ctx, e.node.Cid())
	require.NoError(d.t, err)
}
// addNode registers e under its name, applying the optional Morpher first.
// Registering a duplicate name fails the test.
func (d *FSDagger) addNode(e *FSDaggerNode) *FSDaggerNode {
	d.l.Lock()
	defer d.l.Unlock()
	_, ok := d.nodes[e.name]
	if ok {
		d.t.Fatal("dagger: entry name is used")
	}
	// The morpher may rewrite the node (e.g. for corruption tests —
	// presumably; confirm against the callers).
	if d.Morpher != nil {
		m, err := d.Morpher(e.node)
		if err != nil {
			d.t.Fatalf("dagger: morpher failed with: %s", err)
		}
		e.node = m
	}
	d.nodes[e.name] = e
	return e
}
|
package inventory
import (
"encoding/json"
"net/http"
"net/url"
"shopping-cart/pkg/controllers/common"
"shopping-cart/pkg/service"
"shopping-cart/types"
"shopping-cart/utils/applog"
"github.com/gorilla/mux"
)
// AddItemToInventory : handler function for POST /v1/inventory call.
// Requires an authenticated "admin" user; the request body is a JSON item.
func AddItemToInventory(w http.ResponseWriter, r *http.Request) {
	// authenticating user
	_, err := common.CheckAuthorized(w, r)
	if err != nil {
		return
	}
	as := service.AuthService{}
	authService := as.NewAuthService()
	errs := url.Values{}
	if authService.GetUser().UserName != "admin" {
		// fixed typo: "Forbiden" -> "Forbidden"
		errs.Add("data", "Forbidden, user is not 'admin'")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusForbidden)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	item := &types.Item{}
	if err := json.NewDecoder(r.Body).Decode(item); err != nil {
		errs.Add("data", "Invalid data")
		applog.Errorf("invalid request for add item to inventory, %v", err)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	applog.Info("adding item to inventory")
	is := service.InventoryService{}
	inventoryService := is.NewInventoryService()
	err = inventoryService.AddToInventory(item)
	if err != nil {
		// Previously this response carried an empty errors map; include the
		// failure reason so clients can see why the add was rejected.
		errs.Add("data", err.Error())
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": item, "status": 1}
	json.NewEncoder(w).Encode(response)
	applog.Info("add to inventory request completed")
}
// ViewInventory Get All items in inventory. Requires an authenticated
// user; responds with the full item list as JSON.
func ViewInventory(w http.ResponseWriter, r *http.Request) {
	// authenticating user
	_, err := common.CheckAuthorized(w, r)
	if err != nil {
		return
	}
	items := &types.ItemList{}
	applog.Info("get all items from item")
	is := service.InventoryService{}
	inventoryService := is.NewInventoryService()
	err = inventoryService.ViewInvetory(items)
	if err != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadRequest)
		response := map[string]interface{}{"errors": err, "status": 0}
		json.NewEncoder(w).Encode(response)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	response := map[string]interface{}{"data": items, "status": 1}
	json.NewEncoder(w).Encode(response)
}
// RemoveItem deletes the inventory item identified by the "itemid" URL
// parameter. Only the "admin" user is allowed to delete; everyone else
// receives a 403. On success the deleted item is echoed back with a message.
func RemoveItem(w http.ResponseWriter, r *http.Request) {
	// authenticating user; the helper writes its own response on failure
	if _, authErr := common.CheckAuthorized(w, r); authErr != nil {
		return
	}
	as := service.AuthService{}
	authService := as.NewAuthService()
	errs := url.Values{}
	// only the admin account may modify the inventory
	if authService.GetUser().UserName != "admin" {
		errs.Add("data", "User does not have access to update inventory")
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusForbidden)
		resp := map[string]interface{}{"errors": errs, "status": 0}
		json.NewEncoder(w).Encode(resp)
		return
	}
	params := mux.Vars(r)
	item := &types.Item{}
	svc := service.InventoryService{}
	inventory := svc.NewInventoryService()
	if removeErr := inventory.RemoveItem(item, params["itemid"]); removeErr != nil {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		resp := map[string]interface{}{"errors": removeErr.Error(), "status": 0}
		json.NewEncoder(w).Encode(resp)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	resp := map[string]interface{}{"data": item, "message": "Item Deleted Successfully", "status": 1}
	json.NewEncoder(w).Encode(resp)
}
|
package geoip
import "sort"
type (
	// IPNetV4 is an inclusive IPv4 address range [Lo, Hi], each bound
	// encoded as a big-endian uint32.
	IPNetV4 struct {
		Lo uint32
		Hi uint32
	}
	// IPNetListV4 is a list of IPv4 ranges. Contains assumes the list is
	// sorted ascending by Hi (required for the binary search) and that
	// the ranges do not overlap.
	IPNetListV4 []IPNetV4
)

// Contains reports whether ip falls inside one of the ranges. ip is
// expected to be a 4-byte IPv4 address; inputs shorter than 4 bytes now
// return false instead of panicking on the out-of-range index.
// NOTE(review): 16-byte IPv4-in-IPv6 slices are not normalized here —
// callers should pass the 4-byte form (e.g. net.IP.To4()).
func (a IPNetListV4) Contains(ip []byte) bool {
	if len(ip) < 4 {
		return false
	}
	ip4 := ip4ToNum(ip)
	// find the first range whose upper bound can still contain ip4.
	i := sort.Search(len(a), func(i int) bool {
		return a[i].Hi >= ip4
	})
	if i < len(a) {
		return a[i].Lo <= ip4 && ip4 <= a[i].Hi
	}
	return false
}

// ip4ToNum packs the first four bytes of ip into a big-endian uint32.
func ip4ToNum(ip []byte) uint32 {
	return (uint32(ip[0]) << 24) |
		(uint32(ip[1]) << 16) |
		(uint32(ip[2]) << 8) |
		(uint32(ip[3]))
}
|
// Package quadtree implements a quadtree using rectangular partitions.
// Each point exists in a unique node in the tree or as leaf nodes.
// This implementation is based off of the d3 implementation:
// https://github.com/mbostock/d3/wiki/Quadtree-Geom
package quadtree
import (
"errors"
"math"
"github.com/paulmach/orb"
"github.com/paulmach/orb/planar"
)
var (
	// ErrPointOutsideOfBounds is returned when trying to add a point
	// to a quadtree and the point is outside the bounds used to create the tree.
	ErrPointOutsideOfBounds = errors.New("quadtree: point outside of bounds")
)
// Quadtree implements a two-dimensional recursive spatial subdivision
// of orb.Pointers. This implementation uses rectangular partitions.
type Quadtree struct {
	bound orb.Bound // fixed partition bounds; every added point must lie within
	root *node      // nil until the first successful Add
}
// A FilterFunc is a function that filters the points to search for.
// Returning false excludes the candidate from the result.
type FilterFunc func(p orb.Pointer) bool
// node represents a node of the quad tree. Each node stores a Value
// and has links to its 4 children
type node struct {
	Value orb.Pointer    // nil when the value was removed/pulled up; the slot may be reused by a later Add
	Children [4]*node    // quadrant children; index layout matches childIndex (bit 1 = lower half, bit 0 = right half)
}
// New creates a new quadtree for the given bound. Added points
// must be within this bound. The returned tree starts empty (nil root).
func New(bound orb.Bound) *Quadtree {
	return &Quadtree{bound: bound}
}
// Bound returns the bounds used for the quad tree.
func (q *Quadtree) Bound() orb.Bound {
	return q.bound
}
// Add puts an object into the quad tree, must be within the quadtree bounds.
// Returns ErrPointOutsideOfBounds if the point is outside; a nil pointer is
// silently ignored. This function is not thread-safe, ie. multiple goroutines
// cannot insert into a single quadtree.
func (q *Quadtree) Add(p orb.Pointer) error {
	if p == nil {
		return nil
	}
	point := p.Point()
	if !q.bound.Contains(point) {
		return ErrPointOutsideOfBounds
	}
	if q.root == nil {
		// first insert: the root becomes a leaf holding the value.
		q.root = &node{Value: p}
		return nil
	}
	if q.root.Value == nil {
		// root exists but its value was removed earlier; reuse the slot.
		q.root.Value = p
		return nil
	}
	// reuse the already-computed point instead of calling p.Point() a
	// second time through the interface.
	q.add(q.root, p, point,
		q.bound.Min[0], q.bound.Max[0],
		q.bound.Min[1], q.bound.Max[1],
	)
	return nil
}
// add recursively descends to the quadrant containing point and stores p in
// the first free slot it finds, splitting further as needed.
func (q *Quadtree) add(n *node, p orb.Pointer, point orb.Point, left, right, bottom, top float64) {
	// pick the child quadrant for this point and narrow the bounds to it.
	quadrant := 0
	cy := (bottom + top) / 2.0
	if point[1] <= cy {
		quadrant = 2
		top = cy
	} else {
		bottom = cy
	}
	cx := (left + right) / 2.0
	if point[0] >= cx {
		quadrant++
		left = cx
	} else {
		right = cx
	}
	child := n.Children[quadrant]
	switch {
	case child == nil:
		n.Children[quadrant] = &node{Value: p}
	case child.Value == nil:
		// reuse an emptied slot left behind by a removal.
		child.Value = p
	default:
		// occupied leaf or internal node: keep descending.
		q.add(child, p, point, left, right, bottom, top)
	}
}
// Remove will remove the pointer from the quadtree. By default it'll match
// using the points, but a FilterFunc can be provided for a more specific test
// if there are elements with the same point value in the tree. For example:
//
// func(pointer orb.Pointer) {
// return pointer.(*MyType).ID == lookingFor.ID
// }
//
// Returns true if a matching value was found and removed.
func (q *Quadtree) Remove(p orb.Pointer, eq FilterFunc) bool {
	// guard the empty tree: visit.Visit dereferences the node it is given,
	// so passing a nil root would panic (Matching/KNearestMatching already
	// guard this case).
	if q.root == nil {
		return false
	}
	if eq == nil {
		// default match: any stored pointer at exactly the same point.
		point := p.Point()
		eq = func(pointer orb.Pointer) bool {
			return point.Equal(pointer.Point())
		}
	}
	// reuse the nearest-point search (distance 0 for an exact match) to
	// locate the node holding a matching value. Copy the bound so the
	// search can shrink it while pruning.
	b := q.bound
	v := &findVisitor{
		point:          p.Point(),
		filter:         eq,
		closestBound:   &b,
		minDistSquared: math.MaxFloat64,
	}
	newVisit(v).Visit(q.root,
		q.bound.Min[0], q.bound.Max[0],
		q.bound.Min[1], q.bound.Max[1],
	)
	if v.closest == nil {
		return false
	}
	v.closest.Value = nil
	// if v.closest is NOT a leaf node, values will be shuffled up into this node.
	// if v.closest IS a leaf node, the call is a no-op but we can't delete
	// the now empty node because we don't know the parent here.
	//
	// Future adds will reuse this node if applicable.
	// Removing v.closest parent will cause this node to be removed,
	// but the parent will be a leaf with a nil value.
	removeNode(v.closest)
	return true
}
// removeNode repairs the tree after a node's value was cleared: it pulls a
// child's value up into n, then recurses to fix that child. It reports true
// when n has become completely empty and can be detached by its parent.
func removeNode(n *node) bool {
	// locate the first occupied child slot.
	child := -1
	for j := range n.Children {
		if n.Children[j] != nil {
			child = j
			break
		}
	}
	if child == -1 {
		// no children remain; n's value was already pulled up/removed by
		// the caller, so n itself is removable.
		return true
	}
	// promote the child's value into this node, then repair the child.
	n.Value = n.Children[child].Value
	n.Children[child].Value = nil
	if removeNode(n.Children[child]) {
		n.Children[child] = nil
	}
	return false
}
// Find returns the closest Value/Pointer in the quadtree.
// This function is thread safe. Multiple goroutines can read from
// a pre-created tree. Returns nil for an empty tree.
func (q *Quadtree) Find(p orb.Point) orb.Pointer {
	// thin wrapper: Matching with a nil filter accepts every candidate.
	return q.Matching(p, nil)
}
// Matching returns the closest Value/Pointer in the quadtree for which
// the given filter function returns true. This function is thread safe.
// Multiple goroutines can read from a pre-created tree. Returns nil for an
// empty tree or when no candidate passes the filter.
func (q *Quadtree) Matching(p orb.Point, f FilterFunc) orb.Pointer {
	if q.root == nil {
		return nil
	}
	// copy the bound so the visitor can shrink it while pruning subtrees.
	searchBound := q.bound
	visitor := &findVisitor{
		point:          p,
		filter:         f,
		closestBound:   &searchBound,
		minDistSquared: math.MaxFloat64,
	}
	newVisit(visitor).Visit(q.root,
		q.bound.Min[0], q.bound.Max[0],
		q.bound.Min[1], q.bound.Max[1],
	)
	if visitor.closest != nil {
		return visitor.closest.Value
	}
	return nil
}
// KNearest returns k closest Value/Pointer in the quadtree.
// This function is thread safe. Multiple goroutines can read from a pre-created tree.
// An optional buffer parameter is provided to allow for the reuse of result slice memory.
// The points are returned in a sorted order, nearest first.
// This function allows defining a maximum distance in order to reduce search iterations.
func (q *Quadtree) KNearest(buf []orb.Pointer, p orb.Point, k int, maxDistance ...float64) []orb.Pointer {
	// thin wrapper: KNearestMatching with a nil filter accepts every candidate.
	return q.KNearestMatching(buf, p, k, nil, maxDistance...)
}
// KNearestMatching returns k closest Value/Pointer in the quadtree for which
// the given filter function returns true. This function is thread safe.
// Multiple goroutines can read from a pre-created tree. An optional buffer
// parameter is provided to allow for the reuse of result slice memory.
// The points are returned in a sorted order, nearest first.
// This function allows defining a maximum distance in order to reduce search iterations.
func (q *Quadtree) KNearestMatching(buf []orb.Pointer, p orb.Point, k int, f FilterFunc, maxDistance ...float64) []orb.Pointer {
	if q.root == nil {
		return nil
	}
	// copy the bound so the visitor can shrink it while pruning subtrees.
	b := q.bound
	v := &nearestVisitor{
		point: p,
		filter: f,
		k: k,
		maxHeap: make(maxHeap, 0, k+1), // k+1: room to push a candidate before dropping the overflow
		closestBound: &b,
		maxDistSquared: math.MaxFloat64,
	}
	if len(maxDistance) > 0 {
		// only the first optional distance is used; comparisons happen in squared space.
		v.maxDistSquared = maxDistance[0] * maxDistance[0]
	}
	newVisit(v).Visit(q.root,
	// q.bound.Left(), q.bound.Right(),
	// q.bound.Bottom(), q.bound.Top(),
		q.bound.Min[0], q.bound.Max[0],
		q.bound.Min[1], q.bound.Max[1],
	)
	//repack result: the heap root is the farthest kept point, so filling
	// buf from the back yields nearest-first order.
	if cap(buf) < len(v.maxHeap) {
		buf = make([]orb.Pointer, len(v.maxHeap))
	} else {
		buf = buf[:len(v.maxHeap)]
	}
	for i := len(v.maxHeap) - 1; i >= 0; i-- {
		// NOTE(review): reads the heap root then pops it — assumes
		// maxHeap[0] is the maximum element and Pop removes it; confirm
		// against the maxHeap implementation (defined elsewhere).
		buf[i] = v.maxHeap[0].point
		v.maxHeap.Pop()
	}
	return buf
}
// InBound returns a slice with all the pointers in the quadtree that are
// within the given bound. An optional buffer parameter is provided to allow
// for the reuse of result slice memory. This function is thread safe.
// Multiple goroutines can read from a pre-created tree.
func (q *Quadtree) InBound(buf []orb.Pointer, b orb.Bound) []orb.Pointer {
	// thin wrapper: InBoundMatching with a nil filter accepts every candidate.
	return q.InBoundMatching(buf, b, nil)
}
// InBoundMatching returns a slice with all the pointers in the quadtree that are
// within the given bound and matching the give filter function. An optional buffer
// parameter is provided to allow for the reuse of result slice memory. This function
// is thread safe. Multiple goroutines can read from a pre-created tree.
// Returns nil for an empty tree.
func (q *Quadtree) InBoundMatching(buf []orb.Pointer, b orb.Bound, f FilterFunc) []orb.Pointer {
	if q.root == nil {
		return nil
	}
	// Reuse the buffer's backing storage whenever it has any, not only when
	// len(buf) > 0: the previous check skipped reuse for a zero-length slice
	// with spare capacity, defeating the documented purpose of the buffer.
	// buf[:0] of a nil slice is still nil, so behavior for no buffer is unchanged.
	p := buf[:0]
	v := &inBoundVisitor{
		bound:    &b,
		pointers: p,
		filter:   f,
	}
	newVisit(v).Visit(q.root,
		q.bound.Min[0], q.bound.Max[0],
		q.bound.Min[1], q.bound.Max[1],
	)
	return v.pointers
}
// The visit stuff is a more go like (hopefully) implementation of the
// d3.quadtree.visit function. It is not exported, but if there is a
// good use case, it could be.
type visitor interface {
	// Bound returns the current relevant bound so we can prune irrelevant nodes
	// from the search. Using a pointer was benchmarked to be 5% faster than
	// having to copy the bound on return. go1.9
	Bound() *orb.Bound
	// Visit is invoked for every node with a non-nil Value that survives
	// the bound pruning; implementations accumulate their result here.
	Visit(n *node)
	// Point should return the specific point being search for, or null if there
	// isn't one (ie. searching by bound). This helps guide the search to the
	// best child node first.
	Point() orb.Point
}
// visit provides a framework for walking the quad tree.
// Currently used by the `Find` and `InBound` functions.
type visit struct {
	visitor visitor
}
// newVisit wraps a visitor in the traversal framework.
func newVisit(v visitor) *visit {
	return &visit{
		visitor: v,
	}
}
// Visit walks the subtree rooted at n (covering the given rectangle),
// pruning any subtree that cannot intersect the visitor's current bound and
// reporting every non-nil value to the visitor. Children are traversed
// starting with the quadrant containing the visitor's target point so the
// best candidates are seen first.
func (v *visit) Visit(n *node, left, right, bottom, top float64) {
	b := v.visitor.Bound()
	// prune: this cell lies entirely outside the bound of interest.
	if left > b.Max[0] || right < b.Min[0] ||
		bottom > b.Max[1] || top < b.Min[1] {
		return
	}
	if n.Value != nil {
		v.visitor.Visit(n)
	}
	// leaf check: nothing below to traverse.
	if n.Children[0] == nil && n.Children[1] == nil &&
		n.Children[2] == nil && n.Children[3] == nil {
		return
	}
	cx := (left + right) / 2.0
	cy := (bottom + top) / 2.0
	first := childIndex(cx, cy, v.visitor.Point())
	for offset := 0; offset < 4; offset++ {
		idx := (first + offset) % 4
		if n.Children[idx] == nil {
			continue
		}
		switch idx {
		case 0:
			v.Visit(n.Children[0], left, cx, cy, top)
		case 1:
			v.Visit(n.Children[1], cx, right, cy, top)
		case 2:
			v.Visit(n.Children[2], left, cx, bottom, cy)
		case 3:
			v.Visit(n.Children[3], cx, right, bottom, cy)
		}
	}
}
// findVisitor tracks the nearest matching value seen so far while the tree
// is walked. closestBound is shrunk around the query point as better
// candidates are found so whole subtrees can be pruned.
type findVisitor struct {
	point orb.Point          // query point
	filter FilterFunc        // optional; nil accepts everything
	closest *node            // best node found so far, nil if none
	closestBound *orb.Bound  // shared with the traversal for pruning
	minDistSquared float64   // squared distance to closest
}
// Bound returns the current pruning bound.
func (v *findVisitor) Bound() *orb.Bound {
	return v.closestBound
}
// Point returns the query point, guiding traversal order.
func (v *findVisitor) Point() orb.Point {
	return v.point
}
// Visit considers n's value as the nearest-so-far candidate and, when it
// wins, shrinks the pruning bound to a square of radius dist around the
// query point.
func (v *findVisitor) Visit(n *node) {
	// skip candidates rejected by the optional filter.
	if v.filter != nil && !v.filter(n.Value) {
		return
	}
	d := planar.DistanceSquared(n.Value.Point(), v.point)
	if d >= v.minDistSquared {
		return
	}
	v.minDistSquared = d
	v.closest = n
	// tighten the search window to the new best radius.
	r := math.Sqrt(d)
	v.closestBound.Min[0] = v.point[0] - r
	v.closestBound.Max[0] = v.point[0] + r
	v.closestBound.Min[1] = v.point[1] - r
	v.closestBound.Max[1] = v.point[1] + r
}
// type pointsQueueItem struct {
// point orb.Pointer
// distance float64 // distance to point and priority inside the queue
// index int // point index in queue
// }
// type pointsQueue []pointsQueueItem
// func newPointsQueue(capacity int) pointsQueue {
// // We make capacity+1 because we need additional place for the greatest element
// return make([]pointsQueueItem, 0, capacity+1)
// }
// func (pq pointsQueue) Len() int { return len(pq) }
// func (pq pointsQueue) Less(i, j int) bool {
// // We want pop longest distances so Less was inverted
// return pq[i].distance > pq[j].distance
// }
// func (pq pointsQueue) Swap(i, j int) {
// pq[i], pq[j] = pq[j], pq[i]
// pq[i].index = i
// pq[j].index = j
// }
// func (pq *pointsQueue) Push(x interface{}) {
// n := len(*pq)
// item := x.(pointsQueueItem)
// item.index = n
// *pq = append(*pq, item)
// }
// func (pq *pointsQueue) Pop() interface{} {
// old := *pq
// n := len(old)
// item := old[n-1]
// item.index = -1
// *pq = old[0 : n-1]
// return item
// }
// nearestVisitor collects the k nearest matching values in a max-heap keyed
// by squared distance, shrinking closestBound once the heap is full.
type nearestVisitor struct {
	point orb.Point          // query point
	filter FilterFunc        // optional; nil accepts everything
	k int                    // number of results requested
	maxHeap maxHeap          // holds at most k candidates; root is the farthest
	closestBound *orb.Bound  // shared with the traversal for pruning
	maxDistSquared float64   // squared cutoff; MaxFloat64 unless a maxDistance was given
}
// Bound returns the current pruning bound.
func (v *nearestVisitor) Bound() *orb.Bound {
	return v.closestBound
}
// Point returns the query point, guiding traversal order.
func (v *nearestVisitor) Point() orb.Point {
	return v.point
}
// Visit considers n's value as a k-nearest candidate. Matching values within
// the current cutoff are pushed onto the max-heap; once more than k are held,
// the farthest is dropped and the search window shrinks to the k-th distance.
func (v *nearestVisitor) Visit(n *node) {
	// skip this pointer if we have a filter and it doesn't match
	if v.filter != nil && !v.filter(n.Value) {
		return
	}
	point := n.Value.Point()
	if d := planar.DistanceSquared(point, v.point); d < v.maxDistSquared {
		v.maxHeap.Push(n.Value, d)
		if len(v.maxHeap) > v.k {
			// drop the farthest of the k+1 candidates.
			// NOTE(review): assumes maxHeap[0] is the maximum after Pop —
			// confirm against the maxHeap implementation (defined elsewhere).
			v.maxHeap.Pop()
			// Actually this is a hack. We know how heap works and obtain
			// top element without function call
			top := v.maxHeap[0]
			v.maxDistSquared = top.distance
			// We have filled queue, so we start to restrict searching range
			d = math.Sqrt(top.distance)
			v.closestBound.Min[0] = v.point[0] - d
			v.closestBound.Max[0] = v.point[0] + d
			v.closestBound.Min[1] = v.point[1] - d
			v.closestBound.Max[1] = v.point[1] + d
		}
	}
}
// inBoundVisitor accumulates every matching value whose point lies inside
// the fixed query bound.
type inBoundVisitor struct {
	bound *orb.Bound          // query bound; never shrunk (unlike the nearest visitors)
	pointers []orb.Pointer    // result accumulator (may reuse a caller-supplied buffer)
	filter FilterFunc         // optional; nil accepts everything
}
// Bound returns the query bound used for pruning.
func (v *inBoundVisitor) Bound() *orb.Bound {
	return v.bound
}
// Point returns the zero point: a bound search has no target point to
// steer traversal order toward.
func (v *inBoundVisitor) Point() (p orb.Point) {
	return
}
// Visit appends n's value to the result set when it passes the optional
// filter and its point lies inside the query bound (inclusive edges).
func (v *inBoundVisitor) Visit(n *node) {
	// skip candidates rejected by the optional filter.
	if v.filter != nil && !v.filter(n.Value) {
		return
	}
	pt := n.Value.Point()
	if pt[0] < v.bound.Min[0] || pt[0] > v.bound.Max[0] ||
		pt[1] < v.bound.Min[1] || pt[1] > v.bound.Max[1] {
		return
	}
	v.pointers = append(v.pointers, n.Value)
}
// childIndex returns the quadrant index (0-3) of point relative to the cell
// center (cx, cy): bit 1 is set for the lower half (y <= cy), bit 0 for the
// right half (x >= cx). Matches the layout used by (*Quadtree).add.
func childIndex(cx, cy float64, point orb.Point) int {
	var idx int
	if point[1] <= cy {
		idx |= 2
	}
	if point[0] >= cx {
		idx |= 1
	}
	return idx
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.